RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x6422528 and 100352x256)

import os
import pandas as pd
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms

# Define a custom dataset class
class AllergyDataset(Dataset):
    def __init__(self, csv_file, image_folder, transform=None):
        self.data = pd.read_csv(csv_file)
        self.image_folder = image_folder
        self.transform = transform

        # Filter out rows with missing values
        self.data = self.data.dropna(subset=['Chip_Image_Name', 'Present'])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]
        image_name = row['Chip_Image_Name']

        image_path = os.path.join(self.image_folder, f'{image_name}.bmp')

        image = Image.open(image_path[:-4]).convert('RGB')

        label = row['Present']

        if self.transform is not None:
            image = self.transform(image)

        return image, label

# Set the path to your dataset CSV file and image folder
csv_file = 'data/train.csv'
image_folder = 'data/images'

# Set the transformation for your images
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Create the dataset
dataset = AllergyDataset(csv_file, image_folder, transform=transform)

# Create data loaders
batch_size = 32
train_ratio = 0.8
train_size = int(train_ratio * len(dataset))
val_size = len(dataset) - train_size

train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

# Define your neural network model
class AllergyClassifier(nn.Module):
    def __init__(self):
        super(AllergyClassifier, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(128 * 28 * 28, 256)  # Update the input size here
        self.fc2 = nn.Linear(256, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = nn.functional.relu(self.conv1(x))
        x = nn.functional.relu(self.conv2(x))
        x = x.view(x.size(0), -1)
        x = nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.sigmoid(x)
        return x

# Create an instance of your model
model = AllergyClassifier()

# Define the loss function and optimizer
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Set the device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Training loop
num_epochs = 10
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0

    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()

        outputs = model(images)
        loss = criterion(outputs.squeeze(), labels.float())

        loss.backward()
        optimizer.step()

        running_loss += loss.item() * images.size(0)

    epoch_loss = running_loss / len(train_dataset)
    print(f'Training Loss - Epoch {epoch+1}: {epoch_loss:.4f}')

# Validation loop
model.eval()
num_correct = 0
num_samples = 0

with torch.no_grad():
    for images, labels in val_loader:
        images = images.to(device)
        labels = labels.to(device)

        outputs = model(images)
        predictions = (outputs >= 0.5).squeeze().long()

        num_correct += (predictions == labels).sum().item()
        num_samples += labels.size(0)

accuracy = num_correct / num_samples
print(f'Validation Accuracy: {accuracy:.4f}')

I'm running into the error shown at the top as soon as training starts. What is causing the shape mismatch, and how can I fix it?

The error is raised in self.fc1 because its in_features do not match the number of features in the incoming activation. With 224x224 inputs and two stride-1, padding-1 convolutions, the spatial size is unchanged, so the flattened activation has 128 * 224 * 224 = 6422528 features per sample (exactly the mat1 shape reported in the error message), while self.fc1 expects 128 * 28 * 28 = 100352. Change the in_features of self.fc1 to the value shown in the error message and it should work.
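A minimal sketch of that fix, assuming you keep the 224x224 Resize and the two stride-1, padding-1 conv layers from your model:

import torch.nn as nn

class AllergyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        # stride-1, padding-1 convs keep the 224x224 spatial size, so the
        # flattened activation has 128 * 224 * 224 = 6422528 features
        self.fc1 = nn.Linear(128 * 224 * 224, 256)
        self.fc2 = nn.Linear(256, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = nn.functional.relu(self.conv1(x))
        x = nn.functional.relu(self.conv2(x))
        x = x.view(x.size(0), -1)  # flatten to (batch_size, 6422528)
        x = nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        return self.sigmoid(x)

Be aware that this makes fc1 extremely large (roughly 1.6 billion weights). If the original 128 * 28 * 28 was intentional, the alternative is to downsample the activation instead, e.g. with nn.MaxPool2d(2) after each conv plus one more pooling stage, since three 2x reductions bring 224x224 down to 28x28. Printing x.shape right after the view call is an easy way to check the flattened size for whichever architecture you settle on.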