RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x1065360 and 12544x30)

```
dataset = veri(
    csv_file="f111.csv",
    root_dir=r"C:\Users\yasar\Desktop\f1_classification",
    transform=torchvision.transforms.Compose([transforms.ToTensor(),
                                              transforms.Resize(size=(28*28)),
                                              transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
```

```
train_set, test_set = torch.utils.data.random_split(dataset, [200, 79])
train_loader = DataLoader(dataset=train_set, batch_size=1, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=1, shuffle=True)
```

```
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        
        self.conv1=nn.Conv2d(in_channels=3,out_channels=4,kernel_size=(5,5))
        self.conv2=nn.Conv2d(in_channels=4,out_channels=8,kernel_size=(3,3))
        self.conv3=nn.Conv2d(in_channels=8,out_channels=16,kernel_size=(2,2))
        #self.conv4=nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(3,3))
        #self.conv5=nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(5,5))

        self.max=nn.MaxPool2d(kernel_size=(2,2))
        self.func=nn.ReLU()
        self.out=nn.Sigmoid()
        
        self.fc1=nn.Linear(in_features=16*28*28,out_features=30)
        self.fc2=nn.Linear(in_features=30,out_features=30)
        self.fc3=nn.Linear(in_features=30,out_features=4)
        #self.fc4=nn.Linear(in_features=200,out_features=200)
        #self.fc5=nn.Linear(in_features=200,out_features=200)
        #self.fc6=nn.Linear(in_features=200,out_features=4)
        
        
    def forward(self,x):
        
        x=self.conv1(x)
        x=self.func(x)
        
        x=self.max(x)
        
        x=self.conv2(x)
        x=self.func(x)
        
        x=self.max(x)
        
        x=self.conv3(x)
        x=self.func(x)

        #x=self.max(x)
        
        #x=self.conv4(x)
        #x=self.func(x)

        #x = x.view(x.size(0),16*4*4) 
        x=x.view(x.size(0), -1)
        x=self.fc1(x)
        x=self.func(x)
        x=self.fc2(x)
        x=self.func(x)
        x=self.fc3(x)
        #x=self.fc4(x)
        #x=self.fc5(x)
        #x=self.fc6(x)
        
        return x
```

```
device=torch.device("cuda")
model=Net()

optimizer=torch.optim.Adam(model.parameters(),lr=0.01)
error=torch.nn.CrossEntropyLoss()

epoch=1

for i in range(epoch):
    for i,(images,label) in enumerate(train_loader):

        #images=images.to(device)
        #label=label.to(device)

        out=model(images)

        optimizer.zero_grad()

        loss=error(out,label)

        loss.backward()

        optimizer.step()
```

Can you help me, please?

This error is raised in self.fc1, which expects 16*28*28=12544 input features, while the activation x is flattened to [1, 1065360] by x=x.view(x.size(0), -1).
You could either change the in_features of self.fc1 to 1065360, or make sure the input tensor has a spatial size such that the flattened activation has 12544 features.
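For example, a quick way to find the right in_features is to push a dummy tensor of your actual input size through the conv stack and check the flattened shape. (The 124x124 size below is only an assumption for illustration, not taken from your dataset.)

```
import torch
import torch.nn as nn

# same conv/pool stack as in Net above
conv = nn.Sequential(
    nn.Conv2d(3, 4, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(4, 8, kernel_size=3), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(8, 16, kernel_size=2), nn.ReLU(),
)

# assumed fixed input size; substitute whatever your transform actually produces
dummy = torch.randn(1, 3, 124, 124)
out = conv(dummy)
print(out.shape)                  # torch.Size([1, 16, 28, 28]) for a 124x124 input
print(out.view(1, -1).size(1))    # 12544 -> use this as in_features of fc1
```

Note also that transforms.Resize(size=(28*28)) passes a single int (784), which only rescales the shorter side and keeps the aspect ratio; passing a (height, width) tuple such as transforms.Resize((124, 124)) pins the spatial size so the flattened count stays fixed.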

Thank you. How can I do that?
This is how I did it and it worked, but I am not sure about its accuracy, because I entered a value like 1065360 in the fc1 layer:

```
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        
        self.conv1=nn.Conv2d(in_channels=3,out_channels=4,kernel_size=(5,5))
        self.conv2=nn.Conv2d(in_channels=4,out_channels=8,kernel_size=(3,3))
        self.conv3=nn.Conv2d(in_channels=8,out_channels=16,kernel_size=(2,2))

        self.max=nn.MaxPool2d(kernel_size=(2,2))
        self.func=nn.ReLU()
        
        self.fc1=nn.Linear(in_features=1065360,out_features=30)
        self.fc2=nn.Linear(in_features=30,out_features=30)
        self.fc3=nn.Linear(in_features=30,out_features=4)
        
    def forward(self,x):
        
        x=self.conv1(x)
        x=self.func(x)
        
        x=self.max(x)
        
        x=self.conv2(x)
        x=self.func(x)
        
        x=self.max(x)
        
        x=self.conv3(x)
        x=self.func(x)


        x = x.view(x.size(0),1065360) 
        
        x=self.fc1(x)
        x=self.func(x)
        x=self.fc2(x)
        x=self.func(x)
        x=self.fc3(x)
        return x
```

Your change reflects my first suggestion of changing the in_features of self.fc1 to 1065360 and thus works.
I’m unsure what “but i am not sure of its accuracy” means. Are you concerned you’ve changed the wrong value or is accuracy related to the model accuracy?
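If hardcoding 1065360 feels fragile, one alternative (not used in the code above, just a sketch of the idea) is nn.LazyLinear, which infers in_features from the first batch that reaches it:

```
import torch
import torch.nn as nn

class LazyNet(nn.Module):   # hypothetical variant of the Net above
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 4, kernel_size=5)
        self.conv2 = nn.Conv2d(4, 8, kernel_size=3)
        self.conv3 = nn.Conv2d(8, 16, kernel_size=2)
        self.max = nn.MaxPool2d(2)
        self.func = nn.ReLU()
        # in_features is inferred automatically on the first forward pass
        self.fc1 = nn.LazyLinear(out_features=30)
        self.fc2 = nn.Linear(30, 30)
        self.fc3 = nn.Linear(30, 4)

    def forward(self, x):
        x = self.max(self.func(self.conv1(x)))
        x = self.max(self.func(self.conv2(x)))
        x = self.func(self.conv3(x))
        x = x.view(x.size(0), -1)
        x = self.func(self.fc1(x))
        x = self.func(self.fc2(x))
        return self.fc3(x)
```

One thing to watch with lazy layers: run a dummy batch through the model once before creating the optimizer, so the weights are materialized.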

I'm not sure about the model accuracy. When I changed fc1 it worked and gave this table. If there is no problem with the model accuracy, I will try methods such as data augmentation and optimization.
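As a rough sketch of what adding augmentation to the existing transform pipeline could look like (the specific transforms and parameters below are only illustrative, not tuned to this dataset):

```
import torchvision.transforms as transforms

# applied to the training images only; values are illustrative
train_transform = transforms.Compose([
    transforms.Resize((124, 124)),          # fixed (H, W) keeps the flattened size stable
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(brightness=0.2, contrast=0.2),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
```

Whatever fixed size is chosen, fc1's in_features has to be recomputed to match it (or inferred, as in the LazyLinear sketch above).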