Hi, I am defining a custom model for image classification. My code is as follows:
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder

class CustomNeuralNet(nn.Module):
    def __init__(self, num_classes):
        super(CustomNeuralNet, self).__init__()
        self.conv_1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1, stride=1)
        self.activation_1 = nn.LeakyReLU()
        self.conv_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=1)
        self.activation_2 = nn.LeakyReLU()
        self.conv_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1, stride=1)
        self.activation_3 = nn.LeakyReLU()
        self.fc = nn.Linear(in_features=300*300*64, out_features=num_classes)

    def forward(self, inp):
        out = self.conv_1(inp)
        out = self.activation_1(out)
        out = self.conv_2(out)
        out = self.activation_2(out)
        out = self.conv_3(out)
        out = self.activation_3(out)
        out = out.view(-1, 150*150*64)
        out = self.fc(out)
        return out
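To double-check the feature-map size going into the fully connected layer, I put together a small sanity check with a dummy tensor (just a sketch, separate from my training script; `convs` is a throwaway nn.Sequential mirroring my conv stack):

# Sketch: run a dummy 300x300 input through a copy of the conv stack only,
# to see the spatial size before flattening.
convs = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1), nn.LeakyReLU(),
    nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1), nn.LeakyReLU(),
    nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1), nn.LeakyReLU(),
)
with torch.no_grad():
    print(convs(torch.randn(1, 3, 300, 300)).shape)
# prints: torch.Size([1, 64, 300, 300]), since kernel=3, stride=1, padding=1
# keeps the spatial size unchanged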
training_dir = 'train/'
validation_dir = 'val/'

train_transforms = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.RandomCrop(300),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

validation_transforms = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

train_data = ImageFolder(training_dir, transform=train_transforms)
validation_data = ImageFolder(validation_dir, transform=validation_transforms)
train_loader = DataLoader(train_data, batch_size=64)
val_loader = DataLoader(validation_data, batch_size=64)
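To confirm the pipeline really yields 300x300 tensors, one quick check is to pull a single batch (a sketch; `images`/`labels` are throwaway names):

# Sketch: inspect one batch from the loader.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)
# a full batch should print: torch.Size([64, 3, 300, 300]) torch.Size([64])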
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CustomNeuralNet(num_classes=20)
model.to(device)
optimizer = Adam(model.parameters(), weight_decay=0.0001)
loss_function = nn.CrossEntropyLoss()
epochs = 50
print_every = 10
train_losses, test_losses = [], []
prev_loss = -1000
for epoch in range(epochs):
    running_loss = 0.0
    model.train()
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = CustomNeuralNet(inputs)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % print_every == 0:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / print_every))
    train_losses.append(running_loss / len(train_loader))
However, when I begin training, I get the following error:
File "pytorch_image_classification.py", line 121, in <module>
outputs = CustomNeuralNet(inputs)
File "pytorch_image_classification.py", line 25, in __init__
self.fc = nn.Linear(in_features=300*300*64,out_features=num_classes)
File "/home/jitesh/anaconda3/envs/torch_cpu/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 72, in __init__
self.weight = Parameter(torch.Tensor(out_features, in_features))
TypeError: new() received an invalid combination of arguments - got (Tensor, int), but expected one of:
* (torch.device device)
* (torch.Storage storage)
* (Tensor other)
* (tuple of ints size, torch.device device)
didn't match because some of the arguments have invalid types: (Tensor, int)
* (object data, torch.device device)
didn't match because some of the arguments have invalid types: (Tensor, int)
I am not quite able to understand what is wrong with line 25 of my model definition. I am resizing all images to 300x300, so I gave that (multiplied by the number of output filters) as the input size of the last fully connected layer. Please correct me if I am wrong.
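For reference, my reasoning was based on the standard convolution output-size formula: out = floor((in + 2*padding - kernel_size) / stride) + 1 = floor((300 + 2*1 - 3) / 1) + 1 = 300, so with kernel_size=3, stride=1, and padding=1 each convolution should preserve the 300x300 spatial size, leaving 64*300*300 values per image before the fully connected layer.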