Log_softmax(): argument 'input' (position 1) must be Tensor, not Linear

I'm new to PyTorch. How are my inputs Linear? When I print their type, they are <class 'torch.Tensor'>.

import torch
from torch import nn
import torch.nn.functional as F

class Network(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(150528, 250)  # flattened input features -> 250 hidden units
        self.fc2 = nn.Linear(250, 64)
        self.fc3 = nn.Linear(64, 10)

        
    def forward(self, x):
        x = x.view(x.shape[0], -1)
        
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))

        
        x = F.log_softmax(self.fc3, dim=1)
        return x
        
model = Network()
model
epochs = 10
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
steps = 0
running_loss = 0
print_every = 5

for epoch in range(epochs):
    for inputs, labels in trainloader:
        steps += 1
        # Move input and label tensors to the default device
        inputs, labels = inputs.to(device), labels.to(device)
        
        optimizer.zero_grad()
        print(type(inputs))
        logps = model.forward(inputs)
        loss = criterion(logps, labels)
        output, x = model(inputs)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

Your input to log_softmax is self.fc3, which is an nn.Linear(64, 10) module, not a tensor. I presume you want to pass the output of fc3 instead.

Replace x = F.log_softmax(self.fc3, dim=1) with x = F.log_softmax(self.fc3(x), dim=1)
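
With that one line changed, your forward method would look like this (a minimal sketch; only the log_softmax line differs from your code):

def forward(self, x):
    x = x.view(x.shape[0], -1)              # flatten each sample to a 1-D vector
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = F.log_softmax(self.fc3(x), dim=1)   # pass x through fc3, then apply log_softmax to the result
    return x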

Cheers!
