Transfer Learning: IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

Hi guys,

I’m using transfer learning (MobileNetV2) for image classification. Here is a summary of my model:

`

model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True)

for param in model.parameters():
    param.requires_grad = False

model.classifier[0] = nn.Dropout(p=0.7, inplace=False)
model.classifier[1] = nn.Linear(1280, 2)

model = model.to(device)
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the Model:

def train(model, num_epochs, train_dl, valid_dl):
    loss_hist_train = [0]*num_epochs
    accuracy_hist_train = [0]*num_epochs
    loss_hist_valid = [0]*num_epochs
    accuracy_hist_valid = [0]*num_epochs
    
    for epoch in range(num_epochs):
        model.train()
        for x_batch, y_batch in train_dl:
            x_batch = x_batch.to(device)
            pred = model(x_batch)[:,0]
            y_batch = y_batch.to(device)
            y_batch = y_batch.to(torch.float32)
            print(pred[:2])
            print(y_batch[:2])
            loss = loss_func(pred, y_batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            loss_hist_train[epoch] += loss.item()*y_batch.size(0)
            is_correct = (torch.argmax(pred, dim=1)==y_batch).float()
            accuracy_hist_train[epoch] += is_correct.sum().item()
        loss_hist_train[epoch] /= len(train_dl.dataset)
        accuracy_hist_train[epoch] /= len(train_dl.dataset)
        
        model.eval()
        
        with torch.no_grad():
            for x_batch, y_batch in valid_dl:
                x_batch = x_batch.to(device)
                pred = model(x_batch)[:,0]
                y_batch = y_batch.to(device)
                y_batch = y_batch.to(torch.float32)
                loss = loss_func(pred, y_batch)
                loss_hist_valid[epoch] += loss.item()*y_batch.size(0)
                is_correct = (torch.argmax(pred, dim=1)==y_batch).float()
                accuracy_hist_valid[epoch] += is_correct.sum().item()
            loss_hist_valid[epoch] /= len(valid_dl.dataset)
            accuracy_hist_valid[epoch] /= len(valid_dl.dataset)
            
            print(f'Epoch {epoch+1} accuracy: '
                  f'{accuracy_hist_train[epoch]:.4f} val_accuracy: '
                  f'{accuracy_hist_valid[epoch]:.4f}')
    return loss_hist_train, loss_hist_valid, accuracy_hist_train, accuracy_hist_valid




torch.manual_seed(123)
num_epochs=100
model_sum = train(model, num_epochs, train_dl, valid_dl)


Error:

`IndexError                                Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_44336\783710356.py in <module>
      1 torch.manual_seed(123)
      2 num_epochs=100
----> 3 model_sum = train(model, num_epochs, train_dl, valid_dl)

~\AppData\Local\Temp\ipykernel_44336\2381628931.py in train(model, num_epochs, train_dl, valid_dl)
     21             optimizer.zero_grad()
     22             loss_hist_train[epoch] += loss.item()*y_batch.size(0)
---> 23             is_correct = (torch.argmax(pred, dim=1)==y_batch).float()
     24             accuracy_hist_train[epoch] += is_correct.sum().item()
     25         loss_hist_train[epoch] /= len(train_dl.dataset)

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
​`

You are explicitly indexing pred in dim1 at index 0 here:

pred = model(x_batch)[:,0]

and are later trying to use argmax in dim=1, which isn’t available anymore. Could you explain why the first indexing is done?