Unequal dimensions when converting from tensors to numpy

Why are my dimensions unequal?
Full script here:

https://github.com/adomakor412/NERTO_CIMSS_GOES-R/blob/ca2d75bce4a279a8781c0fc1f3567bdac1856fce/AI_model-BW-NUMPY.ipynb

device = torch.device("cuda" if torch.cuda.is_available() 
                                  else "cpu")
# model = models.resnet50(pretrained=True)
PATH = 'MODELS/model_epoch_60_May-10-21:1605_1620677105.pth'
model = torch.load(PATH)
#torch.save(model, 'ResnetPretrained.pth');
myTestData = []
myTrainData = []
myValData = []

def load_split_train_test(traindir, testdir, valdir):
    train_transforms = transforms.Compose([transforms.Resize(32),
                                       transforms.ToTensor(),
                                       ])
    test_transforms = transforms.Compose([transforms.Resize(32),
                                      transforms.ToTensor(),
                                      ])
    val_transforms = transforms.Compose([transforms.Resize(32),
                                      transforms.ToTensor(),
                                      ])

    train_data = datasets.ImageFolder(traindir,       
                    transform=train_transforms)
    test_data = datasets.ImageFolder(testdir,
                    transform=test_transforms)
    val_data = datasets.ImageFolder(valdir,
                    transform=val_transforms)
    
    
#     train_idx = list(range(len(traindir)))
#     nr.shuffle(np.array(train_idx))
    
#     test_idx = list(range(len(testdir)))
#     nr.shuffle(np.array(test_idx))
    
#     val_idx = list(range(len(valdir)))
#     nr.shuffle(np.array(val_idx))

    train_idx = list(range(len(traindir)))
    nr.shuffle(train_idx)
    
    test_idx = list(range(len(testdir)))
    nr.shuffle(test_idx)
    
    val_idx = list(range(len(valdir)))
    nr.shuffle(val_idx)
    
    train_sampler = SubsetRandomSampler(train_idx)
    test_sampler = SubsetRandomSampler(test_idx)
    val_sampler = SubsetRandomSampler(val_idx)

#     trainloader = torch.utils.data.DataLoader(train_data, sampler = train_sampler, batch_size = batch_size)
#     testloader = torch.utils.data.DataLoader(test_data, sampler = test_sampler, batch_size = batch_size)
#     valloader = torch.utils.data.DataLoader(val_data, sampler = val_sampler, batch_size = batch_size)
    
    trainloader = torch.utils.data.DataLoader(train_data, batch_size = batch_size)
    testloader = torch.utils.data.DataLoader(test_data, batch_size = batch_size)
    valloader = torch.utils.data.DataLoader(val_data, batch_size = batch_size)
    
    myTestData.append(test_data)
    myTrainData.append(train_data)
    myValData.append(val_data)
    
    return trainloader, testloader, valloader
trainloader, testloader, valloader = load_split_train_test(data_train_dir,
                                                           data_test_dir, 
                                                           data_val_dir)

print(valloader.dataset.classes)
print(testloader.dataset.classes)
print(trainloader.dataset.classes)

['fillin', 'sharkfin']
['fillin', 'sharkfin']
['fillin', 'sharkfin']

train_X = next(iter(trainloader.dataset))[0].numpy()
test_X = trainloader.dataset.targets
train_y = next(iter(testloader.dataset))[0].numpy()
test_y = trainloader.dataset.targets

train_dataset = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_y))
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

net = model

criterion = nn.CrossEntropyLoss()  # cross-entropy loss

optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

net.train()

# for epoch in range(1000):
    
#     for inputs, targets in train_loader:
#         optimizer.zero_grad()
#         out = net(inputs)
#         loss = criterion(out, targets.long())
#         loss.backward()
#         optimizer.step()

#     if epoch % 100 == 0:
#         print('number of epoch', epoch, 'loss', loss.item())

predict_out = net(torch.Tensor(test_X))
_, predict_y = torch.max(predict_out, 1)

print('prediction accuracy', accuracy_score(test_y.data, predict_y.data))

print('macro precision', precision_score(test_y.data, predict_y.data, average='macro'))
print('micro precision', precision_score(test_y.data, predict_y.data, average='micro'))
print('macro recall', recall_score(test_y.data, predict_y.data, average='macro'))
print('micro recall', recall_score(test_y.data, predict_y.data, average='micro'))

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>
     27 #         print('number of epoch', epoch, 'loss', loss.item())
     28 
---> 29 predict_out = net(torch.Tensor(test_X))
     30 _, predict_y = torch.max(predict_out, 1)
     31 

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torchvision/models/resnet.py in forward(self, x)
    214 
    215     def forward(self, x):
--> 216         return self._forward_impl(x)
    217 
    218 

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torchvision/models/resnet.py in _forward_impl(self, x)
    197     def _forward_impl(self, x):
    198         # See note [TorchScript super()]
--> 199         x = self.conv1(x)
    200         x = self.bn1(x)
    201         x = self.relu(x)

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
    343 
    344     def forward(self, input):
--> 345         return self.conv2d_forward(input, self.weight)
    346 
    347 class Conv3d(_ConvNd):

/scratch/adomakor412/conda/envs/MyEnv/lib/python3.6/site-packages/torch/nn/modules/conv.py in conv2d_forward(self, input, weight)
    340                             _pair(0), self.dilation, self.groups)
    341         return F.conv2d(input, weight, self.bias, self.stride,
--> 342                         self.padding, self.dilation, self.groups)
    343 
    344     def forward(self, input):

RuntimeError: Expected 4-dimensional input for 4-dimensional weight 64 3 7 7, but got 1-dimensional input of size [1455] instead
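The mismatch comes from what is actually being passed to the network: test_X = trainloader.dataset.targets is the flat list of 1455 labels, not a batch of images, so torch.Tensor(test_X) is 1-dimensional, while ResNet's first conv layer expects a 4-dimensional batch shaped (N, 3, H, W). Below is a minimal sketch of building a proper 4-D test batch (and matching label vector) from the existing testloader; it assumes the variable names above and that the Resize transform produces images of equal size so the batches can be concatenated.

import torch

# Collect every batch from the existing test loader; each batch is
# (images, labels) with images shaped (batch, 3, H, W).
image_batches, label_batches = [], []
for images, labels in testloader:
    image_batches.append(images)
    label_batches.append(labels)

test_X = torch.cat(image_batches, dim=0).numpy()   # (N, 3, H, W)
test_y = torch.cat(label_batches, dim=0).numpy()   # (N,)
print(test_X.shape, test_y.shape)

# Now the model receives the 4-dimensional input that conv1 expects
# (move the tensor to device first if the model lives on the GPU).
net.eval()
with torch.no_grad():
    predict_out = net(torch.from_numpy(test_X))
_, predict_y = torch.max(predict_out, 1)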

Well, even though I had the right dimensions, I also needed to make the last value of nn.Linear(…, last) match the number of classes:

for param in model.parameters():
    param.requires_grad = False
    
model.fc = nn.Sequential(nn.Linear(2048, 512),
                         nn.ReLU(),
                         nn.Dropout(0.2),
                         nn.Linear(512, 2),
                         nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
model.to(device);

model.fc = nn.Sequential(nn.Linear(x, y),
                         nn.ReLU(),
                         nn.Dropout(0.2),
                         nn.Linear(y, len(classes)),
                         nn.LogSoftmax(dim=1))
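For reference, one way to avoid hard-coding the 2 is to take the class count from the dataset itself. This is only a sketch and assumes model still has its original single-Linear fc head, so model.fc.in_features is available (2048 for a stock ResNet-50):

classes = trainloader.dataset.classes      # ['fillin', 'sharkfin']
num_classes = len(classes)                 # 2 here

in_features = model.fc.in_features         # 2048 for a stock ResNet-50

model.fc = nn.Sequential(nn.Linear(in_features, 512),
                         nn.ReLU(),
                         nn.Dropout(0.2),
                         nn.Linear(512, num_classes),
                         nn.LogSoftmax(dim=1))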