Why is my dimension unequal?
Full script here:
device = torch.device("cuda" if torch.cuda.is_available()
else "cpu")
# model = models.resnet50(pretrained=True)
PATH = 'MODELS/model_epoch_60_May-10-21:1605_1620677105.pth'
model = torch.load(PATH)
#torch.save(model, 'ResnetPretrained.pth');
myTestData = []
myTrainData = []
myValData = []
def load_split_train_test(traindir, testdir, valdir):
train_transforms = transforms.Compose([transforms.Resize(32),
transforms.ToTensor(),
])
test_transforms = transforms.Compose([transforms.Resize(32),
transforms.ToTensor(),
])
val_transforms = transforms.Compose([transforms.Resize(32),
transforms.ToTensor(),
])
train_data = datasets.ImageFolder(traindir,
transform=train_transforms)
test_data = datasets.ImageFolder(testdir,
transform=test_transforms)
val_data = datasets.ImageFolder(valdir,
transform=test_transforms)
# train_idx = list(range(len(traindir)))
# nr.shuffle(np.array(train_idx))
# test_idx = list(range(len(testdir)))
# nr.shuffle(np.array(test_idx))
# val_idx = list(range(len(valdir)))
# nr.shuffle(np.array(val_idx))
train_idx = list(range(len(traindir)))
nr.shuffle(train_idx)
test_idx = list(range(len(testdir)))
nr.shuffle(test_idx)
val_idx = list(range(len(valdir)))
nr.shuffle(val_idx)
train_sampler = SubsetRandomSampler(train_idx)
test_sampler = SubsetRandomSampler(test_idx)
val_sampler = SubsetRandomSampler(val_idx)
# trainloader = torch.utils.data.DataLoader(train_data, sampler = train_sampler, batch_size = batch_size)
# testloader = torch.utils.data.DataLoader(test_data, sampler = test_sampler, batch_size = batch_size)
# valloader = torch.utils.data.DataLoader(val_data, sampler = val_sampler, batch_size = batch_size)
trainloader = torch.utils.data.DataLoader(train_data, batch_size = batch_size)
testloader = torch.utils.data.DataLoader(test_data, batch_size = batch_size)
valloader = torch.utils.data.DataLoader(val_data, batch_size = batch_size)
myTestData.append(test_data)
myTrainData.append(train_data)
myValData.append(val_data)
return trainloader, testloader, valloader
trainloader, testloader, valloader = load_split_train_test(data_train_dir,
data_test_dir,
data_val_dir)
print(valloader.dataset.classes)
print(testloader.dataset.classes)
print(trainloader.dataset.classes)
[‘fillin’, ‘sharkfin’]
[‘fillin’, ‘sharkfin’]
[‘fillin’, ‘sharkfin’]
train_X = next(iter(trainloader.dataset))[0].numpy()
test_X = trainloader.dataset.targets
train_y = next(iter(testloader.dataset))[0].numpy()
test_y = trainloader.dataset.targets
train_dataset = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_y))
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
net = model
criterion = nn.CrossEntropyLoss()# cross entropy loss
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
net.train()
# for epoch in range(1000):
# for inputs, targets in train_loader:
# optimizer.zero_grad()
# out = net(inputs)
# loss = criterion(out, targets.long())
# loss.backward()
# optimizer.step()
# if epoch % 100 == 0:
# print('number of epoch', epoch, 'loss', loss.item())
predict_out = net(torch.Tensor(test_X))
_, predict_y = torch.max(predict_out, 1)
print('prediction accuracy', accuracy_score(test_y.data, predict_y.data))
print('macro precision', precision_score(test_y.data, predict_y.data, average='macro'))
print('micro precision', precision_score(test_y.data, predict_y.data, average='micro'))
print('macro recall', recall_score(test_y.data, predict_y.data, average='macro'))
print('micro recall', recall_score(test_y.data, predict_y.data, average='micro'))