Thank you. I proceeded this way:
class val_Dataset(Dataset):
    def __init__(self, imgarray_val, labelarray_val, transform=None):
        self.images = imgarray_val
        self.labels = labelarray_val
        self.transform = transform

    def __getitem__(self, index):
        img = self.images[index]
        if self.transform is not None:
            img = self.transform(img)
        Img = Image.fromarray(self.labels[index])       # numpy array to PIL
        PilImg = Img.resize((224, 224), Image.NEAREST)  # resize the PIL image
        label = torch.from_numpy(np.asarray(PilImg))    # PIL back to numpy, then numpy array to tensor
        #trnsf1 = transforms.RandomResizedCrop(input_size)
        #trnsf2 = transforms.ToTensor()
        #label = trnsf2(trnsf1(self.masks[index]))
        print(img.size())
        print(label.size())
        return img, label

    def __len__(self):
        return len(self.images)
Now I encounter this error:
"RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 231 and 228 in dimension 3 at …\aten\src\TH/generic/THTensor.cpp:711"
What I get as output is this:
Epoch 0/0
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
image shape: torch.Size([4, 3, 224, 224])
label shape: torch.Size([4, 224, 224])
squeezed label shape:torch.Size([4, 224, 224])
model output shape:torch.Size([4, 4, 224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
image shape: torch.Size([4, 3, 224, 224])
label shape: torch.Size([4, 224, 224])
squeezed label shape:torch.Size([4, 224, 224])
model output shape:torch.Size([4, 4, 224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
image shape: torch.Size([4, 3, 224, 224])
label shape: torch.Size([4, 224, 224])
squeezed label shape:torch.Size([4, 224, 224])
model output shape:torch.Size([4, 4, 224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
torch.Size([3, 224, 224]) torch.Size([224, 224])
image shape: torch.Size([4, 3, 224, 224])
label shape: torch.Size([4, 224, 224])
squeezed label shape:torch.Size([4, 224, 224])
model output shape:torch.Size([4, 4, 224, 224])
train Loss: 1.7062 Acc: 29483.5625
torch.Size([3, 224, 231])
torch.Size([224, 224])
torch.Size([3, 224, 228])
torch.Size([224, 224])
torch.Size([3, 224, 232])
torch.Size([224, 224])
torch.Size([3, 224, 229])
torch.Size([224, 224])
and the training function is as follows:
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, has_aux=True):
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())  #??????
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs, labels = Variable(inputs), Variable(labels)
                inputs = inputs.to('cpu')
                labels = labels.to('cpu')
                print('image shape: {}'.format(inputs.shape))
                print('label shape: {}'.format(labels.shape))
                #labels = labels.squeeze(1)
                #print('squeezed label shape:{}'.format(labels.shape))
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    # Special case for inception because in training it has an auxiliary output. In train
                    # mode we calculate the loss by summing the final output and the auxiliary output
                    # but in testing we only consider the final output.
                    if has_aux and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs = model(inputs)
                        print('model output shape:{}'.format(outputs['out'].shape))
                        loss1 = criterion(outputs['out'], labels.long())
                        loss2 = criterion(outputs['aux'], labels.long())
                        #loss2 = output['aux']
                        loss = loss1 + 0.4*loss2
                    #else:
                    #    outputs = model(inputs)
                    #    loss = criterion(outputs, labels.long())
                    _, preds = torch.max(outputs['out'], 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data.long())
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
What I understand so far is that the transformation produces fixed 224x224 tensors for the train images but not for the validation images, yet I keep checking my code and cannot find the error! (I sketch a quick check I want to run below, right after the dataset code.)
This is the transform and Dataset code that the dataloaders are built from:
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize(input_size, input_size),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(input_size, input_size),
        #transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

print("Initializing Datasets and Dataloaders...")

class Train_Dataset(Dataset):
    def __init__(self, imgarray, labelarray, transform=None):
        self.images = imgarray
        self.labels = labelarray
        self.transform = transform

    def __getitem__(self, index):
        img = self.images[index]
        if self.transform is not None:
            img = self.transform(img)
        Img = Image.fromarray(self.labels[index])       # numpy array to PIL
        PilImg = Img.resize((224, 224), Image.NEAREST)  # resize the PIL image
        label = torch.from_numpy(np.asarray(PilImg))    # PIL back to numpy, then numpy array to tensor
        print(img.size(), label.size())
        return img, label

    def __len__(self):
        return len(self.images)

class val_Dataset(Dataset):
    def __init__(self, imgarray_val, labelarray_val, transform=None):
        self.images = imgarray_val
        self.labels = labelarray_val
        self.transform = transform

    def __getitem__(self, index):
        img = self.images[index]
        if self.transform is not None:
            img = self.transform(img)
        Img = Image.fromarray(self.labels[index])       # numpy array to PIL
        PilImg = Img.resize((224, 224), Image.NEAREST)  # resize the PIL image
        label = torch.from_numpy(np.asarray(PilImg))    # PIL back to numpy, then numpy array to tensor
        #trnsf1 = transforms.RandomResizedCrop(input_size)
        #trnsf2 = transforms.ToTensor()
        #label = trnsf2(trnsf1(self.masks[index]))
        print(img.size(), label.size())
        return img, label

    def __len__(self):
        return len(self.images)
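To narrow it down, I am thinking of running the validation pipeline on its own and printing the shapes it produces, something like this (just a rough sketch; imgarray_val and labelarray_val are the arrays I already build earlier, which I have not shown here):

    # Sketch: feed a few raw validation samples through val_Dataset with the
    # 'val' transform only, to see whether the varying widths (231, 228, ...)
    # already appear before the DataLoader tries to stack a batch.
    check_ds = val_Dataset(imgarray_val, labelarray_val,
                           transform=data_transforms['val'])
    for i in range(4):
        img, label = check_ds[i]
        print(i, img.size(), label.size())  # I expect [3, 224, 224] and [224, 224]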
I commented out two lines in the transforms block: for the train set,
#transforms.RandomHorizontalFlip(),
and for the val set:
#transforms.CenterCrop(input_size),
Might this be the problem?
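For reference, this is what the val transform would look like with that line put back, keeping the Resize call exactly as I have it now (just a sketch, I have not tested whether it fixes the mismatch; CenterCrop(input_size) should force the output to exactly input_size x input_size after the resize):

    data_transforms['val'] = transforms.Compose([
        transforms.Resize(input_size, input_size),
        transforms.CenterCrop(input_size),   # the line I had commented out
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])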