Yes, but this size throws the error that I have mentioned above. It doesn’t accept that size. Add to this, it upload the the image as a 3 channel image but it’s actually a one channel! So I have to convert it.
For example, this function doesn’t work:
def visulaizeTrainData(self, class_names):
dataiter = iter(self.train_loader)
images, labels = dataiter.next()
images = images.numpy()
inp = images[1].copy().T
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(images.shape[0]):
ax = fig.add_subplot(2, (images.shape[0])/2, idx+1, xticks=[], yticks=[])
inp = np.transpose(images[idx], (1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
ax.set_title(class_names[labels[idx]])
it throws the same error
and this function as well:
def trainNetwork(self,n_epochs , model_save_name):
self.n_epochs = n_epochs
train_on_gpu = torch.cuda.is_available()
valid_loss_min = np.Inf # track change in validation loss
for epoch in range(1, n_epochs+1):
train_loss = 0.0
valid_loss = 0.0
#Train
model.train()
for data, target in self.train_loader:
if train_on_gpu:
data, target = data.cuda(), target.cuda()
self.data = data
self.optimizer.zero_grad()
self.output = model(data)
loss = self.criterion(self.output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()*data.size(0)
###################################################
#Validation
model.eval()
for data, target in self.valid_loader:
if train_on_gpu:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
valid_loss += loss.item()*data.size(0)
train_loss = train_loss/len(self.train_loader.dataset)
valid_loss = valid_loss/len(self.valid_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, train_loss, valid_loss))
if valid_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,valid_loss))
path = F"/content/gdrive/My Drive/Colab Notebooks/New Trials/colorChannelsWeights/{model_save_name}"
torch.save(model.state_dict(), path)
valid_loss_min = valid_loss