I just trained a model and now performing iteration on custom images with different sizes and types (.tif,bmp,and .PNG).
I just load a model and want to generate three images, compressed, original and final images. The below code works fine when i put into training file because where i i gave CIFAR dataset for testing. Now i am giving custom images and make separate file for testing.
import torch
from model import End_to_end
from torch.autograd import Variable
from loss import loss_function
from grid import save_image
from torchvision import datasets, transforms
CUDA = torch.cuda.is_available()
if CUDA:
model = End_to_end().cuda()
else:
model = End_to_end()
EPOCHS = 20
testset = datasets.ImageFolder(root="/home/khawar/Desktop/End-to-End_IEEE-TVSCT/test/", transform=transforms.ToTensor())
model.load_state_dict(torch.load('./checkpoint/model.pth'))
print(testset.imgs)
def test(epoch):
model.eval()
test_loss = 0
for i, (data, _) in enumerate(testset):
data = Variable(data)
final, residual_img, upscaled_image, com_img, orig_im = model(data.cuda())
test_loss += loss_function(final, residual_img, upscaled_image, com_img, orig_im).data
if epoch == EPOCHS and i == 0:
# save_image(final.data[0],'reconstruction_final',nrow=8)
# save_image(com_img.data[0],'com_img',nrow=8)
n = min(data.size(0), 6)
print("saving the image " + str(n))
comparison = torch.cat([data[:n], final[:n].cpu()])
comparison = comparison.cpu()
# print(comparison.data)
save_image(com_img[:n].data, 'compressed_' + str(epoch) + '.png', nrow=n)
save_image(comparison.data, 'reconstruction_' + str(epoch) + '.png', nrow=n)
test_loss /= len(testset.dataset)
print('====> Test set loss: {:.4f}'.format(test_loss))
def save_images():
epoch = EPOCHS
model.eval()
test_loss = 0
for i, (data, _) in enumerate(testset):
data = Variable(data).unsqueeze(0)
final, residual_img, upscaled_image, com_img, orig_im = model(data.cuda())
test_loss += loss_function(final, residual_img, upscaled_image, com_img, orig_im).data
if i == 3:
# save_image(final.data[0],'reconstruction_final',nrow=8)
# save_image(com_img.data[0],'com_img',nrow=8)
n = min(data.size(0), 6)
print("saving the image " + str(n))
comparison = torch.cat([data[:n], final[:n].cpu()])
comparison = comparison.cpu()
# print(comparison.data)
save_image(com_img[:1].data, './compressed_image/compressed_' + str(i) + '.png', nrow=n)
save_image(final[:1].data, './final_image/final_' + str(epoch) + '.png', nrow=n)
save_image(orig_im[:1].data, './orginal_image/original_' + str(epoch) + '.png', nrow=n)
test_loss /= len(testset.dataset)
print('====> Test set loss: {:.4f}'.format(test_loss))
save_images()
Error
Traceback (most recent call last):
File "/home/khawar/Desktop/End-to-End_IEEE-TVSCT/test.py", line 74, in <module>
save_images()
File "/home/khawar/Desktop/End-to-End_IEEE-TVSCT/test.py", line 51, in save_images
for i, (data, _) in enumerate(test_loader):
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 345, in __next__
data = self._next_data()
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 385, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
return self.collate_fn(data)
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/_utils/collate.py", line 79, in default_collate
return [default_collate(samples) for samples in transposed]
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/_utils/collate.py", line 79, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "/home/khawar/anaconda3/envs/End-to-End_IEEE-TVSCT/lib/python3.5/site-packages/torch/utils/data/_utils/collate.py", line 55, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [3, 288, 352] at entry 0 and [3, 256, 256] at entry 1