Respected Sir,
I AM TRYING TO CONSTRUCT CVAE WHICH TAKES A 256X256 IMAGE AND OUTPUTS THE SAME. THAT IS I WILL TAKE RAW IMAGE AND TRAIN MY CVAE. AND THEN SEE HOW CLOSELY IT RESEMBLES AN ENHANCED IMAGE. AFTER THAT I SHALL PASS A RAW TEST IMAGE TO MY TRIANED MODEL, AND SEE HOW CLOSELY IT RESMBLES MY ENHANCED TEST IMAGE. I HAVE USED THE FOLLOWING PYTORCH CODE, FOR PREPARING THE TEST-LOADER BUT ERROR IS COMING WHILE RUNNIGN IN GOOGLE COLAB. PLEASE HELP ME TO DEBUG IT.
class prepare_dataset
all_image_pathtestraw = []
for i in tqdm(range(len(image_dirs))):
image_pathtestraw = glob.glob(f"/content/drive/MyDrive/Autoencoders1/test/raw/*")
image_pathtestraw.sort()
for image_path in image_pathtestraw:
all_image_pathtestraw.append(image_path)
train_data = all_image_paths[1:4000]
valid_data = all_image_pathsval[1:4000]
test_data = all_image_pathtestraw[1:1000]
batch_size = 15
transform = transform()
prepare the training and validation data loaders
train_data, valid_data, test_data = prepare_dataset(
ROOT_PATH=‘/content/drive/MyDrive/Autoencoders1/’
)
class Dataset(Dataset):
def init(self, data_list, transform):
self.data = data_list
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index):
image = cv2.imread(self.data[index])
image = cv2.resize(image, (256,256))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = self.transform(image)
return image
trainset = Dataset(train_data, transform=transform)
#train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
validset = Dataset(valid_data, transform=transform)
#val_loader = DataLoader(validset, batch_size=batch_size)
testset = Dataset(test_data, transform=transform)
extracting training images
training_images = [x for x in trainset.data]
extracting validation images
validation_images = [x for x in validset.data]
extracting test images for visualization purposes
test_images = [x for x in testset.data]
class Custom(Dataset):
def init(self, data, transforms=None):
self.data = data
self.transforms = transforms
def len(self):
return len(self.data)
def getitem(self, idx):
image = self.data[idx]
if self.transforms!=None:
image = self.transforms(image)
return image
creating pytorch datasets
training_data = Custom(training_images, transforms=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
validation_data = Custom(validation_images, transforms=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
test_data = Custom(test_images, transforms=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
training model
model = ConvolutionalAutoencoder(Autoencoder(Encoder(), Decoder()))
log_dict = model.train(nn.MSELoss() , epochs=1, batch_size=15,
training_set=training_data, validation_set=validation_data,
testset=test_data)
TypeError Traceback (most recent call last)
----> 4 log_dict = model.train(nn.MSELoss() , epochs=1, batch_size=15,
5 training_set=training_data, validation_set=validation_data,
6 testset=test_data)
8 frames
/usr/local/lib/python3.8/dist-packages/torchvision/transforms/functional.py in to_tensor(pic)
135 _log_api_usage_once(to_tensor)
136 if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
→ 137 raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")
138
139 if _is_numpy(pic) and not _is_numpy_image(pic):
TypeError: pic should be PIL Image or ndarray. Got <class ‘str’>