Torch.cuda.ByteTensor and Torch.Cuda.FloatTensor should be the same

I have a dataloader function defined as -


def getDataloader(path):
    os.chdir(path=path)
    files = os.listdir()
    WBC = []
    notWBC = []
    Data = []
    for file in files:
        if file.startswith('WBC'):
            WBC.append(file)
        if file.startswith('not'):
            notWBC.append(file)

    transform = transforms.Compose([
        transforms.Resize((50, 50)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(degrees=30),
        transforms.RandomAffine(degrees=10),
        transforms.RandomPerspective()
    ])
    for i in range(100):
        # Equal number of samples for both classes 0 and 1
        if i < 50:
            image1 = random.choice(WBC)
            while True:
                image2 = random.choice(WBC)
                if image1 != image2:
                    break
            image1 = Image.open(image1)
            image2 = Image.open(image2)

            image1 = transform(image1)
            image2 = transform(image2)
            image1 = torch.from_numpy(numpy.array(image1)[..., :3]).permute(2, 0, 1)
            image2 = torch.from_numpy(numpy.array(image2)[..., :3]).permute(2, 0, 1)
            label = torch.tensor(data=0, dtype=torch.long)

            data = {'image1': image1, 'image2': image2, 'label': label}
            Data.append(data)

        else:
            image1 = random.choice(WBC)
            image2 = random.choice(notWBC)

            image1 = Image.open(image1)
            image2 = Image.open(image2)

            image1 = transform(image1)
            image2 = transform(image2)
            image1 = torch.from_numpy(numpy.array(image1)[..., :3]).permute(2, 0, 1)
            image2 = torch.from_numpy(numpy.array(image2)[..., :3]).permute(2, 0, 1)
            label = torch.tensor(data=1, dtype=torch.long)

            data = {'image1': image1, 'image2': image2, 'label': label}
            Data.append(data)

    return DataLoader(Data)

Now when I enumerate through the dataset the type of both image1 and image2 are torch.tensor.
but when I train the model I get aforementioned error at -

image1_features = self.features(image1)

where self.features is a model defined as -


class perceptualFeatures(nn.Module):
    def __init__(self):
        super(perceptualFeatures, self).__init__()
        print('Using VGG16 for extracting style and content')
        layers = []
        layers.append(torchvision.models.vgg16(pretrained=True).cuda().features[:4].eval())
        layers.append(torchvision.models.vgg16(pretrained=True).cuda().features[4:9].eval())
        layers.append(torchvision.models.vgg16(pretrained=True).cuda().features[9:16].eval())
        layers.append(torchvision.models.vgg16(pretrained=True).cuda().features[16:23].eval())

        for layer in layers:
            for parameters in layer:
                parameters.required_grad = False

        self.layers = nn.ModuleList(layers)

    def gramMatrix(self, image):
        (b, c, h, w) = image.size()
        f = image.view(b, c, w * h)
        G = f.bmm(f.transpose(1, 2)) / c * w * h
        return G

    def forward(self, image):
        features = []
        image = image.cuda()
        for layer in self.layers:
            image = layer(image)
            features.append(image)
        return features

and the following line throws the error -

image = layer(image)

What could be going wrong ?
TIA

Hi,

You might want to check the type of your Tensors. I guess one is byte while the other is float. I guess for the input to your network, you want both to be floats.