Expected object of scalar type Double but got scalar type Float for argument #3 'mat1' in call to _th_addmm_

Hello community :slight_smile:
I am building a model to train a dataset of 224*224 images with 3 RGB channels, using the AlexNet CNN.
I am stuck on the error below. Please advise how to solve this issue; your guidance is appreciated.

Output of my tensors before getting the error:
```
[[0.70980392 0. 0.03921569 ... 0.02745098 0. 0. ]
[0.61568627 0. 0.02352941 ... 0.01960784 0. 0. ]
[0.46666667 0.04705882 0. ... 0.00784314 0.00392157 0.02745098]
...
[0.41960784 0.42352941 0.22745098 ... 0.04705882 0.01568627 0.04705882]
[0.41960784 0.43529412 0.18431373 ... 0. 0. 0. ]
[0.41568627 0.43921569 0.15294118 ... 0. 0. 0. ]]]
index of the image in get target 20
img shape (3, 224, 224)
```

The error I am getting is:

```
RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #3 'mat1' in call to _th_addmm_
```

This is my code for the dataloader:

It subclasses `torch.utils.data.Dataset`:

```python
import os
import sys
import zipfile

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


class Mydataset(Dataset):
    def __init__(self, root, classes, task_num, memory_classes, memory,
                 num_samples_per_class, train, transform=transforms.ToTensor(),
                 target_transform=None, download=True):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train

        self.filename = 'mydataset.zip'
        fpath = os.path.join(root, self.filename)
        if not os.path.isfile(fpath):
            if not download:
                raise RuntimeError('Dataset not found. You can use download=True to download it')
            else:
                # self.url and download_url are expected to be provided elsewhere
                print('Downloading from ' + self.url)
                download_url(self.url, root, filename=self.filename)

        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(root)
        zip_ref.close()

        if self.train:
            fpath = os.path.join(root, 'mydataset', 'train')
        else:
            fpath = os.path.join(root, 'mydataset', 'val')

        # class-name -> index mapping
        self.class_mapping = {c: i for i, c in enumerate(classes)}
        self.class_indices = {}
        for cls in classes:
            self.class_indices[self.class_mapping[cls]] = []

        data = []    # image data array
        labels = []  # class name array
        tt = []      # task module labels
        td = []      # discriminator labels

        for folder in os.listdir(fpath):
            folder_path = os.path.join(fpath, folder)
            print("folder path", folder_path)
            for ims in os.listdir(folder_path):
                try:
                    img_path = os.path.join(folder_path, ims)
                    image = np.array(Image.open(img_path))
                    # image = np.resize(image, (IMG_HEIGHT, IMG_WIDTH, 1))
                    # image = ToTensor()(image).unsqueeze(1)
                    data.append(image)
                    labels.append(folder)
                except:
                    print("File {}/{} is broken".format(folder, ims))
        self.data = np.array(data)
        self.targets = labels
        self.num_classes = len(set(labels))

        for i in range(len(self.data)):
            if self.targets[i] in classes:
                data.append(self.data[i])
                labels.append(self.class_mapping[self.targets[i]])
                tt.append(task_num)
                td.append(task_num + 1)
                self.class_indices[self.class_mapping[self.targets[i]]].append(i)

            if memory_classes:
                for task_id in range(task_num):
                    for j in range(len(memory[task_id]['x'])):
                        if memory[task_id]['y'][j] in range(len(memory_classes[task_id])):
                            data.append(memory[task_id]['x'][j])
                            labels.append(memory[task_id]['y'][j])
                            tt.append(memory[task_id]['tt'][j])
                            td.append(memory[task_id]['td'][j])
        self.data = np.array(data)
        self.labels = labels
        self.tt = tt
        self.td = td
        # self.data = self.data.reshape((10000, 3, 244, 244))
        self.data = self.data.transpose((0, 3, 1, 2))  # (N, H, W, C) -> (N, C, H, W)

        if num_samples_per_class:
            x, y = [], []
            for l in range(self.num_classes):
                indices_with_label_l = np.where(np.array(self.labels) == l)
                x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
                shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
                x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
                y_with_label_l = [l] * len(shuffled_indices)
                x.append(x_with_label_l)
                y.append(y_with_label_l)

            self.data = np.array(sum(x, []))
            self.labels = sum(y, [])
            print("data", self.data)

        self.tt = [task_num for _ in range(len(self.data))]
        self.td = [task_num + 1 for _ in range(len(self.data))]

    def __getitem__(self, index):
        img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]

        # without this scaling I got:
        # RuntimeError: value cannot be converted to type uint8_t without overflow: -0.00481544
        img = img / 255
        # img = Image.fromarray(img).convert('RGB')

        try:
            if self.transform is not None:
                img = self.transform(img)
        except:
            pass
        try:
            if self.target_transform is not None:
                tt = self.target_transform(tt)
            if self.target_transform is not None:
                td = self.target_transform(td)
        except:
            pass
        return img, target, tt, td

    def processed_folder(self):
        return os.path.join(self.root, self.__class__.__name__, 'processed')

    def __len__(self):
        return len(self.data)
```

```python
class DatasetGen(object):

    def __init__(self, args):
        super(DatasetGen, self).__init__()

        self.seed = args.seed
        self.batch_size = args.batch_size
        self.pc_valid = args.pc_valid
        self.root = args.data_dir
        self.latent_dim = args.latent_dim
        self.use_memory = args.use_memory

        self.num_tasks = args.ntasks
        self.num_classes = 25

        self.num_samples = args.samples
        self.num_samples_per_class = 2

        self.inputsize = [3, 224, 224]
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        self.transformation = transforms.Compose([
            transforms.ToTensor(), transforms.Normalize(mean, std)])

        self.taskcla = [[t, int(self.num_classes / self.num_tasks)] for t in range(self.num_tasks)]

        self.indices = {}
        self.dataloaders = {}
        self.idx = {}

        self.num_workers = args.workers
        # changed pin_memory from True to False for testing;
        # CUDA only, so set it to False on CPU
        self.pin_memory = False

        np.random.seed(self.seed)
        self.task_ids = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13],
                         [14, 15], [16, 17], [18, 19], [20, 21], [22, 23], [24, 25]]
        self.train_set = {}
        self.test_set = {}

        # per-task memory
        self.task_memory = {}
        for i in range(self.num_tasks):
            self.task_memory[i] = {}
            self.task_memory[i]['x'] = []
            self.task_memory[i]['y'] = []
            self.task_memory[i]['tt'] = []
            self.task_memory[i]['td'] = []

    def get(self, task_id):

        self.dataloaders[task_id] = {}
        sys.stdout.flush()

        if task_id == 0:
            memory_classes = None
            memory = None
        else:
            memory_classes = self.task_ids
            memory = self.task_memory
        print(self.task_ids[task_id])

        self.train_set[task_id] = Mydataset(root=self.root, classes=self.task_ids[task_id],
                                            memory_classes=self.task_ids, memory=memory,
                                            num_samples_per_class=self.num_samples_per_class,
                                            task_num=task_id, train=True,
                                            download=False, transform=self.transformation)
        self.test_set[task_id] = Mydataset(root=self.root, classes=self.task_ids[task_id],
                                           memory_classes=self.task_ids, memory=None,
                                           num_samples_per_class=self.num_samples_per_class,
                                           task_num=task_id, train=False,
                                           download=False, transform=self.transformation)

        split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
        train_split, valid_split = torch.utils.data.random_split(
            self.train_set[task_id], [len(self.train_set[task_id]) - split, split])

        # shuffle changed to False for testing
        train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size,
                                                   num_workers=self.num_workers,
                                                   pin_memory=self.pin_memory, shuffle=False, drop_last=True)
        valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
                                                   shuffle=False,
                                                   num_workers=self.num_workers, pin_memory=self.pin_memory)
        test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size,
                                                  num_workers=self.num_workers,
                                                  pin_memory=self.pin_memory, shuffle=False)
        print("task_ids", self.task_ids)
        print(len(train_loader.dataset))
        self.dataloaders[task_id]['train'] = train_loader
        self.dataloaders[task_id]['valid'] = valid_loader
        self.dataloaders[task_id]['test'] = test_loader
        self.dataloaders[task_id]['name'] = 'Mydataset-{}-{}'.format(task_id, self.task_ids[task_id])

        print("Training set size:      {}  images of {}x{}".format(len(train_loader.dataset), self.inputsize[1], self.inputsize[1]))
        print("Validation set size:    {}  images of {}x{}".format(len(valid_loader.dataset), self.inputsize[1], self.inputsize[1]))
        print("Test set size:          {}  images of {}x{}".format(len(test_loader.dataset), self.inputsize[1], self.inputsize[1]))

        return self.dataloaders
```

This error is raised by a dtype mismatch between the input and the parameters of the model.
I guess your input tensor might be a DoubleTensor, since it seems you are creating it from numpy arrays, which use float64 as the default dtype.
If that's the case, transform it to a FloatTensor via `input = input.float()` before passing it to the model.
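
Here is a minimal sketch of the mismatch (the `Linear` layer just stands in for the first `addmm` call inside your model):

```python
import numpy as np
import torch

x = torch.from_numpy(np.random.rand(4, 224))  # NumPy defaults to float64 -> DoubleTensor
print(x.dtype)  # torch.float64

layer = torch.nn.Linear(224, 10)  # parameters are float32 by default
# layer(x)              # raises the Double-vs-Float RuntimeError
out = layer(x.float())  # casting the input to float32 fixes it
print(out.dtype)        # torch.float32
```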

PS: you can post code snippets by wrapping them into three backticks ```, which makes debugging easier. :wink:


Hi @ptrblck, I appreciate that you took the time to look into the error, and you are right:

```
[0.41960784 0.42352941 0.22745098 ... 0.04705882 0.01568627 0.04705882]
[0.41960784 0.43529412 0.18431373 ... 0. 0. 0. ]
[0.41568627 0.43921569 0.15294118 ... 0. 0. 0. ]]]
img dtype float64
index of the image in get target 20
img shape (3, 224, 224)
```
The dtype of the img (an ndarray object) is float64.
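
That matches NumPy's default promotion; even a uint8 image becomes float64 after the division in my `__getitem__` (a minimal check):

```python
import numpy as np

img = np.zeros((3, 224, 224), dtype=np.uint8)
print((img / 255).dtype)  # float64 -- division promotes to NumPy's default float
```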

On my input tensor x I updated the code as shown below, and I can confirm from the attached debugging screenshot that it now returns float32 after I changed the call to dtype=torch.float32:

```python
x = data.to(device=self.device, dtype=torch.float32)
```

However, I am still getting the same error. After the above line I also added x = x.float(),
but it didn't change my input dtype, as the picture below shows:


This is the flow I currently have: image 224*224 → stored into a numpy data array → img = data[index] → forward x = data.to(device=self.device, dtype=torch.float32) → x = x.float() → into the model.
I am still getting the same error :frowning_face:
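
For reference, this is a minimal sketch of how I was checking the dtypes of the tensors going into the model (the dict of inputs is an assumption; add whatever tensors your forward pass receives):

```python
# Print the dtype of every tensor handed to the model; the culprit is
# whichever one is still torch.float64.
inputs = {'x': x}  # add any other model inputs here
for name, t in inputs.items():
    print(name, t.dtype)
```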

My problem is now solved, thank you very much @ptrblck for your guidance :wink:
It was solved by converting the right input to float:
Because I am working with sequential learning, I had another input called x_module_task. Adding x_module_task = x_module_task.float() solved the problem, and I am now able to pass the data into the model. I have another issue; I will open another thread and close this one. Cheers.
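
For anyone hitting this later, a minimal sketch of the fix (the forward call signature is an assumption based on my setup):

```python
# Every tensor fed to the model must be cast, not only the main input.
x = data.to(device=self.device, dtype=torch.float32)
x_module_task = x_module_task.float()  # this second input was still float64
# output = model(x, x_module_task)     # forward signature is an assumption
```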