Running PyTorch with double precision produces an error!

type or paste code hereclass CSVDataset(Dataset):
    def __init__(self, path):
        df  = read_csv(path,usecols=[5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,30], header=None)
        self.X = df.values[:, :-1]
        self.y = df.values[:, -1]

        self.X = self.X.astype('float32')
        self.y = self.y.astype('float32')

        self.y = self.y.reshape((len(self.y), 1))

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return [self.X[idx], self.y[idx]]

    def get_splits(self, n_test=0.20):                              # spliting of dataset 
        test_size = int(round(n_test * len(self.X)))
        train_size = int(len(self.X) - test_size)
        return random_split(self, [train_size, test_size])

def prepare_data(path):
    dataset = CSVDataset(path)
    train, test = dataset.get_splits()
    train_dl = DataLoader(train, batch_size=50, shuffle=False)
    test_dl = DataLoader(test, batch_size=500, shuffle=False)
    return train_dl, test_dl

Here I have set the datatype to "float32". But when I try to change the dtype to double precision ("float64"), I get the following error.

Traceback (most recent call last):
File “./”, line 267, in
train_model(train_dl, model)
File “./”, line 147, in train_model
yhat = model(inputs)
File “/home/sumanta/.local/lib/python2.7/site-packages/torch/nn/modules/”, line 532, in call
result = self.forward(*input, **kwargs)
File “./”, line 87, in forward
X = self.hidden1(X)
File “/home/sumanta/.local/lib/python2.7/site-packages/torch/nn/modules/”, line 532, in call
result = self.forward(*input, **kwargs)
File “/home/sumanta/.local/lib/python2.7/site-packages/torch/nn/modules/”, line 87, in forward
return F.linear(input, self.weight, self.bias)
File “/home/sumanta/.local/lib/python2.7/site-packages/torch/nn/”, line 1370, in linear
ret = torch.addmm(bias, input, weight.t())
RuntimeError: Expected object of scalar type Float but got scalar type Double for argument #2 ‘mat1’ in call to _th_addmm

Can PyTorch train with double precision? If yes, how can this be fixed?

You have to convert the input data as well as the model parameters to float64.
The error points to a dtype mismatch, so your model parameters are most likely still float32 while the inputs are float64. Converting the inputs alone is not enough — also call `model.double()` (equivalently, `model.to(torch.float64)`) before training so the `nn.Linear` weights match the input dtype.