RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight'

This is the complete error message I get when I call the function:

RuntimeError                              Traceback (most recent call last)
<ipython-input-13-1ad2836344b5> in <module>()
----> 1 vis_model(net)

<ipython-input-12-d875cfa1fa86> in vis_model(net)
     18     net.eval()
     19     with torch.no_grad():
---> 20         outp=net(w)
     21         pred=torch.max(outp,1)
     22 

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

<ipython-input-4-ce890d3bdcf7> in forward(self, x)
     13     def forward(self, x):
     14         print(x.shape)
---> 15         out = self.cnn1(x)
     16         print(out.shape)
     17         out = self.relu1(out)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    318     def forward(self, input):
    319         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 320                         self.padding, self.dilation, self.groups)
    321 
    322 

RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight'

Can you run, before you enter the training loop:

net = net.float()

It will transform the model parameters to float.

And then in your training loop:

z = net(x.float())

That should proceed without error.

PS: replace .float() by .double() if you wish to have network + data in double precision format.
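
For context, a minimal self-contained sketch of this mismatch and the fix (the layer and tensor shapes here are illustrative, not taken from the post above):

import torch
import torch.nn as nn

net = nn.Conv2d(1, 8, kernel_size=3).double()  # parameters in float64
x = torch.randn(1, 1, 28, 28)                  # float32 input -> dtype mismatch in forward

net = net.float()      # cast all parameters and buffers to float32
out = net(x.float())   # cast the input to match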


This worked… Thanks

I got the same error:

RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight'

so I'm wondering: why does that error happen?
And why is it still wrong when I have converted the dataset to double?

Why must I convert the model to double?

Both the data and the model parameters should have the same dtype.
If you've converted your data to double, you would have to do the same for your model.
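
For example, a minimal sketch of the double-precision route (the names here are illustrative):

import torch
import torch.nn as nn

x = torch.randn(8, 10, dtype=torch.float64)  # e.g. a tensor coming from numpy
net = nn.Linear(10, 2)                       # parameters are float32 by default

net = net.double()   # cast the parameters to float64 to match the data
out = net(x)         # dtypes now agree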


Hi, I have the same issue, but I couldn't fix it. Could you please help? The error comes from the last line.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
from numpy import genfromtxt

np_data = genfromtxt('Top10_data.csv', delimiter=',', dtype='complex', skip_header=0)
inputs_T = np_data[:, 0:20].real
targets_T = np_data[:, 20:22].real

inputs = torch.from_numpy(inputs_T)
targets = torch.from_numpy(targets_T)

train_ds = TensorDataset(inputs, targets)

batch_size = 500
train_dl = DataLoader(train_ds, batch_size, shuffle=True)


class SimpleNet(nn.Module):
    # Initialize the layers
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(20, 20)
        self.act1 = nn.ReLU()  # Activation function
        self.linear2 = nn.Linear(20, 2)

    # Perform the computation
    def forward(self, x):
        x = self.linear1(x)
        x = self.act1(x)
        x = self.linear2(x)
        return x


model = SimpleNet()

opt = torch.optim.SGD(model.parameters(), 1e-5)

loss_fn = F.mse_loss


def fit(num_epochs, model, loss_fn, opt):
    for epoch in range(num_epochs):
        for xb, yb in train_dl:
            # Generate predictions
            xb = Variable(xb.float(), requires_grad=False)
            yb = Variable(yb.float(), requires_grad=False)
            pred = model(xb)
            loss = loss_fn(pred, yb)
            # Perform gradient descent
            loss.backward()
            opt.step()
            opt.zero_grad()
        print('Training loss: ', loss_fn(model(inputs), targets))


fit(100, model, loss_fn, opt)

numpy uses float64 as its default type, so call float() on these tensors before passing them to the TensorDataset:

inputs = torch.from_numpy(inputs_T).float()
targets = torch.from_numpy(targets_T).float()

(or cast them using numpy’s astype before).
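
For reference, the astype route would look like this (assuming numpy is imported as np):

inputs = torch.from_numpy(inputs_T.astype(np.float32))
targets = torch.from_numpy(targets_T.astype(np.float32))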


Thank you so much. It works. Great!

That worked. Thank you…


This worked for me as well!! Thanks

Thanks, that worked… I had the same problem.

Dear all,

I have a similar issue and wish to get some help from you guys.

class VehicleDataset(Dataset):

    def __init__(self, small_sequences):
        self.sequences = small_sequences

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, idx):
        sequence, cluster_label = self.sequences[idx]

        return dict(
            sequence=torch.transpose(torch.Tensor(sequence.to_numpy()), 0, 1),
            cluster_label=torch.tensor(cluster_label).long()
        )

class VehicleDataModule(pl.LightningDataModule):
    def __init__(self, train_sequences, test_sequences, batch_size=8):
        super().__init__()
        self.train_sequences = train_sequences
        self.test_sequences = test_sequences
        self.batch_size = batch_size

    def setup(self):
        self.train_dataset = VehicleDataset(self.train_sequences)
        self.test_dataset = VehicleDataset(self.test_sequences)

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=2
        )

    def val_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=1
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=1
        )

N_EPOCHS = 50
BATCH_SIZE = 64

class TCNModel(nn.Module):
    def __init__(self, num_inputs, n_classes, num_channels, kernel_size=3, dropout=0.3):
        super(TCNModel, self).__init__()
        self.tcn = TemporalConvNet(
            num_inputs, num_channels, kernel_size=kernel_size, dropout=dropout
        )
        self.linear = nn.Linear(num_channels[-1], n_classes)

    def forward(self, x):
        y1 = self.tcn(x)
        out = self.linear(y1[:, :, -1])
        return out

Define the Lightning module:

class Classification(pl.LightningModule):

    def __init__(self, num_inputs: int, n_classes: int, num_channels):
        super().__init__()
        self.model = TCNModel(num_inputs, n_classes, num_channels)
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x, cluster_labels=None):
        output = self.model(x)
        loss = 0
        if cluster_labels is not None:
            loss = self.criterion(output, cluster_labels)
        return loss, output

    def training_step(self, batch, batch_idx):
        sequences = batch["sequence"]
        cluster_labels = batch["cluster_label"]

        loss, outputs = self(sequences, cluster_labels)
        predictions = torch.argmax(outputs, dim=1)
        step_accuracy = accuracy(predictions, cluster_labels)
        self.log("train_loss", loss, prog_bar=True, logger=True)
        self.log("train_accuracy", step_accuracy, prog_bar=True, logger=True)
        # in training step
        self.logger.experiment.add_scalars("losses", {"train_loss": loss}, global_step=self.current_epoch)
        return {"loss": loss, "accuracy": step_accuracy}

    def validation_step(self, batch, batch_idx):
        sequences = batch["sequence"]
        cluster_labels = batch["cluster_label"]

        loss, outputs = self(sequences, cluster_labels)
        predictions = torch.argmax(outputs, dim=1)
        step_accuracy = accuracy(predictions, cluster_labels)
        self.log("val_loss", loss, prog_bar=True, logger=True)
        self.log("val_accuracy", step_accuracy, prog_bar=True, logger=True)
        # in validation step
        self.logger.experiment.add_scalars("losses", {"val_loss": loss}, global_step=self.current_epoch)
        return {"loss": loss, "accuracy": step_accuracy}

    def test_step(self, batch, batch_idx):
        sequences = batch["sequence"]
        cluster_labels = batch["cluster_label"]

        loss, outputs = self(sequences, cluster_labels)
        predictions = torch.argmax(outputs, dim=1)
        step_accuracy = accuracy(predictions, cluster_labels)
        self.log("test_loss", loss, prog_bar=True, logger=True)
        self.log("test_accuracy", step_accuracy, prog_bar=True, logger=True)
        return {"loss": loss, "accuracy": step_accuracy}

    def configure_optimizers(self):
        return optim.Adam(self.model.parameters(), lr=0.001)

model = Classification(num_inputs = 3, n_classes = 4, num_channels=[128]*4)

The error happens in the step accuracy computation.

It only happens when I set the default dtype at the beginning of the script:
torch.set_default_dtype(torch.float64)

Thanks

The error is raised in the torchmetrics module, which seems to rely on float32 being the default type.
If you want to keep setting the default type to float64 you might need to explicitly cast the tensors to the expected type before passing them to torchmetrics.
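
For example, a sketch of such an explicit cast inside training_step (an untested guess, reusing the names from the code above):

loss, outputs = self(sequences, cluster_labels)
predictions = torch.argmax(outputs, dim=1)                 # int64 class indices
step_accuracy = accuracy(outputs.float(), cluster_labels)  # hand torchmetrics float32 instead of float64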


Hi @ptrblck ,

Thanks for the reply.

Does that mean I have to convert back to float32 before passing to torchmetrics?
I checked the input datatypes for torchmetrics; only int and float are accepted, and double is not.

In that case I guess that torchmetrics might internally create new tensors in the default type.
You could then try to pass a DoubleTensor and see if that works.

Hi @ptrblck,


Both my predictions and cluster labels have dtype int64.
They shouldn't cause an error, but I still get the error shown in the image I attached earlier.

The difference I notice is at line 264: with the default dtype of float32, the output values are all positive, while with float64 the outputs are a mix of positive and negative, as shown in the image.
I am not sure whether this causes the error or not.

Thanks

I am facing the same issue in the code below.

This is my input. I have made the changes mentioned above in the thread:

# create validation set
# for creating validation set
X = np.asarray(images)
y = np.asarray(labels)
from sklearn.model_selection import train_test_split
train_x, val_x, train_y, val_y = train_test_split(X, y, test_size = 0.1)
(train_x.shape, train_y.shape), (val_x.shape, val_y.shape)

import torch
train_x  = torch.from_numpy(train_x)

# converting the target into torch format
# train_y = train_y.astype(int);
train_y = torch.from_numpy(train_y).float()

# shape of training data
train_x.shape, train_y.shape

val_x  = torch.from_numpy(val_x)

# converting the target into torch format
# val_y = val_y.astype(int);
val_y = torch.from_numpy(val_y).float()

# shape of validation data
val_x.shape, val_y.shape

import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):   
  def __init__(self):
    super(Net, self).__init__()

    self.cnn_layers = nn.Sequential(
        # Defining a 2D convolution layer
        nn.Conv2d(9, 4, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
        # Defining another 2D convolution layer
        nn.Conv2d(4, 4, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
    )

    self.linear_layers = nn.Sequential(
        nn.Linear(4 * 8 * 8, 1)
    )

  # Defining the forward pass    
  def forward(self, x):
    x = self.cnn_layers(x)
    x = x.view(x.size(0), -1)
    x = self.linear_layers(x)
    return x


# defining the model
model = Net()
# defining the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.07)
# defining the loss function
criterion = nn.CrossEntropyLoss()
# checking if GPU is available
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
    
print(model)

This is the model

from torch.autograd import Variable
def train(epoch):
    model.train()
    tr_loss = 0
    # getting the training set
    x_train, y_train = Variable(train_x), Variable(train_y)
    # getting the validation set
    x_val, y_val = Variable(val_x), Variable(val_y)
    # converting the data into GPU format
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()

    # clearing the Gradients of the model parameters
    optimizer.zero_grad()
    
    # prediction for training and validation set
    output_train = model(x_train)
    output_val = model(x_val)

    # computing the training and validation loss
    loss_train = criterion(output_train, y_train)
    loss_val = criterion(output_val, y_val)
    train_losses.append(loss_train)
    val_losses.append(loss_val)

    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    tr_loss = loss_train.item()
    if epoch%2 == 0:
        # printing the validation loss
        print('Epoch : ',epoch+1, '\t', 'loss :', loss_val)

This is the error shown below,

# defining the number of epochs
n_epochs = 5
# empty list to store training losses
train_losses = []
# empty list to store validation losses
val_losses = []
# training the model
# print(y_train)
for epoch in range(n_epochs):
    train(epoch)


RuntimeError                              Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_29660/3917732757.py in <module>
      8 # print(y_train)
      9 for epoch in range(n_epochs):
---> 10     train(epoch)

~\AppData\Local\Temp/ipykernel_29660/431183214.py in train(epoch)
     18 
     19     # prediction for training and validation set
---> 20     output_train = model(x_train)
     21     output_val = model(x_val)
     22 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

~\AppData\Local\Temp/ipykernel_29660/1066969557.py in forward(self, x)
     24   # Defining the forward pass
     25   def forward(self, x):
---> 26     x = self.cnn_layers(x)
     27     x = x.view(x.size(0), -1)
     28     x = self.linear_layers(x)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
    139     def forward(self, input):
    140         for module in self:
--> 141             input = module(input)
    142         return input
    143 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    444 
    445     def forward(self, input: Tensor) -> Tensor:
--> 446         return self._conv_forward(input, self.weight, self.bias)
    447 
    448 class Conv3d(_ConvNd):

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\conv.py in _conv_forward(self, input, weight, bias)
    441                             _pair(0), self.dilation, self.groups)
    442         return F.conv2d(input, weight, bias, self.stride,
--> 443                         self.padding, self.dilation, self.groups)
    444 
    445     def forward(self, input: Tensor) -> Tensor:

RuntimeError: expected scalar type Double but found Float

Can anyone help me? I am quite new to PyTorch and want to build an image regression model with it.

Most likely x_train is a DoubleTensor, since numpy uses float64 by default and you didn't transform the tensor via float():

train_x = torch.from_numpy(train_x)

I added the float() casts as suggested:

import torch
train_x = torch.from_numpy(train_x).float()

# converting the target into torch format
# train_y = train_y.astype(int);
train_y = torch.from_numpy(train_y).float()

# shape of training data
train_x.shape, train_y.shape
val_x  = torch.from_numpy(val_x).float()

# converting the target into torch format
# val_y = val_y.astype(int);
val_y = torch.from_numpy(val_y).float()

# shape of validation data
val_x.shape, val_y.shape
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_29660/3917732757.py in <module>
      8 # print(y_train)
      9 for epoch in range(n_epochs):
---> 10     train(epoch)

~\AppData\Local\Temp/ipykernel_29660/431183214.py in train(epoch)
     22 
     23     # computing the training and validation loss
---> 24     loss_train = criterion(output_train, y_train)
     25     loss_val = criterion(output_val, y_val)
     26     train_losses.append(loss_train)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
   1100         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102             return forward_call(*input, **kwargs)
   1103         # Do not call functions when jit is used
   1104         full_backward_hooks, non_full_backward_hooks = [], []

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
   1150         return F.cross_entropy(input, target, weight=self.weight,
   1151                                ignore_index=self.ignore_index, reduction=self.reduction,
-> 1152                                label_smoothing=self.label_smoothing)
   1153 
   1154 

~\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
   2844     if size_average is not None or reduce is not None:
   2845         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2846     return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
   2847 
   2848 

RuntimeError: expected scalar type Long but found Double

Now it's asking for a Long value.
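
That is because nn.CrossEntropyLoss expects class indices with dtype int64 (Long) as targets. For an image regression model with a single output like Net above, nn.MSELoss with float targets is the usual choice instead. A sketch of both options (a suggestion, not the fix from the original thread; names taken from the code above):

# classification: CrossEntropyLoss needs Long class indices as targets
loss_train = criterion(output_train, y_train.long())

# regression: MSELoss with float targets matching the output shape (N, 1)
criterion = nn.MSELoss()
loss_train = criterion(output_train, y_train.float().unsqueeze(1))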

I have managed to solve the issue now. Thanks a lot, ptrblck.