RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight'

my code is

classes=["not a face","face"]
path = "F:/project/Database/sample1.jpg"
b=cv2.imread(path)
q=torch.from_numpy(b)
print(q.shape)
d=np.transpose(q.numpy(), (2, 0, 1))
print(d.shape)
print(type(d))
w=torch.from_numpy(d)
w = w.unsqueeze(0)
w= w.double()
print(type(w))
print(w.shape)
print(type(w))

def vis_model(net):
    was_training = net.training
    net.eval()
    with torch.no_grad():
        outp=net(w)
        pred=torch.max(outp,1)

Basically I have made a model now I want to give a single image as an input to the model and get the prediction…

2 Likes

A fix would be to call .double() for convert to 64bit float.

3 Likes

I have done that still I am getting that error…

You should do that for your inputs and model also.

1 Like

Which line triggers that error? It seems the error is in the model itself, which is the only source code not available in the example.

As @Kushaj stated, try converting parameters to float or your input image to double (either).

But without reproducible code there is only so much we can do to help…

1 Like

Here is the code for model… And it would be really helpful if you give a little information on my error.

class CNN(nn.Module):
    
    def __init__(self, out_1=13, out_2=32):
        super(CNN, self).__init__()
        self.cnn1 = nn.Conv2d(in_channels=3, out_channels=out_1, kernel_size=3, padding=1)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)
        self.cnn2 = nn.Conv2d(in_channels=out_1, out_channels=out_2, kernel_size=5, stride=1, padding=0)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(out_2 * 23 * 23, 2)
    
    def forward(self, x):
        print(x.shape)
        out = self.cnn1(x)
        print(out.shape)
        out = self.relu1(out)
        out = self.maxpool1(out)
        out = self.cnn2(out)
        out = self.relu2(out)
        out = self.maxpool2(out)
        #print(out.shape)
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        return out
    
    def activations(self, x):
        z1 = self.cnn1(x)
        a1 = self.relu1(z1)
        out = self.maxpool1(a1)
        
        z2 = self.cnn2(out)
        a2 = self.relu2(z2)
        out = self.maxpool2(a2)
        out = out.view(out.size(0),-1)
        return z1, a1, z2, a2, out

If required this is the method by which I have trained the model…

n_epochs=3
loss_list=[]
accuracy_list=[]
N_test=len(validation_dataset)

def train_model(n_epochs):
    for epoch in range(n_epochs):
        for x, y in train_loader:
            optimizer.zero_grad()
            z = net(x)
            loss = criterion(z, y)
            loss.backward()
            optimizer.step()

        correct=0
        #perform a prediction on the validation  data  
        for x_test, y_test in validation_loader:
            z = net(x_test)
            _, yhat = torch.max(z.data, 1)
            correct += (yhat == y_test).sum().item()
        accuracy = correct / N_test
        accuracy_list.append(accuracy)
        loss_list.append(loss.data)
train_model(n_epochs)

Additional Information
2 classes each having 624 images and shape 3x100x100

1 Like

Which line exactly throws the runtimeerror?

This is the complete error message. When I call the function I get error.

RuntimeError                              Traceback (most recent call last)
<ipython-input-13-1ad2836344b5> in <module>()
----> 1 vis_model(net)

<ipython-input-12-d875cfa1fa86> in vis_model(net)
     18     net.eval()
     19     with torch.no_grad():
---> 20         outp=net(w)
     21         pred=torch.max(outp,1)
     22 

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

<ipython-input-4-ce890d3bdcf7> in forward(self, x)
     13     def forward(self, x):
     14         print(x.shape)
---> 15         out = self.cnn1(x)
     16         print(out.shape)
     17         out = self.relu1(out)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    318     def forward(self, input):
    319         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 320                         self.padding, self.dilation, self.groups)
    321 
    322 

RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight'

Can you run, before you enter the training loop:

net = net.float()

It will transform the model parameters to float.

And then in your training loop:

z = net(x.float())

That should proceed without error.

PS: replace .float() by .double() if you wish to have network + data in double precision format.

19 Likes

This worked… Thanks

I got the same error:

RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 ‘weight’

so I’m wondering why that error would happended?
and why it still wrong when I have converted the dataset to double?

why I must convert the model to double?

Both, the data and model parameters, should have the same dtype.
If you’ve converted your data to double, you would have to do the same for your model.

4 Likes

Hi I have a same issue, however I couldn’t fix it. would you please support? Error is coming from last line.

np_data = genfromtxt('Top10_data.csv', delimiter=',', dtype='complex', skip_header=0)
inputs_T = np_data[:, 0:20].real
targets_T = np_data[:, 20:22].real

inputs = torch.from_numpy(inputs_T)
targets = torch.from_numpy(targets_T)


train_ds = TensorDataset(inputs, targets)



batch_size = 500
train_dl = DataLoader(train_ds, batch_size, shuffle=True)


class SimpleNet(nn.Module):
   # Initialize the layers
   def __init__(self):
      super().__init__()
      self.linear1 = nn.Linear(20, 20)
      self.act1 = nn.ReLU()  # Activation function
      self.linear2 = nn.Linear(20, 2)

# Perform the computation
def forward(self, x):
    x = self.linear1(x)
    x = self.act1(x)
    x = self.linear2(x)
    return x


model = SimpleNet()

opt = torch.optim.SGD(model.parameters(), 1e-5)

loss_fn = F.mse_loss


def fit(num_epochs, model, loss_fn, opt):
   for epoch in range(num_epochs):
       for xb, yb in train_dl:
          # Generate predictions
          xb = Variable(xb.float(), requires_grad=False)
          yb = Variable(yb.float(), requires_grad=False)
          pred = model(xb)
          loss = loss_fn(pred, yb)
          # Perform gradient descent
          loss.backward()
          opt.step()
          opt.zero_grad()
     print('Training loss: ', loss_fn(model(inputs), targets))


fit(100, model, loss_fn, opt)

numpy uses float64 as their default type, so call float() on these tensors before passing them to the TensorDataset:

inputs = torch.from_numpy(inputs_T),float()
targets = torch.from_numpy(targets_T).float()

(or cast them using numpy’s astype before).

7 Likes

Thank you so much. It works. Great !

That’s worked. Thank you…

2 Likes

This worked for me as well!! Thanks

Thanks, That’s work… I have the same problem