Expected object of type torch.DoubleTensor but found type torch.FloatTensor for argument #2 'weight'

I am trying to feed an image batch to the UNet.

for i_batch, sample_batched in enumerate(dataloader):
    print(i_batch, sample_batched['image'].size(), sample_batched['semantic'].size())
    out = unet(sample_batched['image'])
    #Observe the 4th batch and stop
    if i_batch == 1:
        plt.figure()
        show_semantic_batch(sample_batched)
        print(out)
        break

and I got this error:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-28-d128d402f147> in <module>
      3 for i_batch, sample_batched in enumerate(dataloader):
      4     print(i_batch, sample_batched['image'].size(), sample_batched['semantic'].size())
----> 5     out = unet(sample_batched['image'])
      6     #Observe the 4th batch and stop
      7     if i_batch == 1:

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

<ipython-input-21-be431a676bcb> in forward(self, x)
     14 
     15     def forward(self, x):
---> 16         x1 = self.inc(x)
     17         x2 = self.down1(x1)
     18         x3 = self.down2(x2)

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

<ipython-input-14-cc6f846be9d2> in forward(self, x)
      5 
      6     def forward(self, x):
----> 7         x = self.conv(x)
      8         return x

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

<ipython-input-13-40a5fe15925a> in forward(self, x)
     13 
     14     def forward(self, x):
---> 15         x = self.conv(x)
     16         return x

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     89     def forward(self, input):
     90         for module in self._modules.values():
---> 91             input = module(input)
     92         return input
     93 

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

~/miniconda3/envs/ImageSegmentation/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
    299     def forward(self, input):
    300         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 301                         self.padding, self.dilation, self.groups)
    302 
    303 

RuntimeError: Expected object of type torch.DoubleTensor but found type torch.FloatTensor for argument #2 'weight'

Does this mean I should convert the model's weights to DoubleTensor?

My input data has dtype torch.float64.
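For reference, the mismatch can be confirmed by printing the dtypes on both sides of the call. This is a minimal check, assuming the unet and dataloader objects from the snippet above:

# Quick dtype check: the input comes from the DataLoader, the weights from the model.
batch = next(iter(dataloader))
print(batch['image'].dtype)            # torch.float64 (DoubleTensor)
print(next(unet.parameters()).dtype)   # torch.float32 (FloatTensor)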

It’s solved. I changed the line below so the input batch is cast to float32, matching the model's float32 weights:

out = unet(sample_batched['image'].float())
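For completeness, two other common ways to resolve the same mismatch, sketched here as alternatives rather than as part of the original fix: cast the image once where the sample is created, or convert the whole model to double if float64 precision is actually required.

# Option 1: cast once in the Dataset's __getitem__ (or a transform),
# so every batch already arrives as float32:
#     sample['image'] = sample['image'].float()

# Option 2: keep float64 inputs and convert the model's weights instead
# (uses more memory and is usually unnecessary for image segmentation):
unet = unet.double()
out = unet(sample_batched['image'])  # both input and weights are now torch.float64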