RuntimeError: expected scalar type Float but found Double

I am trying to use and modify this RGP repository, which uses a very old version of PyTorch.

import torch
from torch.autograd import Variable
import torch.nn as nn
from GPy.core import Model, Parameterized, Param
import numpy as np
class Mean_var_rnn(nn.Module):
    
    def __init__(self, p_input_dim, p_output_dim, p_hidden_dim, rnn_type='rnn', with_input_variance = True, bidirectional=False):
        super(Mean_var_rnn, self).__init__()

        if rnn_type == 'rnn':
            self.rnn = nn.RNN(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
            
        elif rnn_type == 'lstm':
            self.rnn = nn.LSTM(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
            
        elif rnn_type == 'gru':
            self.rnn = nn.GRU(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
        else:
            raise ValueError("Unknown rnn type")
        
        self.rnn_type = rnn_type
        self.bidirectional=bidirectional
        self.with_input_variance = with_input_variance
        
        # in_features, out_features
        self.dir_number = 1 if bidirectional==False else 2
        self.linear_mean = nn.Linear(in_features=p_hidden_dim*self.dir_number,  out_features=p_output_dim)
                        
        self.linear_var = nn.Linear(in_features=p_hidden_dim*self.dir_number,  out_features=p_output_dim)
        
        self.soft_plus = nn.Softplus()
        
    def forward(self, mean_input, h_0, var_input=None, c_0=None):
        """
        
        Input:
        ------------
        c_0 - lstm init cell state
        """
        
        
        if self.with_input_variance:
            comb_input = torch.cat( (mean_input, var_input), dim=2 )
        else:
            comb_input = mean_input
        if self.rnn_type=='lstm':
            rnn_outputs,_ = self.rnn( comb_input, (h_0, c_0) ) # (seq_len, batch, hidden_size * num_directions)
        else:    
            rnn_outputs,_ = self.rnn( comb_input, h_0) # (seq_len, batch, hidden_size * num_directions)
        self.mean_out = self.linear_mean(rnn_outputs) #(N,∗,out_features)

        self.var_out = self.soft_plus( self.linear_var(rnn_outputs) ) 
        return self.mean_out, self.var_out   
        
        
class Mean_var_multilayer(nn.Module):
    def __init__(self, p_layer_num, p_input_dims, p_output_dims, p_hidden_dim, rnn_type='rnn', h_0_type='zero',
                 bidirectional=False):
        """
        
        """
        super(Mean_var_multilayer, self).__init__()
        
        assert p_layer_num == len(p_input_dims), "Layer num must be correct"
        assert len(p_input_dims) == len(p_output_dims), " Dim lengths must match"
        
        self.layer_num = p_layer_num
        self.input_dims = [ (ss if i==0 else ss*2) for (i, ss) in enumerate(p_input_dims)  ] # lower layers first 
        self.output_dims = p_output_dims #[ ss*2 for ss in p_output_dims] # lower layers first 
        self.hidden_dim = p_hidden_dim # assume that the hidden dim of all layers is equal
        
        self.rnn_type = rnn_type
        self.bidirectional=bidirectional
        
        if h_0_type=='zero':
            self.h_0 = np.zeros((p_hidden_dim,) )
        else:
            raise NotImplementedError("Other initialization is not currently implemented")
        
        if (rnn_type=='lstm'):
            c_0_type = h_0_type
            self.c_0 = np.zeros((p_hidden_dim,) )
        
        self.layers = []
        for l in range(self.layer_num): # layer 0 is observed,  layer 1 is the next after observed etc.
            # layers are created separately in order to make python references to the hidden layers outputs
            with_input_variance = False if (l==0) else True # input variance
            layer = Mean_var_rnn(self.input_dims[l], self.output_dims[l], 
                                 self.hidden_dim, rnn_type=rnn_type, with_input_variance = with_input_variance, bidirectional=bidirectional)
            setattr(self, 'layer_' + str(l), layer)
            self.layers.append(layer)
        
    def forward(self, inp_l0):

        h_0 = Variable( torch.from_numpy( np.broadcast_to(self.h_0, ( 2 if self.bidirectional else 1,inp_l0.size()[1],self.h_0.shape[0]) ) ).double() )
        
        if self.rnn_type =='lstm':
            c_0 = Variable( torch.from_numpy( np.broadcast_to(self.c_0, ( 2 if self.bidirectional else 1,inp_l0.size()[1],self.h_0.shape[0]) ) ).double() )
        else:
            c_0 = None
            
        #import pdb; pdb.set_trace()
        
        # prepare h_0 <-
        self.out_means = []
        self.out_vars = []
        out_mean, out_var = inp_l0, None
        for l in range(self.layer_num):
            layer = self.layers[l]
            out_mean, out_var = layer( out_mean, h_0,  var_input=out_var, c_0=c_0)
            
            # Store outputs
            self.out_means.append(out_mean )
            self.out_vars.append( out_var)

        return self.out_means

I made an example to check whether the classes in this script work with my version of PyTorch:

>>> from rnn_encoder import *
>>> tt = Mean_var_multilayer(2, [2,3], [3,4], 5, h_0_type='zero', rnn_type='rnn')
>>> tt(torch.randn([5,5,2]))

I got this error:

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Volumes//RGP/autoreg/rnn_encoder.py", line 144, in forward
    out_mean, out_var = layer( out_mean, h_0,  var_input=out_var, c_0=c_0)
  File "/Users/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Volumes/RGP/autoreg/rnn_encoder.py", line 71, in forward
    rnn_outputs,_ = self.rnn( comb_input, h_0) # (seq_len, batch, hidden_size * num_directions)
  File "/Users/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 471, in forward
    result = _VF.rnn_tanh(input, hx, self._flat_weights, self.bias, self.num_layers,
RuntimeError: expected scalar type Float but found Double

Can someone explain what the problem is here? Thanks.

In your script you are explicitly casting the initial hidden state h_0 (and c_0 for the LSTM) to .double(), which means that all parameters and inputs are expected to be in the same dtype. Either cast the model (and its inputs) to .double() as well, or cast these tensors to .float().
Also, Variables have been deprecated since PyTorch 0.4, so you can use plain tensors in newer versions.
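
A minimal sketch of both fixes, using a standalone nn.RNN instead of the full model from the post (shapes are arbitrary, just for illustration):

import numpy as np
import torch
import torch.nn as nn

rnn = nn.RNN(input_size=2, hidden_size=5, num_layers=1)

x = torch.randn(7, 4, 2)            # float32 input: (seq_len, batch, input_size)
h_0_np = np.zeros((1, 4, 5))        # NumPy arrays default to float64

# rnn(x, torch.from_numpy(h_0_np))  # -> RuntimeError: expected scalar type Float but found Double

# Option 1: keep everything in float32 by casting the hidden state
out, _ = rnn(x, torch.from_numpy(h_0_np).float())

# Option 2: cast the module and its inputs to float64 instead
rnn = rnn.double()
out_d, _ = rnn(x.double(), torch.from_numpy(h_0_np))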

Hi, I have a small problem in my code: it shows me the images, but sometimes some images are repeated. Do you have a solution so that I can view each image only once, without repetition?


import os

import matplotlib.pyplot as plt
import pandas
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.io import read_image

class Data_set_Papy(Dataset):
    def __init__(self, csv_file, root_directory_image, transform=None, target_transform=None, train=True):
        self.annotations = pandas.read_csv(csv_file)
        self.root_directory_image = root_directory_image
        self.transform = transform
        self.target_transform = target_transform
        self.train = train

    def __len__(self):
        return len(self.annotations)  # number of rows in the csv file

    def __getitem__(self, index):  # return a specific image and the corresponding target
        # column 0 holds the image file name, column 1 the label
        our_image_path = os.path.join(self.root_directory_image, self.annotations.iloc[index, 0])
        image = read_image(our_image_path)
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))

        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            y_label = self.target_transform(y_label)  # target_transform should act on the label, not the image

        return image, y_label


training_data = Data_set_Papy(
    csv_file='index_papyrus.csv',
    root_directory_image='input_frags_papyrus',
    transform=None,
    target_transform=None,
    train=True,
)
testing_data = Data_set_Papy(
    csv_file='index_papyrus.csv',
    root_directory_image='input_frags_papyrus',
    transform=None,
    target_transform=None,
    train=False,
)

train_dataloader = DataLoader(training_data, batch_size=10, shuffle=True)
test_dataloader = DataLoader(testing_data, batch_size=10, shuffle=False)


# Display image and label.
figure = plt.figure(figsize=(8, 8))
cols, rows = 3, 3
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
for i in range(1, cols * rows + 1):
    sample_idx = torch.randint(len(training_data), size=(1,)).item()
    # the problem: these indices can repeat, so the same image may be shown twice
    img, label = training_data[sample_idx]
    figure.add_subplot(rows, cols, i)
    img = img.squeeze().permute(1, 2, 0)  # (C, H, W) -> (H, W, C) for matplotlib
    plt.axis("off")
    plt.imshow(img, cmap="gray")
plt.show()

Your problem seems unrelated to this topic, but it is most likely caused by the fact that you are randomly sampling the indices used to display the images, and these indices can repeat:

sample_idx = torch.randint(len(training_data), size=(1,)).item()
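
To illustrate (an arbitrary example, not from your script): torch.randint samples with replacement, so duplicate indices are expected:

import torch

# 9 indices drawn from a dataset of size 20, with replacement
idx = torch.randint(20, size=(9,))
print(idx)           # duplicates can and often do appear
print(idx.unique())  # usually fewer than 9 unique values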

Hi,
sorry if it’s unrelated to this topic :slight_smile:
I know the problem is at the torch.randint … line, but I still don’t know how to show my images without repetition.

You could permute the indices of the samples and pick the ones you want to visualize:

training_data = torch.randn(100, 3, 224, 224)
rand_idx = torch.randperm(len(training_data))[:10]

for idx in rand_idx:
    print('Plotting {}'.format(idx))

This would replace which line of my code? I guess the sample_idx one?
Thanks!
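
For reference, a rough sketch of how the plotting loop from the question could look with torch.randperm in place of the sample_idx line (untested, reusing figure, cols, rows, and training_data from the posted script):

# unique, shuffled indices - one per grid cell, so no repeats
rand_idx = torch.randperm(len(training_data))[:cols * rows]

figure = plt.figure(figsize=(8, 8))
for i, sample_idx in enumerate(rand_idx.tolist(), start=1):
    img, label = training_data[sample_idx]
    figure.add_subplot(rows, cols, i)
    plt.axis("off")
    plt.imshow(img.squeeze().permute(1, 2, 0), cmap="gray")
plt.show()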