Problem type of objects

I tried to code a small neural network.
I created the dataset from random values that I generated using numpy.

Here is my code:

from __future__ import print_function
import argparse
from collections import namedtuple

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset
from torchvision import datasets, transforms
import numpy as np

class Net(nn.Module):
    def __init__(self,input_size,output_size):
        super(Net, self).__init__()
        self.linear1= nn.Linear(input_size,output_size)

    def forward(self, x):
        out = self.linear1(x)
        return out



class ReplayBuffer(Dataset):
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.labels = []
        self.position = 0

    def __len__(self):
        return len(self.memory)

    def __getitem__(self, index):
        value,label = self.memory[index],self.labels[index]
        return value,label

    def push(self, args):
        print(args[0].dtype,args[0].dtype)
        """Saves a transition."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
            self.labels.append(None)
        self.memory[self.position] =  args[0]
        self.labels[self.position] = args[1]
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)


def train_model(model,parameters,optimizer,dataset,criterion):
    num_epochs = parameters.get('num_epochs', 10)
    for epoch in range(num_epochs):
        for batch_idx, (data, target) in enumerate(dataset):
            data, target = data.to(device), target.to(device)
            print(data.size())
            print(target.size())
            data = data.double()
            target = target.double()
            optimizer.zero_grad()
            outputs = model(data)
            loss = criterion(outputs,target)
            loss.backward()
            optimizer.step()

parameters = {"input_dimen":5,"learning_rate":0.001,"batch_size":32,"shuffle": True}


#create the model
model = Net(512*2,2)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Attention, the model uses " , device)
model.to(device)

#create the dataset
dataset = ReplayBuffer(100)
for _ in range(50):
    dataset.push((np.random.rand(parameters["input_dimen"]),np.random.randint(0,1)))

print("memor",np.array(dataset.memory).shape)
dataloader = torch.utils.data.DataLoader(dataset,batch_size=parameters["batch_size"], shuffle=parameters["shuffle"])

#create the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=parameters["learning_rate"])

#criterion for loss function
criterion = nn.CrossEntropyLoss()

#train the model
train_model(model,parameters,optimizer,dataloader,criterion)




The error:

Traceback (most recent call last):
  File "predictor.py", line 94, in <module>
    train_model(model,parameters,optimizer,dataloader,criterion)
  File "predictor.py", line 65, in train_model
    outputs = model(data)
  File "/home/nicolas/.local/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "predictor.py", line 19, in forward
    out = self.linear1(x)
  File "/home/nicolas/.local/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/nicolas/.local/lib/python3.6/site-packages/torch/nn/modules/linear.py", line 67, in forward
    return F.linear(input, self.weight, self.bias)
  File "/home/nicolas/.local/lib/python3.6/site-packages/torch/nn/functional.py", line 1352, in linear
    ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
RuntimeError: Expected object of scalar type Float but got scalar type Double for argument #4 'mat1'

Any suggestions ? I believe it’s a problem of type. I tried to cast this “np.random.rand(parameters[“input_dimen”]” to float32 or float64.

numpy uses float64 by default, which is why your Dataset seems to contain data in this type.
You could transform the data using:

dataset.push((np.random.rand(parameters["input_dimen"]).astype(np.float32), np.random.randint(0,1)))

or sample directly from PyTorch:

dataset.push((torch.empty(parameters['input_dimen']).uniform_(), torch.randint(0,1, (1,)))

Note, that np.random.randint(0, 1) and torch.randint(0, 1, (1,)) will always sample a 0, since it’s sampling from [0, 1).
If you want to get values of 0 and 1, you should pass 2 for the high value.

Also, it seems you are explicitly casting your data and target to double in your training loop.
If you would like to use float64 for your calculations, you should also cast the model to double.