About
Hello!
I’m going to make xor-dataloader
(I cannot make validation dataloader yet.)
However, this code does not go well.
Code
import glob
import os.path as osp
import random
import numpy as np
import json
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision
from torchvision import models, transforms
class XorDataset(data.Dataset):
def __init__(self, phase='train', bit=8):
self.phase = phase
self.bit = bit
self.array = []
for i in range(2**bit):
a = []
for j in range(bit):
a.append((i >> j) & 1)
self.array.append(a)
self.array = np.array(self.array)
self.array = torch.Tensor(self.array)
def __len__(self):
return 2**self.bit
def __getitem__(self, index):
label = 0
for b in self.array[index]:
label ^= int(b)
return self.array[index], label
xorDataset = XorDataset()
dataset = XorDataset(bit=8)
batch_size = 32
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
class Model(nn.Module):
def __init__(self, bit):
super(Model, self).__init__()
self.fv1 = nn.Linear(in_features=bit, out_features=10)
self.fv2 = nn.Linear(in_features=10, out_features=10)
self.fv3 = nn.Linear(in_features=10, out_features=1)
def forward(self, x):
x = F.relu(self.fv1(x))
x = F.relu(self.fv2(x))
x = self.fv3(x)
return x
model = Model(bit=8)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
epochs = 100
model.train()
def train_model(net, dataloader, criterion, optimizer, epochs):
for epoch in range(epochs):
epoch_loss = 0.0
epoch_corrects = 0
for inputs, labels in tqdm(dataloader):
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item() * inputs.size(0)
epoch_loss = epoch_loss / len(dataloader.dataset)
print('Loss: {:.4f} '.format(epoch_loss))
train_model(model, dataloader, criterion, optimizer, epochs)
Error Code
0%| | 0/8 [00:00<?, ?it/s]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-430-aa76eb3f74cc> in <module>
----> 1 train_model(model, dataloader, criterion, optimizer, epochs)
<ipython-input-429-6d06e31b1b55> in train_model(net, dataloader, criterion, optimizer, epochs)
12 outputs = model(inputs)
13 loss = criterion(outputs, labels)
---> 14 loss.backward()
15 optimizer.step()
16
/opt/conda/lib/python3.7/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
183 products. Defaults to ``False``.
184 """
--> 185 torch.autograd.backward(self, gradient, retain_graph, create_graph)
186
187 def register_hook(self, hook):
/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
125 Variable._execution_engine.run_backward(
126 tensors, grad_tensors, retain_graph, create_graph,
--> 127 allow_unreachable=True) # allow_unreachable flag
128
129
RuntimeError: Found dtype Long but expected Float
Exception raised from compute_types at /opt/conda/conda-bld/pytorch_1595629427478/work/aten/src/ATen/native/TensorIterator.cpp:183 (most recent call first):
After train_model(model, dataloader, criterion, optimizer, epochs)
below code raises.
I come to know why this error occurs.
This error occurs because __getitem__
return’s label is long.
However, if I change code as float(label)
, it won’t go well.
How should I do? Thank you!
class XorDataset(data.Dataset):
def __init__(self, phase='train', bit=8):
self.phase = phase
self.bit = bit
self.array = []
for i in range(2**bit):
a = []
for j in range(bit):
a.append((i >> j) & 1)
self.array.append(a)
self.array = np.array(self.array)
self.array = torch.Tensor(self.array)
def __len__(self):
return 2**self.bit
def __getitem__(self, index):
label = 0
for b in self.array[index]:
label ^= int(b)
return self.array[index], label