#train the model
for epoch in range(2):
for i, (images, labels) in enumerate(train_loader):
print(type(images))
images = Variable(images)
labels = Variable(labels)
print(type(images))
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = cnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print(loss.data)
print ('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
%(epoch+1, 2, i+1, len(train_dataset)//BATCH_SIZE, loss.data[0]))
ERROR:
<class 'torch.LongTensor'>
<class 'torch.autograd.variable.Variable'>
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-26-5427cb169c61> in <module>()
8 # Forward + Backward + Optimize
9 optimizer.zero_grad()
---> 10 outputs = cnn(images)
11 loss = criterion(outputs, labels)
12 loss.backward()
/home/quoniammm/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
222 for hook in self._forward_pre_hooks.values():
223 hook(self, input)
--> 224 result = self.forward(*input, **kwargs)
225 for hook in self._forward_hooks.values():
226 hook_result = hook(self, input, result)
<ipython-input-19-8341c87faa62> in forward(self, x)
14
15 def forward(self, x):
---> 16 x = F.relu(self.conv1(x))
17 x = F.max_pool2d(F.relu(self.conv2(x)), 2)
18
/home/quoniammm/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
222 for hook in self._forward_pre_hooks.values():
223 hook(self, input)
--> 224 result = self.forward(*input, **kwargs)
225 for hook in self._forward_hooks.values():
226 hook_result = hook(self, input, result)
/home/quoniammm/anaconda3/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
252 def forward(self, input):
253 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 254 self.padding, self.dilation, self.groups)
255
256
/home/quoniammm/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py in conv2d(input, weight, bias, stride, padding, dilation, groups)
50 f = ConvNd(_pair(stride), _pair(padding), _pair(dilation), False,
51 _pair(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
---> 52 return f(input, weight, bias)
53
54
RuntimeError: expected Long tensor (got Float tensor)
I have a question about type.
when I run Variable().Why the type of images is changed?
I can’t understand it.Who can tell me why?