Hi @ptrblck, thanks a lot for your help.
I changed the code as you suggested, but it seems to be consuming a lot of memory (I do not have a GPU).
Here is my modified code :
# Training loop: remaps raw label IDs {0, 15, 40} to class indices {0, 1, 2},
# then trains `model` with `criterion`/`optimizer` over `trainLoader`.
for epoch in range(1, num_epochs+1):
    # Running sum instead of a growing list: the original recomputed
    # sum(epoch_loss)/len(epoch_loss) every step, which is O(n^2) per epoch.
    epoch_loss_sum = 0.0
    for step, (images, labels) in enumerate(trainLoader):
        # `step` from enumerate already counts iterations; the separate
        # manually-incremented `iteration` variable was redundant.
        print("Iter:"+str(step + 1))

        # Collapse every label ID other than {0, 15, 40} to background (0),
        # then map 15 -> class 1 and 40 -> class 2.
        im_arr = np.array(labels)
        im_arr[(im_arr!=0) & (im_arr!=15) & (im_arr!=40)] = 0
        im_arr[im_arr==15] = 1
        im_arr[im_arr==40] = 2

        inputs = Variable(images)
        targets = Variable(torch.from_numpy(im_arr))

        # Clear stale gradients before the forward/backward pass.
        optimizer.zero_grad()
        outputs = model(inputs)
        # targets[:, 0] drops the singleton channel dim so the target is
        # (N, H, W), as 2D cross-entropy-style criteria expect.
        loss = criterion(outputs, targets[:, 0])
        loss.backward()
        optimizer.step()

        # .data[0] extracts a plain Python float so the accumulated loss does
        # not keep the autograd graph alive (pre-0.4 PyTorch idiom; on >= 0.4
        # use loss.item() instead).
        epoch_loss_sum += loss.data[0]

        # Explicitly drop graph-holding tensors before the next forward pass
        # to reduce peak memory on this CPU-only setup.
        del outputs, loss, inputs, targets

        average = epoch_loss_sum / (step + 1)
        print("loss: "+str(average)+" epoch: "+str(epoch)+", step: "+str(step))
Here is the out-of-memory error:
Iter:1
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/upsampling.py:180: UserWarning: nn.UpsamplingBilinear2d is deprecated. Use nn.Upsample instead.
warnings.warn("nn.UpsamplingBilinear2d is deprecated. Use nn.Upsample instead.")
/opt/anaconda/lib/python3.6/site-packages/torch/nn/functional.py:1423: UserWarning: nn.functional.upsample_bilinear is deprecated. Use nn.functional.upsample instead.
warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.upsample instead.")
/opt/anaconda/lib/python3.6/site-packages/ipykernel_launcher.py:13: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
del sys.path[0]
loss: 1.3778443336486816 epoch: 1, step: 0
Iter:2
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-25-346a5ab069f5> in <module>()
17 targets = Variable(torch.from_numpy(im_arr))
18
---> 19 outputs = model(inputs)
20 optimizer.zero_grad()
21 #print("outputs size ==> ",outputs.size())
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
323 for hook in self._forward_pre_hooks.values():
324 hook(self, input)
--> 325 result = self.forward(*input, **kwargs)
326 for hook in self._forward_hooks.values():
327 hook_result = hook(self, input, result)
<ipython-input-5-72a9e198fd06> in forward(self, x)
54 #print('layer2', x.size())
55
---> 56 x = self.layer3(x)
57 #print('layer3', x.size())
58
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
323 for hook in self._forward_pre_hooks.values():
324 hook(self, input)
--> 325 result = self.forward(*input, **kwargs)
326 for hook in self._forward_hooks.values():
327 hook_result = hook(self, input, result)
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
65 def forward(self, input):
66 for module in self._modules.values():
---> 67 input = module(input)
68 return input
69
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
323 for hook in self._forward_pre_hooks.values():
324 hook(self, input)
--> 325 result = self.forward(*input, **kwargs)
326 for hook in self._forward_hooks.values():
327 hook_result = hook(self, input, result)
/opt/anaconda/lib/python3.6/site-packages/torchvision/models/resnet.py in forward(self, x)
74 residual = x
75
---> 76 out = self.conv1(x)
77 out = self.bn1(out)
78 out = self.relu(out)
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
323 for hook in self._forward_pre_hooks.values():
324 hook(self, input)
--> 325 result = self.forward(*input, **kwargs)
326 for hook in self._forward_hooks.values():
327 hook_result = hook(self, input, result)
/opt/anaconda/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
275 def forward(self, input):
276 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 277 self.padding, self.dilation, self.groups)
278
279
/opt/anaconda/lib/python3.6/site-packages/torch/nn/functional.py in conv2d(input, weight, bias, stride, padding, dilation, groups)
88 _pair(0), groups, torch.backends.cudnn.benchmark,
89 torch.backends.cudnn.deterministic, torch.backends.cudnn.enabled)
---> 90 return f(input, weight, bias)
91
92
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at /opt/conda/conda-bld/pytorch_1513368888240/work/torch/lib/TH/THGeneral.c:246
Am I doing anything wrong here, or is it just a memory issue?
I am planning to move the code to a server that has a GPU. Will that solve this issue?