# 4. Train the network
# Loop over the data iterator, feed each mini-batch to the network,
# back-propagate, and take an optimizer step. Prints a running average
# of the loss every 2000 mini-batches.
# NOTE(review): the shared-memory RuntimeError in the traceback below comes
# from DataLoader worker processes on macOS — presumably fixed by creating
# the DataLoader with num_workers=0; verify where trainloader is built.
for epoch in range(2):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a [inputs, labels] pair from the DataLoader.
        # (No Variable() wrapper needed: Tensors carry autograd state since
        # PyTorch 0.4, and Variable is a deprecated no-op.)
        inputs, labels = data

        # zero the parameter gradients accumulated from the previous step
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        # loss.item() extracts the Python float; the old loss.data[0]
        # raises on modern PyTorch where loss is a 0-dim tensor.
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

# Straight quotes here: the original used typographic quotes (‘…’),
# which is a SyntaxError in Python.
print('Finished Training')
RuntimeError Traceback (most recent call last)
in ()
7
8 running_loss = 0.0
----> 9 for i, data in enumerate(trainloader, 0):
10 # get the inputs
11 inputs, labels = data
//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py in next(self)
210 self.reorder_dict[idx] = batch
211 continue
--> 212 return self._process_next_batch(batch)
213
214 next = next # Python 2 compatibility
//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py in _process_next_batch(self, batch)
237 self._put_indices()
238 if isinstance(batch, ExceptionWrapper):
--> 239 raise batch.exc_type(batch.exc_msg)
240 return batch
241
RuntimeError: Traceback (most recent call last):
File "//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 41, in _worker_loop
samples = collate_fn([dataset[i] for i in batch_indices])
File "//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 110, in default_collate
return [default_collate(samples) for samples in transposed]
File "//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 110, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "//anaconda/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 90, in default_collate
storage = batch[0].storage()._new_shared(numel)
File "//anaconda/lib/python3.5/site-packages/torch/storage.py", line 111, in _new_shared
return cls._new_using_filename(size)
RuntimeError: error executing torch_shm_manager at "//anaconda/lib/python3.5/site-packages/torch/lib/torch_shm_manager" at /Users/soumith/miniconda2/conda-bld/pytorch_1493757035034/work/torch/lib/libshm/core.cpp:125