code:
import torch
import torch.nn.functional as F

for i, (input, target) in enumerate(test_loader):
    target = target.cuda(async=True)  # the test loader is built with pin_memory=True
    input_var = torch.autograd.Variable(input, volatile=False)
    # input_var has shape (batch_size, 10, 3, 32, 224, 224)
    b, s, c, t, h, w = input_var.size()
    # fold the 10 crops into the batch dimension: (batch_size * 10, 3, 32, 224, 224)
    input_var = input_var.view(-1, c, t, h, w)
    # forward
    output = model(input_var)
    # split back into (batch_size, 10, num_classes), e.g. (batch_size, 10, 400)
    output = output.view(b, s, args.num_classes)
    # softmax over classes, then sum the scores of the 10 crops
    scores = torch.sum(F.softmax(output, dim=2), dim=1, keepdim=False)
    # in-place average over the 10 crops, then sort classes per sample
    scores, indices = scores.div_(10).sort(dim=1, descending=True)
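For context, the model passed to inference() is wrapped in nn.DataParallel, roughly like this (a sketch, not the exact code; build_model and the GPU ids are placeholders):

model = build_model(num_classes=args.num_classes)  # placeholder constructor
model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
model.eval()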
error:
Traceback (most recent call last):
  File "inference.py", line 301, in <module>
    main()
  File "inference.py", line 164, in main
    inference(test_loader, model)
  File "inference.py", line 195, in inference
    output = model(input_var)
  File "/home/anaconda2/lib/python2.7/site-packages/torch/nn/modules/module.py", line 357, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/anaconda2/lib/python2.7/site-packages/torch/nn/parallel/data_parallel.py", line 73, in forward
    outputs = self.parallel_apply(replicas, inputs, kwargs)
  File "/home/anaconda2/lib/python2.7/site-packages/torch/nn/parallel/data_parallel.py", line 83, in parallel_apply
    return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
  File "/home/anaconda2/lib/python2.7/site-packages/torch/nn/parallel/parallel_apply.py", line 67, in parallel_apply
    raise output
RuntimeError: all tensors must be on devices[0]
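Is this because input_var is still a CPU tensor (or not on device_ids[0]) when the DataParallel forward runs? I was going to try something like this (untested sketch, assuming devices[0] is GPU 0):

# move the batch onto devices[0] first; DataParallel then scatters it across the GPUs
input_var = input_var.cuda(0)
output = model(input_var)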
Thanks.