I replaced the fully connected layer self.fc1 in my CNN with an nn.LSTM, like this:

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # ... convolutional layers omitted; the network takes a 3-channel input ...
        # self.fc1 = nn.Linear(17664, 1024)
        self.fc1 = nn.LSTM(
            input_size=17664,
            hidden_size=1024,
            num_layers=1,
            batch_first=True)
        self.fc2 = nn.Linear(1024, 1)

    def forward(self, x):
        # ... convolutional forward pass omitted ...
        x = x.view(-1, self.num_flat_features(x))
        x, _ = self.fc1(x, None)
        x = self.fc2(x[:, -1, :])
        return x
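(num_flat_features is not shown above; I assume the standard helper from the PyTorch tutorials, which looks like this:)

    def num_flat_features(self, x):
        # multiply together every dimension except the batch dimension
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features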
When I run training (the model is wrapped in nn.DataParallel()), I first get this warning:

UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greately increasing memory usage. To compact weights again call flatten_parameters().
  x, _ = self.fc1(x, None)

and then this traceback:
Traceback (most recent call last):
  File "test.py", line 444, in <module>
    train(Train_Loader, model, criterion, optimizer, Now_Epoch)
  File "test.py", line 308, in train
    output = model(batch_input)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 60, in forward
    outputs = self.parallel_apply(replicas, inputs, kwargs)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 70, in parallel_apply
    return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 67, in parallel_apply
    raise output
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 42, in _worker
    output = module(*input, **kwargs)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__
    result = self.forward(*input, **kwargs)
  File "test.py", line 52, in forward
    x, _ = self.fc1(x, None)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 162, in forward
    output, hidden = func(input, self.all_weights, hx)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 351, in forward
    return func(input, *fargs, **fkwargs)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/autograd/function.py", line 284, in _do_forward
    flat_output = super(NestedIOFunction, self)._do_forward(*flat_input)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/autograd/function.py", line 306, in forward
    result = self.forward_extended(*nested_tensors)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 293, in forward_extended
    cudnn.rnn.forward(self, input, hx, weight, output, hy)
  File "/home/leaf/anaconda3/lib/python3.6/site-packages/torch/backends/cudnn/rnn.py", line 208, in forward
    'input must have 3 dimensions, got {}'.format(input.dim()))
RuntimeError: input must have 3 dimensions, got 2
The warning seems to be caused by nn.DataParallel(), but I don't understand why.
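From the flatten_parameters() hint in the warning, my guess is that the weights get scattered when DataParallel copies the module to each GPU, and could be re-compacted at the top of forward(). A minimal sketch of what I mean (an untested guess on my part):

    def forward(self, x):
        # re-compact the LSTM weights into one contiguous chunk of memory
        # on this replica before calling it (my untested guess)
        self.fc1.flatten_parameters()
        x = x.view(-1, self.num_flat_features(x))
        x, _ = self.fc1(x, None)
        x = self.fc2(x[:, -1, :])
        return x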
I also can't figure out what's wrong with the LSTM part. What am I doing wrong?
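For what it's worth, the docs say nn.LSTM with batch_first=True expects a 3-D input of shape (batch, seq_len, input_size), while my x.view(-1, self.num_flat_features(x)) produces a 2-D tensor, which matches the error message. A standalone sketch of the shapes (written for a recent PyTorch, and the seq_len of 1 via unsqueeze(1) is just my guess at what I would need):

    import torch
    import torch.nn as nn

    lstm = nn.LSTM(input_size=17664, hidden_size=1024,
                   num_layers=1, batch_first=True)

    x = torch.randn(4, 17664)   # 2-D, like my view() output -> "input must have 3 dimensions"
    x = x.unsqueeze(1)          # (4, 1, 17664): batch, seq_len=1, input_size
    out, (h, c) = lstm(x)       # out: (4, 1, 1024)
    last = out[:, -1, :]        # last time step, as in my forward()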