Printing the tensor's type shows it is a LongTensor, yet the error persists. It is a character-based model that I am building for the popular 'dinosaur names' problem. Here is the code:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class din_model(nn.Module):
    def __init__(self, max_len):
        super(din_model, self).__init__()
        self.rnn = nn.RNN(1, 64)        # one-layer RNN: input_size=1, hidden_size=64
        self.dense = nn.Linear(64, 27)  # project hidden states to 27 character classes

    def forward(self, input_):
        # add a feature dimension: (batch, seq_len) -> (batch, seq_len, 1)
        reshape_ = input_.view((input_.size(0), input_.size(1), 1))
        print(reshape_.type(), reshape_.size())
        rnn_, _ = self.rnn(reshape_)
        dense_ = self.dense(rnn_)
        return F.log_softmax(dense_, dim=1)

model = din_model(27)
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_fn = nn.NLLLoss()

for epoch in range(3):
    for t, l in train_dataloader:
        output_batch = model(torch.tensor(t, dtype=torch.long))
        loss = loss_fn(output_batch, l)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("Epoch is {}, loss is {}".format(epoch, loss.data))
Here is the error:
RuntimeError Traceback (most recent call last)
<ipython-input-54-1717991d3fdc> in <module>
1 for epoch in range(3):
2 for t,l in train_dataloader:
----> 3 output_batch = model(torch.tensor(t, dtype=torch.long))
4 loss = loss_fn(output_batch, l)
5 # clear previous gradients, compute gradients of all variables wrt loss
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-53-e8e162814236> in forward(self, input_)
8 reshape_ = input_.view((input_.size(0),input_.size(1),1))
9 print(reshape_.type(), reshape_.size())
---> 10 rnn_, _ = self.rnn(reshape_)
11 dense_ = self.dense(rnn_)
12 return F.log_softmax(dense_, dim=1)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
233 if batch_sizes is None:
234 result = _impl(input, hx, self._flat_weights, self.bias, self.num_layers,
--> 235 self.dropout, self.training, self.bidirectional, self.batch_first)
236 else:
237 result = _impl(input, batch_sizes, hx, self._flat_weights, self.bias,
RuntimeError: expected scalar type Long but found Float
The output of print(reshape_.type(), reshape_.size()) is:
torch.LongTensor torch.Size([32, 27, 1])
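For reference, here is a standalone snippet with synthetic data only (the random indices in [0, 27) are just a stand-in for my encoded characters, same shape and dtype as my batches) that I believe reproduces the same error without the dataloader or training loop:

import torch
import torch.nn as nn

# synthetic batch matching what my model sees after the view:
# shape (32, 27, 1), dtype torch.long
x = torch.randint(0, 27, (32, 27, 1), dtype=torch.long)

rnn = nn.RNN(1, 64)
out, _ = rnn(x)  # raises the same "expected scalar type ..." RuntimeError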
If anyone can help me resolve this, that would be great.