Error in sample code

I ran the code from http://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html

# Author: Robert Guthrie

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)

lstm = nn.LSTM(3, 3)  # Input dim is 3, output dim is 3
inputs = [torch.randn(1, 3) for _ in range(5)]  # make a sequence of length 5

# initialize the hidden state.
hidden = (torch.randn(1, 1, 3),
          torch.randn(1, 1, 3))
for i in inputs:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    out, hidden = lstm(i.view(1, 1, -1), hidden)

# alternatively, we can do the entire sequence all at once.
# the first value returned by LSTM is all of the hidden states throughout
# the sequence. the second is just the most recent hidden state
# (compare the last slice of "out" with "hidden" below, they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence
# "hidden" will allow you to continue the sequence and backpropagate,
# by passing it as an argument to the lstm at a later time
# Add the extra 2nd dimension
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))  # clean out hidden state
out, hidden = lstm(inputs, hidden)
print(out)
print(hidden)

but received the following error:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-4-6da0470a479f> in <module>()
      8     # Step through the sequence one element at a time.
      9     # after each step, hidden contains the hidden state.
---> 10     out, hidden = lstm(i.view(1, 1, -1), hidden)
     11 
     12 # alternatively, we can do the entire sequence all at once.

~\Anaconda3\envs\dl\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    355             result = self._slow_forward(*input, **kwargs)
    356         else:
--> 357             result = self.forward(*input, **kwargs)
    358         for hook in self._forward_hooks.values():
    359             hook_result = hook(self, input, result)

~\Anaconda3\envs\dl\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx)
    202             flat_weight=flat_weight
    203         )
--> 204         output, hidden = func(input, self.all_weights, hx)
    205         if is_packed:
    206             output = PackedSequence(output, batch_sizes)

~\Anaconda3\envs\dl\lib\site-packages\torch\nn\_functions\rnn.py in forward(input, *fargs, **fkwargs)
    371 def RNN(*args, **kwargs):
    372     def forward(input, *fargs, **fkwargs):
--> 373         if cudnn.is_acceptable(input.data):
    374             func = CudnnRNN(*args, **kwargs)
    375         else:

~\Anaconda3\envs\dl\lib\site-packages\torch\tensor.py in data(self)
    405     @property
    406     def data(self):
--> 407         raise RuntimeError('cannot call .data on a torch.Tensor: did you intend to use autograd.Variable?')
    408 
    409     # Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`

RuntimeError: cannot call .data on a torch.Tensor: did you intend to use autograd.Variable?

Which PyTorch version are you using?

Hello, based on the output below, I am using 0.3.1.post2:

import torch
print(torch.__version__)

0.3.1.post2

The code works fine with version 0.4.
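
If you are stuck on 0.3.x, wrapping the tensors in Variable also works. Here is a minimal sketch (the helper name maybe_wrap is just for illustration); on 0.4+ Variable(t) simply returns a plain Tensor, so it is harmless there:

import torch
from torch.autograd import Variable

def maybe_wrap(t):
    # Hypothetical helper: on 0.3.x, nn modules expect Variables;
    # on 0.4+ Variable(t) just returns a Tensor, so this runs on both.
    return Variable(t)

inputs = [maybe_wrap(torch.randn(1, 3)) for _ in range(5)]
hidden = (maybe_wrap(torch.randn(1, 1, 3)),
          maybe_wrap(torch.randn(1, 1, 3)))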

It only works if I wrap the inputs and hidden states in a Variable. I guess the error occurred because older versions (see http://pytorch.org/docs/master/autograd.html) require a Variable, right?

# Author: Robert Guthrie

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

torch.manual_seed(1)

lstm = nn.LSTM(3, 3)  # Input dim is 3, output dim is 3
inputs = [Variable(torch.randn(1, 3)) for _ in range(5)]  # make a sequence of length 5

# initialize the hidden state.
hidden = (Variable(torch.randn(1, 1, 3)),
          Variable(torch.randn(1, 1, 3)))
for i in inputs:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    out, hidden = lstm(i.view(1, 1, -1), hidden)

# alternatively, we can do the entire sequence all at once.
# the first value returned by LSTM is all of the hidden states throughout
# the sequence. the second is just the most recent hidden state
# (compare the last slice of "out" with "hidden" below, they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence
# "hidden" will allow you to continue the sequence and backpropagate,
# by passing it as an argument to the lstm at a later time
# Add the extra 2nd dimension
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
hidden = (Variable(torch.randn(1, 1, 3)),
          Variable(torch.randn(1, 1, 3)))  # clean out hidden state
out, hidden = lstm(inputs, hidden)
print(out)
print(hidden)
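
With this working version, you can also check the relationship the comments describe: for this single-layer LSTM, the last time step of out holds the same values as the returned hidden state h_n (only the shapes differ):

print(out[-1])    # output at the final time step, shape (1, 3)
print(hidden[0])  # h_n, shape (1, 1, 3); same values as out[-1]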

Yes. In version 0.4 the Variable and Tensor classes were merged, I believe, so you no longer need to use Variable.
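
For example, on 0.4+ a plain Tensor participates in autograd directly (a quick sketch):

import torch

x = torch.randn(1, 1, 3, requires_grad=True)  # no Variable needed
loss = (x * 2).sum()
loss.backward()                # autograd works on the tensor itself
print(x.grad)

# Variable still exists, but only as a deprecated alias that
# returns a plain Tensor:
from torch.autograd import Variable
print(type(Variable(torch.randn(2))))  # <class 'torch.Tensor'>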