I tried also coding like this (it’s an example to test how to make sequences):
class DataPrueba(Dataset):
def __init__(self, data, seq, labels=None, transforms=None):
self.X = data
self.y = labels
self.seq = seq
self.transforms = transforms
def __len__(self):
return len(np.asarray(self.X))
def __getitem__(self, i):
if self.y is not None:
if i + self.seq > self.__len__():
if self.transforms is not None:
item = []
item[:self.__len__()-i] = self.transforms(self.X[i:])
target_item = []
target_item[:self.__len__()-i] = self.transforms(self.y[i:])
data = item
labels = target_item
else:
item = []
item[:self.__len__()-i] = self.X[i:]
data = item
target_item = []
target_item[:self.__len__()-i] = self.y[i:]
labels = target_item
else:
if self.transforms is not None:
data = self.transforms(self.X[i:i+self.seq])
labels = self.transforms(self.y[i:i+self.seq])
else:
data = self.X[i:i+self.seq]
labels = self.y[i:i+self.seq]
return data, labels
else:
if i + self.seq > self.__len__():
if self.transforms is not None:
item = []
item[:self.__len__()-i] = self.transforms(self.X[i:])
data = item
else:
item = []
item[:self.__len__()-i] = self.X[i:]
data = item
else:
if self.transforms is not None:
data = self.transforms(self.X[i:i+self.seq])
else:
data = self.X[i:i+self.seq]
return data
x = torch.randn(6, 10)
labels = torch.randint(0, 6, (6,))
dataset = DataPrueba(x, seq=2, labels=labels, transforms=None)
loader = DataLoader(dataset, batch_size=2)
in_size = x.shape[1]
for i, j in loader:
print(i)
print(i.shape)
print(j)
print(j.shape)
break
model = nn.Sequential(nn.LSTM(input_size=10, hidden_size=35,num_layers=1, batch_first=True)),
nn.Linear(35, 6))
input_seq = i
output_seq, _ = model(torch.FloatTensor(input_seq))
last_output = output_seq[-1]
loss = nn.CrossEntropyLoss()
err = loss(last_output, j)
err.backward()
However if the Linear layeres is uncommented I get this error, that I guess t happens because the input is two sequences:
Traceback (most recent call last):
File "/home/lauram/Desktop/RNN_TIMESEQ.py", line 110, in <module>
output_seq, _ = model(torch.FloatTensor(input_seq))
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 93, in forward
return F.linear(input, self.weight, self.bias)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py", line 1688, in linear
if input.dim() == 2 and bias is not None:
AttributeError: 'tuple' object has no attribute 'dim'
And when the Linear layer is commented then the CrossEntropyLoss cant be applied because as the input is two sequences is a 2D tensor, while CrossEntropyLoss expects just 1D tensor…
Traceback (most recent call last):
File "/home/lauram/Desktop/RNN_TIMESEQ.py", line 114, in <module>
err = loss(last_output, j)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 961, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py", line 2468, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "/home/lauram/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py", line 2264, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: 1D target tensor expected, multi-target not supported
Any idea on how to fix it?