I had found some people posted the same error. I am now sure what is going on with my code.
class Neural_Net(nn.Module):
def __init__(self):
super(Neural_Net, self).__init__()
self.conv1 = nn.Conv1d(4, 10, 20, stride=10, padding=5)
# self.maxpool1 = nn.MaxPool1d(5)
self.conv2 = nn.Conv1d(10, 10, 20, stride=10, padding=5)
# self.maxpool2 = nn.MaxPool1d(5)
self.lstm1 = nn.LSTM(10,22)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(11220, 1000)
self.output = nn.Linear(1000,22)
def forward(self, x):
x = torch.transpose(x,2,1)
x = x.to(dtype=torch.float32)
x = self.relu(self.conv1(x))
# x = self.maxpool1(x)
x = self.relu(self.conv2(x))
# x = self.maxpool2(x)
x = torch.transpose(x,2,1)
[x, hidden] = self.lstm1(x)
x = x.view(-1,11220)
x = torch.squeeze(x)
x = self.relu(self.linear1(x))
out = self.output(x)
return out
the input size is batch_size x 1000 x 4, then traspose it.