Hi everyone,
I am getting this error but I do not see why. I wanted to add a custom parameter to my RNN. The code is as follows :
class RNN_pre_d(nn.Module):
def init(self, hidden_dim=64, num_layers=1):
super(RNN_pre_d, self).init()
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(1, self.hidden_dim, self.num_layers)
self.fcF_c = nn.Linear(self.hidden_dim, 1)
w_final = torch.randn(1)
self.w_final = nn.Parameter(w_final)
def forward(self, x):
x1, (h_1, c_1) = self.lstm(x) ## x.shape = [sequence_len, batch_size, hidden_dim]
c_train = torch.log(1+torch.exp(self.fcF_c(x1.squeeze(1))))
c_train = c_train.squeeze(-1)
for i in range(1,len(c_train)):
c_train[i] = c_train[i] + ( 1/(1+torch.exp(-self.w_final)) ) * c_train[i-1]
return c_train, h_1, c_1
the problem only appears when trying to backprop
NumbEpochs = 100
learning_rate = 10e-04
NumbHiddLayer = 1
sizeHidd = 128
nn_pre_d = RNN_pre_d(hidden_dim = sizeHidd,
num_layers = NumbHiddLayer)
optimizer = optim.Adam(nn_pre_d.parameters(),
lr=learning_rate)
MSE loss function
loss = nn.MSELoss() ; listLossTrain = []
Training data standardized
y = np.random.normal(0, 0.1, 500)
mean_train = np.mean(y) ; std_train = np.std(y) ; Z_stand_train = (y-mean_train) / std_train
data_nn_stand_train = torch.FloatTensor([Z_stand_train])
data_nn_stand_train = data_nn_stand_train.permute(1,0).unsqueeze(-1)
target_train = torch.tensor(y)
Training
for epoch in range(NumbEpochs):
optimizer.zero_grad()
# Make the prediction in training set.
c_train, h_1, c_1 = nn_pre_d.forward(data_nn_stand_train)
# Compute a scalar loss in training set.
loss_train = loss(target_train, c_train)
# Compute the gradient for the model parameters and update model parameters.
loss_train.backward()
optimizer.step()
listLossTrain.append(loss_train.detach().numpy())
Thank you!