I got this error:
RuntimeError: expected scalar type Double but found Float
even though my input is already torch.float64 and so are my model parameters. Can anyone explain why this still happens?
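To back up that claim, this is the kind of check I mean (a minimal sketch; it uses the `model` and `X_tensor` from the code below):

# quick dtype check (sketch): every parameter and the input report float64
for name, param in model.named_parameters():
    print(name, param.dtype)   # torch.float64 for every parameter
print(X_tensor.dtype)          # torch.float64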
Here is the full training code:

from pyspark.sql.functions import col
import numpy as np
import torch
import torch.nn as nn
from sklearn.preprocessing import StandardScaler, MinMaxScaler

train_df = df_final.filter(col('TIMESTEP') < 90).toPandas()
test_df = df_final.filter(col('TIMESTEP') >= 90).toPandas()

model = GRUModel(input_dim, hidden_dim, num_layers, output_dim).double()
weights = None

# convert model parameters to double (redundant with .double() above,
# added while trying to fix the error)
for param in model.parameters():
    param.data = param.data.double()

for timestep in sorted(train_df['TIMESTEP'].unique()):
    print('FITTING TIMESTEP: ' + str(timestep))
    train = train_df[train_df['TIMESTEP'] == timestep]
    train_X = train[X].astype(np.float64)
    train_y = train[y].astype(np.float64)

    # fit the scalers on this timestep's data
    scaler_train_std = StandardScaler()
    scaler_train_minmax = MinMaxScaler()
    scaler_train_y_minmax = MinMaxScaler()
    train_X[scaling_mapping['std']] = scaler_train_std.fit_transform(train_X[scaling_mapping['std']])
    train_X[scaling_mapping['MinMax']] = scaler_train_minmax.fit_transform(train_X[scaling_mapping['MinMax']])
    train_y[y] = scaler_train_y_minmax.fit_transform(train_y[y])

    # build double-precision tensors and reshape to (batch, seq_len=3, features)
    data_X_tensor = torch.tensor(train_X.values, dtype=torch.double)
    data_y_tensor = torch.tensor(train_y.values, dtype=torch.double)
    X_tensor = data_X_tensor.view(-1, 3, len(train_X.columns))
    y_tensor = data_y_tensor.view(-1, 3, len(train_y.columns))

    # defining the loss function and optimizer
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training the model
    for epoch in range(num_epochs):
        # forward pass
        outputs = model(X_tensor)
        loss = criterion(outputs[:, -1, :], y_tensor[:, -1, :])

        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # print the loss
        if (epoch + 1) % 10 == 0:
            print(f'Epoch {epoch+1}/{num_epochs}, Loss: {loss.item():.4f}')

    weights = model.state_dict()
As you can see, I added several redundant conversion lines while trying to solve the problem, but they don't help.
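For context, GRUModel is the usual GRU-plus-linear-head pattern. The sketch below is an assumed stand-in, not my exact class (only the constructor arguments match the call above):

import torch
import torch.nn as nn

class GRUModel(nn.Module):  # sketch of the assumed shape, not the exact class
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.gru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # note: torch.zeros uses the global default dtype (float32) unless a
        # dtype is passed, so a Float tensor can appear inside a Double model
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim)
        out, _ = self.gru(x, h0)
        return self.fc(out)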