I have a PyTorch network, shown below. I want my neural network to calibrate the variables sigma and lam.
import numpy as np
import torch
import torch.nn as nn

class SDE(nn.Module):
    def __init__(self, lam, sigma):
        super().__init__()
        self.lam = lam
        self.sigma = sigma

    def forward(self, T, steps, Npaths):
        np.random.seed(4)  # fixed seed so the simulated paths are reproducible
        # The parameters are detached and converted to NumPy before the simulation
        lam = self.lam.detach().numpy()
        sigma = self.sigma.detach().numpy()
        .....
        return sigma * lam * xx
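For reference, on its own the SDE module is called like this (this mirrors the call inside MyModel below; the elided part is the path simulation):

sde = SDE(torch.tensor(1.0), torch.tensor(0.2))
paths = sde(1.0, 16, 1)  # T=1.0, steps=16, Npaths=1; returns a NumPy array, not a tensor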
My main network is:
class MyModel(nn.Module):
    def __init__(self, args):
        super(MyModel, self).__init__()
        self.lam = nn.Parameter(torch.tensor(1.0), requires_grad=True)
        self.sigma = nn.Parameter(torch.tensor(0.2), requires_grad=True)
        # GRU layers
        self.gru = nn.GRU(
            self.input_dim, self.hidden_dim, self.layer_dim, batch_first=True,
            dropout=args.dropout, bidirectional=True)
        # SDE (kept as an attribute so forward() can call it)
        self.levy = SDE(self.lam, self.sigma)
        # Fully connected layer
        self.fc = nn.Linear(self.hidden_dim * 2, self.output_dim)
    def forward(self, x):
        lev = torch.from_numpy(self.levy(1.0, 16, 1))
        .....
        h0 = torch.zeros(self.args['num_layers'] * 2, x.size(0), self.args['n_hidden_units'],
                         device=x.device).requires_grad_()
        out, _ = self.gru(x, h0.detach())
        out = out[:, -1, :]
        out = self.fc(out)
        out_m = torch.mul(out, lev)
        return out_m
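To see whether lam and sigma actually receive gradients, I ran a quick check like this (the input shape and input_dim are placeholders for whatever my real setup uses):

model = MyModel(args)
x = torch.randn(4, 10, input_dim)        # (batch, seq_len, features) -- made-up shape
loss = model(x).sum()                    # dummy loss, just for the check
loss.backward()
print(model.lam.grad, model.sigma.grad)  # None would mean no gradient reaches them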
Does this network calibrate the sigma and lam variables? I'm a beginner and I don't know whether I'm doing this right.
The training step will be something like this:
# Makes predictions
yhat = self.model(x)
# Computes loss
loss = self.loss_fn(y, yhat)
# Computes gradients
#loss.requires_grad = True
loss.backward()
# Updates parameters and zeroes gradients
self.optimizer.step()
self.optimizer.zero_grad()
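Fully spelled out, I imagine the training loop looks roughly like this (the optimizer, loss function, and train_loader are placeholders; I picked Adam and MSE just as an example):

model = MyModel(args)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # lam and sigma are registered parameters, so they are included here
loss_fn = nn.MSELoss()

for x, y in train_loader:
    optimizer.zero_grad()
    # Makes predictions
    yhat = model(x)
    # Computes loss
    loss = loss_fn(yhat, y)
    # Computes gradients
    loss.backward()
    # Updates parameters (sigma and lam should move if gradients reach them)
    optimizer.step()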