Before the modification, the net was a plain nn.Sequential:
import torch

# lh1..lh5 are the layer widths (640, 128, 128, 64 and 1 below)
G_network = torch.nn.Sequential(
    torch.nn.Linear(lh1, lh2),
    torch.nn.CELU(0.1),
    torch.nn.Linear(lh2, lh3),
    torch.nn.CELU(0.1),
    torch.nn.Linear(lh3, lh4),
    torch.nn.CELU(0.1),
    torch.nn.Linear(lh4, lh5),
)
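For reference, an nn.Sequential exposes its submodules by integer index, which is what the per-layer optimizer groups further down rely on. A quick check (the printed shapes assume the 640/128/128/64/1 sizes used later):

# The Linear layers sit at indices 0, 2, 4, 6; the CELU activations
# occupy the odd slots in between.
print(G_network[0])               # Linear(in_features=640, out_features=128, bias=True)
print(G_network[0].weight.shape)  # torch.Size([128, 640])
print(G_network[0].bias.shape)    # torch.Size([128])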
After the modification (made for easier debugging), the same net is an nn.Module subclass:
import torch.nn as nn
import torch.nn.functional as F

class G_network(nn.Module):
    def __init__(self, lh1, lh2, lh3, lh4, lh5):
        super(G_network, self).__init__()
        # four fully connected layers; lh1..lh5 are the layer widths
        self.fc1 = nn.Linear(lh1, lh2)
        self.fc2 = nn.Linear(lh2, lh3)
        self.fc3 = nn.Linear(lh3, lh4)
        self.fc4 = nn.Linear(lh4, lh5)

    def forward(self, data):
        data = F.celu(self.fc1(data), 0.1)
        data = F.celu(self.fc2(data), 0.1)
        data = F.celu(self.fc3(data), 0.1)
        return self.fc4(data)  # no activation on the output layer

G_network = G_network(640, 128, 128, 64, 1)
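An nn.Module subclass registers its parameters under the attribute names given in __init__ (fc1, fc2, ...), not under integer indices. Listing them makes the names explicit; this is only a sanity check, not part of the training code:

for name, param in G_network.named_parameters():
    print(name, tuple(param.shape))
# fc1.weight (128, 640)
# fc1.bias   (128,)
# ...
# fc4.weight (1, 64)
# fc4.bias   (1,)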
After the modification the optimizer setup below broke: G_network is no longer an nn.Sequential, so indexing like G_network[0] raises a TypeError and the weights and biases could not be updated separately. The fix is to reference the layers by their attribute names instead:
import torchani

# AdamW updates only the weights, with per-layer weight decay
AdamW = torchani.optim.AdamW([
    {'params': [G_network.fc1.weight]},
    {'params': [G_network.fc2.weight], 'weight_decay': 0.000001},
    {'params': [G_network.fc3.weight], 'weight_decay': 0.0000001},
    {'params': [G_network.fc4.weight]},
], lr=adam_lr)

# SGD updates only the biases
SGD = torch.optim.SGD([
    {'params': [G_network.fc1.bias]},
    {'params': [G_network.fc2.bias]},
    {'params': [G_network.fc3.bias]},
    {'params': [G_network.fc4.bias]},
], lr=sgd_lr)
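With the two optimizers in place, a training step simply drives both of them. A minimal sketch, assuming an MSE loss; the random batch x, target y, and the loss function are placeholders, not part of the original code:

# hypothetical batch: 32 samples of 640 features, scalar targets
x = torch.randn(32, 640)
y = torch.randn(32, 1)

AdamW.zero_grad()
SGD.zero_grad()
loss = torch.nn.functional.mse_loss(G_network(x), y)
loss.backward()
AdamW.step()  # updates the weights
SGD.step()    # updates the biases

Splitting by attribute name also generalizes: the weight and bias lists could instead be collected with a filter over G_network.named_parameters() (names ending in 'weight' vs. 'bias'), at the cost of losing the per-layer weight_decay settings used above.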