Here is main.py:

# imports assumed from how the names are used below
import numpy as np
import torch
import torch.nn.functional as Func
import torch.optim as optimizer
from torch.nn import Parameter

w1 = Parameter(torch.Tensor(np.random.rand(NF.shape[1] * 2, NF.shape[1])))
w2 = Parameter(torch.Tensor(np.random.rand(2,1)))
w4 = Parameter(torch.Tensor(np.random.rand(NF.shape[1] * 2, NF.shape[1])))
w5 = Parameter(torch.Tensor(np.random.rand(2, 1)))
## regression weight
tmp = NF.shape[1] + EF.shape[1]
w3 = Parameter(torch.Tensor(np.random.rand(tmp, 1)))
bias = Parameter(torch.Tensor(np.random.rand(1, 1)))
opt = optimizer.Adam([w1, w2, w3, bias, w4, w5], lr=0.01)
torch.nn.init.xavier_uniform_(w1)
torch.nn.init.xavier_uniform_(w2)
torch.nn.init.xavier_uniform_(w3)
torch.nn.init.xavier_uniform_(bias)
torch.nn.init.xavier_uniform_(w4)
torch.nn.init.xavier_uniform_(w5)
mymodel = model(target, hop_num, A, NF, EF, w1, w2, w3, w4, w5, bias)
### train
for epoch in range(3):
    print('=================== epoch {} start ======================='.format(epoch + 1))
    var = mymodel.hop_info()
    y = torch.FloatTensor([[labels[target]]])
    cost = Func.mse_loss(var, y)  # ( x.clone(), y )
    opt.zero_grad()
    cost.backward()
    opt.step()
and here is model.py (the relevant method):
def hop_info(self):
    with torch.autograd.set_detect_anomaly(True):
        # concat NF
        self.ANF = torch.matmul(self.A, self.NF)
        temp_NF = torch.cat((self.NF, self.ANF), dim=1)
        self.NF = Func.relu(normalize(torch.matmul(temp_NF, self.w1)))
        # self.NF = normalize(torch.matmul(temp_NF, self.w1))
        print(self.w1)

        # concat EF
        self.AEF = torch.matmul(self.A, self.EF)
        temp_EF = torch.cat((self.EF, self.AEF), dim=1)
        self.EF = Func.relu(normalize(torch.matmul(temp_EF, self.w2)))
        print(self.w2)

        # concat NF
        self.ANF = torch.matmul(self.A, self.NF)
        temp_NF = torch.cat((self.NF, self.ANF), dim=1)
        self.NF = Func.relu(normalize(torch.matmul(temp_NF, self.w4)))
        print(self.w4)

        # concat EF
        self.AEF = torch.matmul(self.A, self.EF)
        temp_EF = torch.cat((self.EF, self.AEF), dim=1)
        self.EF = Func.relu(normalize(torch.matmul(temp_EF, self.w5)))
        print(self.w5)

        self.allF = torch.cat((torch.Tensor(self.NF), torch.Tensor(self.EF)), dim=-1)
        self.targetF = self.allF[self.target]
        self.prediction = torch.matmul(self.targetF, self.w3) + self.bias

    return self.prediction
When I run this, epoch 1 works fine, but in epoch 2 opt.step() fails with the following error:
“RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [2, 1]] is at version 3; expected version 2 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!”
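My suspicion (completely unverified) is that it has to do with hop_info() overwriting self.NF and self.EF on every call, so that epoch 2's forward pass builds on tensors that still carry epoch 1's graph, while opt.step() has already updated the weights in place in between. A stripped-down sketch of that carry-over pattern, with made-up shapes and a single weight standing in for my model, would be:

import torch

w = torch.nn.Parameter(torch.rand(2, 1))
opt = torch.optim.Adam([w], lr=0.01)
feat = torch.rand(3, 2)  # stand-in for self.EF

for epoch in range(3):
    # feat is carried over from the previous iteration, so it still holds
    # that iteration's autograd graph instead of starting from fresh data
    feat = torch.relu(feat @ w @ w.t())
    loss = feat.sum()
    opt.zero_grad()
    loss.backward()
    opt.step()  # in-place Adam update of w, between the old and new graphs

In that sketch, epoch 2's backward() has to walk back through epoch 1's operations, which saved w before the optimizer changed it in place. I am not sure whether my model hits exactly the same thing, though.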
How can I fix this? Any help is appreciated.