Hi Guys,
I’m trying to apply noises to samples, here are the features of nodes in the graph, during test phase in graph neural networks (to perturb samples). The noises should be added to samples in the (opposite) direction of gradient. The following codes are part of the main code that I call the function and the next one is the function I wrote to apply perturb samples. It is almost three weeks that I’ve tried to solve it and couldn’t
Could you please have a look to see if you have any idea how to solve it.
with torch.set_grad_enabled(‘test’ in phase):
x, p_logits = GNN(data.x, data.edge_index, data.edge_weight)
if mode == 'Supervised':
loss = criterion_BCE(p_logits, data.y)
elif mode == 'Unsupervised':
sum_rates_continuous_p = torch.sum(calc_rates(data.h, torch.sigmoid(p_logits).view(-1, k), P_max, noise_var), dim=1)
loss = - torch.mean(sum_rates_continuous_p)
else:
raise ValueError('Mode {} not supported! (Supported modes: Supervised/Unsupervised)'.format(mode))
if 'test' in phase:
loss.backward(retain_graph=True)
data.x = fgsm_attack_new(loss, data.x, epsilon=.1)
sum_rates = torch.sum(calc_rates(data.h, 1. * (p_logits > 0).view(-1, k), P_max, noise_var), dim=1)
all_rates.extend(sum_rates.detach().cpu().numpy().tolist())
all_losses.append(loss.item())
all_accs.append(torch.mean(1. * ((p_logits > 0) == data.y)))
My function is
def fgsm_attack_new(loss, x, epsilon):
#x.requires_grad = True
x = Variable(x, requires_grad=True)
grad = torch.autograd.grad(loss, x, allow_unused=True)
grad = np.array(grad,dtype=float)
np.nan_to_num(grad, copy=False)
torch.tensor(grad, dtype=torch.float)
print("grad is:{}".format(grad))
perturebed_x = x - epsilon * grad
perturebed_x = torch.clamp(perturebed_x, 0, 1)
return perturebed_x