```
from pyrfm import OrthogonalRandomFeature,CompactRandomFeature,RandomFourier,FastFood
class Kernel_Mapping(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
transformer = RandomFourier(n_components=10,
kernel='rbf',
use_offset=True, random_state=0)
inputs = input.detach().numpy()
#inputs = input.numpy()
inputs_trans = transformer.fit_transform(inputs)
## Do I want to save the transformed for backward?
ctx.save_for_backward(input)
return torch.as_tensor(inputs_trans, dtype=input.dtype)
@staticmethod
def backward(ctx,g):
return g
loader = DataLoader(dataset, batch_size=len(dataset), shuffle=False)
generator = torch.Generator()
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#model = model.to(device)
for inputs, targets in loader:
sensitive_attributes = (inputs[:, sensitive_attribute_idx])[:, None]
inputs = drop_attribute_tensor(inputs,40)
inputs = inputs.to(device)
targets = targets.to(device)
print(f'Local Contribution {type(inputs)}')
#import pdb;pdb.set_trace()
#targets = targets
#inputs = inputs.1
outputs = model(inputs)
#outputs = model(inputs[:,:-1])
Z_outputs= Kernel_Mapping.apply(outputs.clone())
Z_sens_attr = Kernel_Mapping.apply(sensitive_attributes)
## Are they sampled independently?
phi_hat = torch.normal(0., 1.0/R, (len(dataset), R), generator=generator.manual_seed(random_seed))
omega_hat = torch.normal(0., 1.0/T, (len(dataset), T), generator=generator.manual_seed(int(random_seed/2)))
phi_sens_attr = phi_hat.T@Z_sens_attr
phi_outputs = phi_hat.T@Z_outputs
omega_sens_attr = omega_hat.T@Z_sens_attr
omega_outputs = omega_hat.T@Z_outputs
inner_arg_sens = matrices['phi_s']@ matrices['omega_s'].T
inner_arg_output = matrices['omega_f'] @matrices['phi_f'].T
inner_arg = inner_arg_sens @ inner_arg_output
fair_loss = torch.trace(inner_arg)
fair_loss = (params["fairness_weight"]/( (len(train_dataset) -1)**2) ) * fair_loss
#reg_loss = (1.0)/(2.0*step_size) * (model_l2(subtract_models(client_model, global_model)))
loss = loss_func(outputs, targets) + params["fairness_weight"]/( (len(train_dataset) -1)**2) * torch.trace(inner_arg)
#overall_loss +=loss.item()
f_loss += fair_loss.item()
#torch.autograd.set_detect_anomaly(True)
loss.backward(retain_graph = True)
The error: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [32, 1]], which is output 0 of AsStridedBackward0, is at version 146; expected version 145 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
```

The Runtime error seems to be when I use feature_mapping on outputs. When I use :

Z_outputs= Kernel_Mapping.apply(outputs.detach()) I see no error but that

is wrong.