Problem with .detach()

I am trying to use different activations in the forward and backward passes. I wrote the code below, but it does not seem to work properly in the backward pass.
Do I have to include anything else?

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 64)
        self.fc2 = nn.Linear(64, 10)
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x):
        x1 = F.relu(self.fc1(x))
        x_backward = x1
        # binarize the activation for the forward pass
        x1[x1 <= 0] = 0
        x1[x1 > 0] = 1
        x_forward = x1
        # forward should use x_forward, gradients should flow through x_backward
        y1 = x_backward + (x_forward - x_backward).detach()
        y2 = self.fc2(y1)
        y3 = F.log_softmax(y2, dim=1)
        return y3


for epoch in range(epochs):
    total_train = 0
    correct_tr = 0
    for batch_idx, (data, label) in enumerate(train_set):
        data, label = Variable(data), Variable(label)
        data = data.view(-1, 28*28)
        optimizer.zero_grad()
        data = data.float()
        net_out = net(data)
        print(net_out.size())
        loss = criterion(net_out, label)
        loss.backward()
        optimizer.step()
        .....

If you assign one tensor to another name, both names refer to the same underlying tensor, so your in-place operations change x_backward and x1 together.
Try calling x_backward = x1.clone() and test the code again.
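
For reference, a minimal sketch of how the forward method could look with that change. Note that the binarization is written out of place here ((x1 > 0).float() instead of the masked in-place assignments) so that the ReLU output saved by autograd is not modified in place; the clone() keeps the two paths explicitly separate, and the detach trick itself is unchanged:

def forward(self, x):
    x1 = F.relu(self.fc1(x))
    # independent copy: whatever happens to x1 afterwards no longer touches this
    x_backward = x1.clone()
    # binarized activation for the forward pass, computed out of place
    x_forward = (x1 > 0).float()
    # forward value comes from x_forward, gradients flow through x_backward
    y1 = x_backward + (x_forward - x_backward).detach()
    y2 = self.fc2(y1)
    return F.log_softmax(y2, dim=1)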

Double post from here.

Thank you. It works.