The loss remains the same in a PyTorch neural network

class Network(nn.Module):

    def __init__(self, inputs, hidden_layers, outputs):
        super(Network, self).__init__()
        print(hidden_layers)
        self.fc1 = nn.Linear(inputs, hidden_layers[0])
        self.fc2 = nn.Linear(hidden_layers[0], hidden_layers[1])
        self.fc3 = nn.Linear(hidden_layers[1], hidden_layers[2])
        self.fc4 = nn.Linear(hidden_layers[2], hidden_layers[3])
        self.fc5 = nn.Linear(hidden_layers[3], hidden_layers[4])
        self.fc6 = nn.Linear(hidden_layers[4], hidden_layers[5])
        self.fc9 = nn.Linear(hidden_layers[5], outputs)

    def forward(self, x):
        out = torch.relu(self.fc1(x))
        out = torch.relu(self.fc2(out))
        out = torch.sigmoid(self.fc3(out))
        out = torch.sigmoid(self.fc4(out))
        out = torch.sigmoid(self.fc5(out))
        out = torch.sigmoid(self.fc6(out))
        out = torch.sigmoid(self.fc9(out))
        return out

inputs = SIZE*SIZE*2
print(inputs)
x = inputs
hidden_layers = []
while x>=2:
    x/=4
    if(x>=2):
        hidden_layers.append(int(x))
print(hidden_layers)
outputs = 1

model = Network(inputs, hidden_layers, outputs)

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(
    model.parameters(),
    lr=0.0001,
    momentum=0.9)
N_EPOCHS = 100
for epoch in range(N_EPOCHS):
    optimizer.zero_grad()
    model.train()
    for i,(im1, im2, labels) in enumerate(train_dl):
        i1 = torch.flatten(im1, 1)
        i2 = torch.flatten(im2, 1)
        inp = torch.cat([i1, i2], 1)

        b_x = Variable(inp)     # batch x
        b_y = Variable(labels)  # batch y
        y_ = model(b_x).squeeze()
        y_ = (y_ > 0.5).float()

        print(y_)
        print(labels)
        loss = criterion(y_, b_y)
        loss = Variable(loss, requires_grad=True)
        print(loss.item())

        loss.backward()
        optimizer.step()

You are re-wrapping the loss in a new Variable here:

loss = Variable(loss, requires_grad=True)

which breaks the computation graph: the new Variable is detached from the model's output, so backward() no longer propagates gradients to the model's parameters. Remove this line, and drop the other Variable wrappers as well; Variable has been deprecated since PyTorch 0.4, and plain tensors work with autograd directly.
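
For reference, here is a minimal sketch of the training loop with the Variable wrappers removed (assuming train_dl, model, criterion, optimizer and N_EPOCHS are defined as above; the loss is computed on the raw model output so it stays attached to the graph, and the labels are cast to float for MSELoss):

for epoch in range(N_EPOCHS):
    model.train()
    for i, (im1, im2, labels) in enumerate(train_dl):
        inp = torch.cat([torch.flatten(im1, 1), torch.flatten(im2, 1)], 1)

        optimizer.zero_grad()                  # clear gradients for each batch
        y_ = model(inp).squeeze()              # plain tensor, still part of the computation graph
        loss = criterion(y_, labels.float())   # no Variable re-wrapping

        loss.backward()                        # gradients now reach the model parameters
        optimizer.step()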