Does calling forward() directly break the computational graph or the hooks?

I have a case where I need to call the forward method directly, like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    # This defines the structure of the NN.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()  #Dropout
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2)) 
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
      
    def sampler(self,x):
        print("*",end=" ",flush=True)
        y = self.forward(x)

        # Do something to y!
        # ...
        return y


model = Net()
y = model.sampler(x)          # x: an input batch
loss = F.nll_loss(y, target)  # target: the corresponding labels
loss.backward()

Will the loss backpropagation work normally without breaking any internal hooks? If it does break them, what's the alternative without changing the model structure?
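
For reference, this is roughly how I would check whether the module hooks still fire; it's only a sketch, and the 1x1x28x28 dummy input is just an assumption so that the 320-feature flatten in forward() works:

import torch

model = Net()
x = torch.randn(1, 1, 28, 28)   # dummy MNIST-sized input (assumption)

fired = []
# Hooks on the top-level module and on one submodule, to see which ones run.
model.register_forward_hook(lambda m, inp, out: fired.append("Net"))
model.conv1.register_forward_hook(lambda m, inp, out: fired.append("conv1"))

y = model.sampler(x)   # sampler() calls self.forward(x) directly
print(fired)           # which hooks fired here?

fired.clear()
y = model(x)           # going through __call__ instead, for comparison
print(fired)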

Thanks in advance.