TensorBoard: visualizing the graph of a model that uses autograd functions

Hi, I’m trying to visualize the graph for the following model:

import torch

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = Decoder(c_dim=0, dropout_prob=0.2, weight_norm=True, norm_layers=[0, 1, 2, 3, 4, 5, 6, 7])

    def forward(self, p):
        # Earlier attempt using autograd.grad directly:
        # p.requires_grad_(True)
        # sdf = self.decoder(p)
        # normals = autograd.grad(sdf, p, torch.ones_like(sdf), create_graph=True, retain_graph=True)[0]

        # Jacobian of the decoder output w.r.t. the input points
        normals = torch.autograd.functional.jacobian(self.decoder, p, create_graph=True, strict=False)
        # Eikonal-style loss: penalize deviation of the gradient norm from 1
        return torch.mean((torch.norm(normals, dim=-1) - 1) ** 2)

I'm trying to use TensorBoard to visualize it as follows:

import os
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(os.path.join(out_dir))

# create the model
model = Model()

# data: a single point of shape (1, 1, 3)
data = torch.rand((1, 1, 3))

writer.add_graph(model, data, verbose=True)

However, I keep getting the following error:

RuntimeError: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient
Tensor:
(1,.,.) = 
  0.6029  1.4451  2.1129
[ torch.FloatTensor{1,1,3} ]

BTW: the code runs if I don’t attempt to add the graph.
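
For reference, this is roughly the standalone call that runs without error (a minimal sketch; Decoder comes from my own code and the Model class above must be importable):

import torch

# same setup as above, just without the SummaryWriter
model = Model()
data = torch.rand((1, 1, 3))

# calling the model directly works; only writer.add_graph(...) raises the error
loss = model(data)
print(loss)  # scalar loss tensor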