Loss penalty on the norm of second-order gradients

I’m trying to test a penalty on the norm of the second-order gradients:

import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.linear = nn.Linear(64 * 5 * 5, 10)

    def forward(self, input):
        conv1 = self.conv1(input)
        pool1 = self.pool(conv1)
        relu1 = self.relu(pool1)
        conv2 = self.conv2(relu1)
        pool2 = self.pool(conv2)
        relu2 = self.relu(pool2)
        relu2 = relu2.view(relu2.size(0), -1)
        return self.linear(relu2)

model = Net()
# kaiming_normal_ expects a tensor, not the .parameters method,
# so apply it to each weight tensor individually
for p in model.parameters():
    if p.dim() > 1:
        torch.nn.init.kaiming_normal_(p)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()
for epoch in range(100):
    for i in range(1000):
        # input and label are assumed to come from the data loader (not shown)
        output = model(input)
        loss = criterion(output, label)
        optimizer.zero_grad()

        # First-order gradients; create_graph=True keeps the graph so they
        # can be differentiated again
        grads = torch.autograd.grad(loss, model.parameters(), create_graph=True)
        grads_sum = sum(g.sum() for g in grads)

        # Second-order gradients of the summed first-order gradients;
        # create_graph=True again so the penalty itself is differentiable
        grads2 = torch.autograd.grad(grads_sum, model.parameters(), create_graph=True)
        g2_norm = sum(g2.norm(p=2) for g2 in grads2)

        # Backpropagate the task loss and the penalty in one pass. Calling
        # loss.backward() before torch.autograd.grad would free the graph
        # and raise an error, so the two are combined here.
        (loss + g2_norm).backward()
        optimizer.step()
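
To convince myself that gradients actually flow through the penalty, I sketched a minimal toy version of the same double-grad pattern (this is only a sanity-check sketch; `toy`, `x`, and `y` are placeholder names, not part of the model above):

# Minimal check that the penalty's backward pass reaches the parameters
toy = nn.Linear(4, 2)
x = torch.randn(8, 4)
y = torch.randint(0, 2, (8,))
loss = nn.CrossEntropyLoss()(toy(x), y)
grads = torch.autograd.grad(loss, toy.parameters(), create_graph=True)
grads_sum = sum(g.sum() for g in grads)
grads2 = torch.autograd.grad(grads_sum, toy.parameters(), create_graph=True)
penalty = sum(g2.norm(p=2) for g2 in grads2)
penalty.backward()
print([p.grad is not None for p in toy.parameters()])  # should print [True, True]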

Is this the right way to do it? Is it important to calculate the individual second-order gradients (the diagonal of the Hessian), or would this method be a good approximation?
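
For reference, this is how I would picture the Hessian-diagonal alternative, using Hutchinson's estimator diag(H) ≈ E[v ⊙ (Hv)] with random ±1 probe vectors. This is only a sketch of what I mean by "individual second-order gradients"; the probe vectors `vs` and the single-sample estimate are my own assumptions, not tested code:

# Sketch: single-sample Hutchinson estimate of the Hessian diagonal.
# diag(H) ~= E[v * (H v)] for random +-1 (Rademacher) vectors v, so one
# sample gives a noisy but unbiased elementwise estimate.
params = [p for p in model.parameters() if p.requires_grad]
grads = torch.autograd.grad(loss, params, create_graph=True)
# One Rademacher probe vector per parameter tensor
vs = [torch.randint_like(p, 2) * 2 - 1 for p in params]
# Hessian-vector product: differentiate <grads, v> w.r.t. the parameters
gv = sum((g * v).sum() for g, v in zip(grads, vs))
hvps = torch.autograd.grad(gv, params, create_graph=True)
# Elementwise v * (Hv) estimates the Hessian diagonal; penalize its norm
diag_est = [v * h for v, h in zip(vs, hvps)]
diag_penalty = sum(d.norm(p=2) for d in diag_est)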