DivBackward0 and CUDA error

Hi,
I’m training a network on distance learning.
My code looks like this:

def dist(p1, p2, p3, r1, r2, r3):
	x1	= torch.norm(p1-p2)
	x2	= torch.norm(p1-p3)
	x3	= torch.norm(p3-p2)

	y1	= torch.norm(r1-r2)
	y2	= torch.norm(r1-r3)
	y3	= torch.norm(r3-r2)

	a	= torch.Tensor([x1,x2,x3])/max((x1,x2,x3))
	b	= torch.Tensor([y1,y2,y3])/max((y1,y2,y3))
	a, b	= a.to(DEVICE), b.to(DEVICE)
	return a, b
...
criterion	= torch.nn.MSELoss()
...
with torch.enable_grad():
	optimizer.zero_grad()
	Ys	= []
	Ts	= []
	for d in data:
		X	= torch.Tensor([d[0]])
		target	= torch.Tensor([d[1]])
		X, target	= X.to(DEVICE), target.to(DEVICE)
		Y	= model(X.float())
		Ys.append(Y)
		Ts.append(target)

	a,b = dist(Ys[0],
			Ys[1],
			Ys[2],
			Ts[0],
			Ts[1],
			Ts[2])
	loss	= criterion(a,b)
	loss.backward()

I get the following error when I try to train my network

 File "C:\Users\dubois-h\Documents\CLAPTRASP\internship-claptrasp\code\CNNCoder\train.py", line 88, in train_proc
    loss.backward()
  File "C:\Users\dubois-h\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\tensor.py", line 102, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "C:\Users\dubois-h\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\autograd\__init__.py", line 90, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: Function DivBackward0 returned an invalid gradient at index 1 - expected type torch.cuda.FloatTensor but got torch.FloatTensor

What am I doing wrong exactly ? How can I fix it ?

Thanks !