I wish to build a neural network that is both amenable to autograd and includes complex-values for input and output. For the latter, I applied `torch.cfloat`

to change the dtypes in the neural network. However, I’m struggling to fix this error which I get after running my code. This is a snippet:

```
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import grad
dtype=torch.cfloat
class mySin(torch.nn.Module):
@staticmethod
def forward(input):
return torch.sin(input)
class Net(torch.nn.Module):
def __init__(self, D_hid=10):
super(Net,self).__init__()
self.actF = mySin()
self.Ein = torch.nn.Linear(1,1, dtype=torch.cfloat)
self.Lin_1 = torch.nn.Linear(2, D_hid, dtype=torch.cfloat)
self.Lin_2 = torch.nn.Linear(D_hid, D_hid, dtype=torch.cfloat)
self.out = torch.nn.Linear(D_hid, 1, dtype=torch.cfloat)
def forward(self,t):
In1 = self.Ein(torch.ones_like(t, dtype=torch.cfloat))
L1 = self.Lin_1(torch.cat((t,In1),1))
h1 = self.actF(L1)
L2 = self.Lin_2(h1)
h2 = self.actF(L2)
out = self.out(h2)
return out, In1
def dfx(x,f):
# Calculate the derivative with auto-differention
return grad([f], [x], grad_outputs=torch.ones(x.shape, dtype=dtype), create_graph=True)[0]
# differential equation residual
def diffeq_residual(t,psi, E):
psi_dx = dfx(t,psi)
psi_ddx= dfx(t,psi_dx)
f = (psi_ddx)/2 + E*(psi)
L = (f.pow(2)).mean();
return L
# testing the neural network
net = Net()
input = torch.rand(4).reshape(-1,1)
input.requires_grad = True
nn, En = net(input)
Loss = diffeq_residual(input, nn, En)
print(Loss)
```