This is my code for running a quantum neural network with PyTorch, but when I try to run it I get this error: RuntimeError: expected scalar type Float but found ComplexFloat
import torch
import torch.nn as nn
import torch.optim as opt
import numpy as np
import scipy
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# generate the data
t = torch.arange(0,1,0.01)
y_0 = torch.tensor(100.)
def diff(y, t):
    dydt = y - 0.5 * y**2
    return dydt
y = odeint(diff,y_0,t)
#extract input and output for modeling
x = t.reshape(-1,1)
y = y.reshape(-1,1)
# split the data into training and testing sets
split = int(0.15 * len(x))  # using 15% of the data for training and 85% for testing
x_train, y_train = x[:split],y[:split]
x_test, y_test = x[split:], y[split:]
x_train_tensor = torch.tensor(x_train).float()
y_train_tensor = torch.tensor(y_train).float()
x_test_tensor = torch.tensor(x_test).float()
y_test_tensor = torch.tensor(y_test).float()
#design quantum feed-forward model
class QuantumLayer(nn.Module):
    def __init__(self, input_size, output_size):
        super(QuantumLayer, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
    def forward(self, x):
        # convert the classical input into a quantum input
        qc_input = torch.zeros((self.input_size), dtype=torch.float)
        qc_input = qc_input.unsqueeze(0).unsqueeze(0)
        for i in range(self.input_size):
            qc_input[i] = x[i]
            # define quantum operators
            qc = torch.zeros((1, self.input_size, self.output_size), dtype=torch.float)
            qc[0, i, :] = torch.tensor([1, 1], dtype=torch.float) / torch.sqrt(torch.tensor([2], dtype=torch.float))
            qc[0, i, :] = torch.matmul(qc[0, i, :], torch.tensor([[1, 1], [1, -1]], dtype=torch.float))
            # the next matmul mixes the float tensor qc[0, i, :] with a cfloat gate matrix; this is where the error is raised
            qc[0, i, :] = torch.matmul(qc[0, i, :], torch.tensor([[1, 0], [0, torch.cos(torch.tensor(np.pi / 4)) + 1j * torch.sin(torch.tensor(np.pi / 4))]], dtype=torch.cfloat))
        # run the quantum circuit
        outp = torch.zeros((self.output_size), dtype=torch.float)
        for i in range(self.output_size):
            outp[i] = torch.abs(torch.matmul(qc_input, qc[:, :, i])).item()**2
        # convert the quantum output back into a classical output
        return outp
class QuantumNet(nn.Module):
    def __init__(self):
        super(QuantumNet, self).__init__()
        self.fc1 = nn.Linear(1, 5)
        self.qc1 = QuantumLayer(5, 2)
        self.fc2 = nn.Linear(2, 1)
    def forward(self, x):
        x = torch.sigmoid(self.fc1(x))
        x = self.qc1(x)
        x = torch.sigmoid(self.fc2(x))
        return x
model = QuantumNet()
loss_function=nn.MSELoss()
optimizer = opt.SGD(model.parameters(), lr=0.01,momentum=0.9)
#training
epochs_number=100
loss_list = []
for epoch in range(epochs_number):
    y_pred = model(x_train_tensor)
    loss = loss_function(y_pred, y_train_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    loss_list.append(loss.item())
    print('epoch {}: loss = {}'.format(epoch + 1, loss.item()))
Is there any solution to fix this error?
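As far as I can tell, the exception comes from the line qc[0,i,:] = torch.matmul(qc[0, i, :], torch.tensor([[1, 0], [0, ...]], dtype=torch.cfloat)): qc is created with dtype=torch.float, while the phase-gate matrix is dtype=torch.cfloat, and torch.matmul will not mix the two dtypes. Below is a minimal sketch (the variable names are made up for illustration, this is not my original layer) that reproduces the mismatch and shows the workaround I am considering: keep the state in torch.cfloat while the gates are applied and convert back to a real tensor only when taking |amplitude|**2.

import torch

state = torch.tensor([1.0, 1.0]) / (2.0 ** 0.5)   # real amplitudes, like qc[0, i, :] after the Hadamard step
phase_gate = torch.tensor([[1, 0],
                           [0, 0.7071 + 0.7071j]], dtype=torch.cfloat)  # complex gate, like the one in my layer

# torch.matmul(state, phase_gate)  # this line reproduces the RuntimeError for me

# possible fix: promote the real tensor to the complex dtype before the matmul,
# and go back to a real number only when computing the probability
state_c = state.to(torch.cfloat)
state_c = torch.matmul(state_c, phase_gate)
prob = torch.abs(state_c) ** 2    # torch.abs of a cfloat tensor is a float tensor again
print(prob.dtype)                 # torch.float32

If that is the right direction, I assume I would also have to create qc_input, qc and the gate matrices inside the layer with dtype=torch.cfloat and keep only outp as a float tensor, so that nn.MSELoss still receives real values - is that the correct way to handle it?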