Hi everyone, I'm getting an error with this code:
import pandas as pd
import sklearn.model_selection
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# batch_size, lr, device, dim_val, etc. are assumed to be defined earlier in the script
# note: read_csv's squeeze argument was removed in pandas 2.0 and is a no-op on a multi-column frame
dataset = pd.read_csv('out.csv', delimiter=',', skiprows=1)
x = dataset.iloc[:, 0:4]   # first four columns as features
y = dataset.iloc[:, [4]]   # fifth column as target
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(
    x, y, test_size=0.2, random_state=42)
# DataLoader needs a Dataset, not a tuple of DataFrames, so wrap the splits in TensorDatasets
train_set = TensorDataset(torch.tensor(x_train.values, dtype=torch.float32),
                          torch.tensor(y_train.values, dtype=torch.float32))
test_set = TensorDataset(torch.tensor(x_test.values, dtype=torch.float32),
                         torch.tensor(y_test.values, dtype=torch.float32))
print(train_set)
print(test_set)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, drop_last=True, shuffle=False)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, drop_last=True, shuffle=False)
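A quick way to confirm the loaders now yield proper tensor batches (just a sanity check, continuing from the code above):

# peek at one batch: shapes should be [batch_size, 4] for inputs and [batch_size, 1] for labels
first_inputs, first_labels = next(iter(train_loader))
print(first_inputs.shape, first_labels.shape)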
model = Transformer(dim_val, dim_attn, input_size, dec_seq_len, out_seq_len,
                    n_decoder_layers, n_encoder_layers, n_heads)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.MSELoss()   # build the loss once, outside the loop
losses = []
for b, (inputs, labels) in enumerate(train_loader):
    inputs = inputs.to(device)   # the loader already yields tensors, so no extra torch.tensor() call
    labels = labels.to(device)
    scores = model(inputs)
    loss = criterion(scores, labels)
    losses.append(loss.item())   # item(), not items()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# the error is in the line below
print(f"Cost at epoch {epochs} is {sum(losses)/len(losses)}")
def check_accuracy(loader, model):
    # note: argmax accuracy only makes sense if y holds integer class labels
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for z, y in loader:
            z = z.to(device)
            y = y.to(device)
            scores = model(z)
            _, predictions = scores.max(dim=1)   # max() needs a dim to return (values, indices)
            # y has shape [batch, 1] here, so drop the trailing dim before comparing
            num_correct += (predictions == y.squeeze(1)).sum().item()
            num_samples += predictions.size(0)
        print(
            f"Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}"
        )
    model.train()
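One thing worth flagging: the model is trained with nn.MSELoss, which implies a regression target, while check_accuracy computes argmax accuracy, which only fits classification. If the targets really are continuous, a sketch of an MSE-style evaluation (a hypothetical check_mse helper, not part of the original code) might look like this:

def check_mse(loader, model):
    # hypothetical helper: reports mean squared error instead of classification accuracy
    model.eval()
    total_sq_err = 0.0
    num_samples = 0
    with torch.no_grad():
        for z, y in loader:
            z, y = z.to(device), y.to(device)
            preds = model(z)
            total_sq_err += ((preds - y) ** 2).sum().item()
            num_samples += y.numel()
    model.train()
    print(f"MSE: {total_sq_err / num_samples:.4f}")

Calling check_mse(test_loader, model) would then replace the accuracy printout with a mean-squared-error figure.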
print("Checking accuracy on Training Set")
check_accuracy(train_loader, model)
print("Checking accuracy on Test Set")
check_accuracy(test_loader, model)