I am facing an error where my code runs for some time — i.e., I get running-loss values for a number of batches — but midway through, this runtime error pops up again. I have converted my inputs to float, as suggested in many similar posts. Kindly help me out.
# Training loop: one pass over `dataloader`, accumulating a size-weighted
# running loss and printing it at the end of each epoch.
#
# NOTE(review): the reported mid-epoch RuntimeError most likely means some
# batches come out of the Dataset with a different dtype or shape than
# others (e.g. float64 vs float32, or an inconsistent tensor size) — verify
# that the Dataset's __getitem__ returns consistent tensors; the .float()
# casts below fix dtype only, not shape.
epochs = 1
for epoch in range(epochs):
    running_loss = 0.0
    for embeddings, labels in dataloader:
        # Cast both embedding tensors to float32 so they match the model's
        # parameter dtype regardless of what the Dataset emitted.
        embeddings[0] = embeddings[0].float()
        embeddings[1] = embeddings[1].float()
        # CrossEntropyLoss expects int64 class indices; argmax over the
        # one-hot dimension already yields int64, so no .float() cast of
        # the labels is needed first (the original cast was a no-op cost).
        labels = torch.argmax(labels, dim=1)
        optimizer.zero_grad()
        # Call the module, not model.forward(): __call__ runs registered
        # forward/backward hooks that a direct .forward() call skips.
        logits = model(embeddings)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        # Weight the mean batch loss by batch size so the epoch sum stays
        # comparable even when the final batch is short.
        running_loss += loss.item() * embeddings[0].size(0)
    print(f"Training loss: {running_loss}")
<ipython-input-111-2792e98997e4> in <module>()
4 #init_hidden_states = model.init_hidden(4)
5 #dtype = torch.FloatTensor
----> 6 for embeddings, labels in dataloader:
7 embeddings[0],embeddings[1],labels = embeddings[0].float(),embeddings[1].float(),labels.float()
8