My question-answering NLP model is taking too long to train; the GPU sits mostly unutilized, and my loss is around 3-4 million.
I have 13 GB of RAM and a 15 GB GPU on Google Colab. Please help me!
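For context, this is roughly how I set device in an earlier cell (I'm not copying the exact cell, so treat this as an approximation):

import torch

# assumed setup from an earlier cell, not my exact code
device = "cuda" if torch.cuda.is_available() else "cpu"
print(device)  # shows "cuda" when the Colab GPU runtime is active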
This is my training code:
from tqdm.auto import tqdm

epochs = 5
set_seed()
model_0.to(device)

# training
for epoch in tqdm(range(epochs)):
    train_loss = 0
    model_0.train()
    for batch in train_dataloader:
        x, y = batch[0], batch[1]
        x, y = x.to(torch.float), y.to(torch.float)
        x, y = x.to(device), y.to(device)
        # perform forward pass
        logits = model_0(x)
        # calculate loss
        loss = loss_fn(logits, y)
        train_loss += loss
        # optimizer zero grad
        optimizer.zero_grad()
        # loss backward
        loss.backward()
        # step
        optimizer.step()

    # testing
    model_0.eval()
    with torch.inference_mode():
        test_loss = 0
        for batch in test_dataloader:
            x, y = batch[0], batch[1]
            x, y = x.to(torch.float), y.to(torch.float)
            x, y = x.to(device), y.to(device)
            # forward pass
            logits = model_0(x)
            # calculate loss
            loss = loss_fn(logits, y)
            test_loss += loss
        test_loss /= len(test_dataloader)

    # print some info
    train_loss /= len(train_dataloader)
    print(f"epoch : {epoch} || train_loss : {train_loss:.4f} || test_loss : {test_loss:.4f}")
Now I will show the model:
# creating the model
from torch import nn

class s2s_model_v0(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.ln = nn.LayerNorm(normalized_shape=input_size)
        self.encoder = nn.GRU(input_size=input_size, hidden_size=hidden_size, batch_first=True)
        self.ln2 = nn.LayerNorm(normalized_shape=hidden_size)
        self.decoder = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, batch_first=True)
        self.ln3 = nn.LayerNorm(normalized_shape=hidden_size)
        self.linear2 = nn.Linear(in_features=hidden_size, out_features=hidden_size)  # I also doubled the number of linear layers
        self.linear = nn.Linear(in_features=hidden_size, out_features=output_size)

    def forward(self, x):
        x = self.ln(x)
        encoder_output, enc_hidden_state = self.encoder(x)
        encoder_output = self.ln2(encoder_output)
        decoder_output, _ = self.decoder(encoder_output, enc_hidden_state)
        decoder_output = self.ln3(decoder_output)
        final_output = self.linear(self.linear2(decoder_output))
        return final_output
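And this is roughly how I create the model, loss, and optimizer (the sizes, learning rate, and loss choice below are approximations, not my exact values):

import torch

# sketch with assumed hyperparameters; my real ones may differ
model_0 = s2s_model_v0(input_size=300, hidden_size=256, output_size=300)
loss_fn = nn.MSELoss()  # assumption: a regression-style loss on the float targets
optimizer = torch.optim.Adam(params=model_0.parameters(), lr=1e-3)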
Please help me.