```python
import torch
import torch.nn as nn
import transformers
from transformers import DebertaTokenizer  # the class below shadows transformers.DebertaModel, so it is not imported by name


class DebertaModel(nn.Module):
    def __init__(self):
        super(DebertaModel, self).__init__()
        # config is defined elsewhere in the notebook
        self.deberta = transformers.DebertaModel.from_pretrained('…/input/deberta/base', return_dict=True)
        self.dropout = nn.Dropout(0.3)
        self.classified = nn.Linear(768, config.out_features)

    def forward(self, ids, mask, token_type_ids):
        # the traceback below points at this tuple unpacking
        _, pooled_output = self.deberta(
            input_ids=ids,
            attention_mask=mask,
            token_type_ids=token_type_ids)
        pooled_output = self.dropout(pooled_output)
        outputs = self.classified(pooled_output)
        return outputs
```
```python
import torch
import torch.nn as nn
from tqdm import tqdm


def loss_fn(outputs, target):
    return nn.MSELoss()(outputs, target)


def train_fx(data_loader, model, optimizer, scheduler, device):
    model.train()
    train_loss = 0
    for bi, d in tqdm(enumerate(data_loader)):
        ids = d['ids']
        mask = d['mask']
        token_type_ids = d['token_type_ids']
        target = d['targets']

        # move the batch onto the device
        ids = ids.to(device, dtype=torch.long)
        mask = mask.to(device, dtype=torch.long)
        token_type_ids = token_type_ids.to(device, dtype=torch.long)
        target = target.to(device, dtype=torch.float)  # MSELoss expects float targets

        optimizer.zero_grad()
        outputs = model(
            ids=ids,
            mask=mask,
            token_type_ids=token_type_ids
        )
        loss = loss_fn(outputs, target)
        loss.backward()
        optimizer.step()
        scheduler.step()
        train_loss += loss.item()

    train_loss = train_loss / len(data_loader)
    return train_loss
```
Is this issue coming from my model?
It's unclear which line of code causes the issue, but based on your screenshots it should be somewhere in `train_fx`.
Posting screenshots and unformatted code makes debugging unnecessarily harder, so please format your code properly by wrapping it in three backticks ``` rather than posting screenshots.
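To narrow it down, one option is to pull a single batch out of the loader and call the model directly, outside of `train_fx`, so the failing call is isolated. A minimal sketch, assuming the `train_data_loader`, `model`, and `device` objects from the notebook above and the same batch keys (`ids`, `mask`, `token_type_ids`):

```python
# Hypothetical smoke test: run one batch through the model outside the training loop
# to see exactly which call raises.
d = next(iter(train_data_loader))          # grab a single batch
ids = d['ids'].to(device, dtype=torch.long)
mask = d['mask'].to(device, dtype=torch.long)
token_type_ids = d['token_type_ids'].to(device, dtype=torch.long)

with torch.no_grad():                      # no gradients needed for a shape check
    out = model(ids=ids, mask=mask, token_type_ids=token_type_ids)
print(out.shape)
```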
Rexedoziem (October 5, 2022, 10:27am, #4):
```
0it [00:02, ?it/s]
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_17/3018574526.py in <module>
      2 best_accuracy = 0
      3 for epoch in range(config.EPOCHS):
----> 4     train_loss = train_fx(train_data_loader, model, optimizer, scheduler, device)
      5     outputs, target = eval_fx(valid_data_loader, model, device)
      6     accuracy = metrics.accuracy_score(outputs, targets)

/tmp/ipykernel_17/4220120316.py in train_fx(data_loader, model, optimizer, scheduler, device)
     28             ids = ids,
     29             mask = mask,
---> 30             token_type_ids = token_type_ids
     31         )
     32

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_17/2396469671.py in forward(self, ids, mask, token_type_ids)
     17             input_ids = ids,
     18             attention_mask = mask,
---> 19             token_type_ids = token_type_ids)
     20         pooled_output = self.dropout(pooled_output)
     21         outputs = self.classified(pooled_output)

ValueError: not enough values to unpack (expected 2, got 1)
```
Rexedoziem (October 5, 2022, 10:34am, #5):
I guess the error came from the model.
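That matches the traceback: the failing line is the tuple unpacking in `forward`. Unlike BERT, the plain `transformers.DebertaModel` has no pooler, so its output contains only the hidden states, and `_, pooled_output = self.deberta(...)` finds just one value to unpack. A minimal sketch of a `forward` that pools the `[CLS]` token itself instead of expecting a `pooled_output` (keeping the layer names from the post; treat it as one possible fix, not the only one):

```python
def forward(self, ids, mask, token_type_ids):
    # DebertaModel returns a model output object, not (sequence_output, pooled_output)
    output = self.deberta(
        input_ids=ids,
        attention_mask=mask,
        token_type_ids=token_type_ids)
    # pool manually: take the hidden state of the first ([CLS]) token
    pooled_output = output.last_hidden_state[:, 0]
    pooled_output = self.dropout(pooled_output)
    return self.classified(pooled_output)
```

Mean-pooling the hidden states over the attention mask would work as well; the key point is that the base DeBERTa model does not provide a pooled output to unpack.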