In the step that evaluates the BERT model on NER, the code fails with the error 'NoneType' object has no attribute 'detach'.
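The error comes from the evaluation loop: the second forward pass omits labels, so the returned output's loss field is None, and the code then calls .detach() on it (the uppercase .CPU() would fail next as well). The fix is to read the logits field of the output and use a lowercase .cpu():

logits = token_classifier_output.logits.detach().cpu().numpy()

With that fix applied, the corrected training and evaluation loop: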
import numpy as np
import torch
from tqdm import trange

from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from transformers import BertTokenizer
from transformers import DataCollatorForTokenClassification
from transformers import AutoModelForTokenClassification  # token-classification (NER) head
epochs = 5
max_grad_norm = 1.0
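The loop below assumes that model, optimizer, scheduler, device, train_dataloader, and valid_dataloader were created earlier. A minimal setup sketch (the checkpoint name "bert-base-cased", the learning rate, and label_list are illustrative assumptions; on recent transformers versions torch.optim.AdamW can replace the transformers AdamW):

# assumed setup -- label_list and the dataloaders are placeholders
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
model = AutoModelForTokenClassification.from_pretrained(
    "bert-base-cased", num_labels=len(label_list))  # label_list: the NER tag set
model.to(device)
optimizer = AdamW(model.parameters(), lr=3e-5)  # assumed learning rate
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=len(train_dataloader) * epochs)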
for _ in trange(epochs, desc="Epoch"):
    # TRAIN loop
    model.train()
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        # add batch to gpu
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        # forward pass; passing labels makes the output carry a loss
        token_classifier_output = model(b_input_ids,
                                        token_type_ids=None,
                                        attention_mask=b_input_mask,
                                        labels=b_labels)
        token_classifier_output.loss.backward()
        # track train loss
        tr_loss += token_classifier_output.loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
        # gradient clipping
        torch.nn.utils.clip_grad_norm_(parameters=model.parameters(),
                                       max_norm=max_grad_norm)
        # update parameters and advance the learning-rate schedule
        optimizer.step()
        scheduler.step()
        model.zero_grad()
    # print train loss per epoch
    print("Train loss: {}".format(tr_loss / nb_tr_steps))
    # VALIDATION on validation set
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    predictions, true_labels = [], []
    for batch in valid_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        with torch.no_grad():
            # a single forward pass with labels returns both loss and
            # logits; a pass WITHOUT labels returns loss=None, and calling
            # .detach() on that is what raised the 'NoneType' error
            token_classifier_output = model(b_input_ids,
                                            token_type_ids=None,
                                            attention_mask=b_input_mask,
                                            labels=b_labels)
        tmp_eval_loss = token_classifier_output.loss
        # read the logits field (not loss); .cpu() is lowercase
        logits = token_classifier_output.logits.detach().cpu().numpy()
        label_ids = b_labels.cpu().numpy()
        predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
        true_labels.append(label_ids)
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
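flat_accuracy is not defined in the snippet. A minimal sketch of the usual helper (an assumption: it computes plain token-level accuracy and does not mask padded positions, so pad labels such as -100 would need to be filtered out first):

def flat_accuracy(logits, label_ids):
    # flatten (batch, seq_len, num_labels) logits to per-token predictions
    pred_flat = np.argmax(logits, axis=2).flatten()
    labels_flat = label_ids.flatten()
    # fraction of tokens whose predicted tag matches the gold tag
    return np.sum(pred_flat == labels_flat) / len(labels_flat)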