I have been getting this error constantly and I need some help on how to solve it.
`NotImplementedError: Could not run 'aten::add.out' with arguments from the 'QuantizedCPU' backend`
I’m attaching screenshots of my code + error.
Thanks
Inference script
# Inference script: loads every fold checkpoint from `models_dir`, applies
# post-training static quantization to each model, and ensemble-averages the
# softmax probabilities over the test set.
#
# NOTE(review): the reported traceback ("Could not run 'aten::add.out' with
# arguments from the 'QuantizedCPU' backend") is typically caused by a plain
# `+` / `torch.add` on quantized tensors inside a ResNet skip connection.
# The model itself must route residual adds through
# `torch.nn.quantized.FloatFunctional().add(...)` (and wrap input/output in
# QuantStub/DeQuantStub) before `convert()` can produce a runnable model —
# confirm against the CustomResnet definition.

predicted_labels = None
models_dir = 'xxxxxx'

for model_name in glob.glob(models_dir + '/*.pth'):
    model = CustomResnet(model_name=CFG.model_name, pretrained=False)
    state = torch.load(model_name, map_location=torch.device('cpu'))['model']
    model.load_state_dict(state)
    model = model.to(device)
    model.eval()  # fusion and static quantization both require eval mode

    # Post-training static quantization: fuse -> set qconfig -> prepare -> convert.
    model = torch.quantization.fuse_modules(model, modules_to_fuse)
    model.qconfig = torch.quantization.default_qconfig
    torch.quantization.prepare(model, inplace=True)
    model.eval()
    # NOTE(review): no calibration pass runs between prepare() and convert(),
    # so the inserted observers never see any data and the chosen
    # scales/zero-points will be meaningless. Feed a few representative
    # batches through `model` here before converting.
    torch.quantization.convert(model, inplace=True)
    print_size_of_model(model)

    temp_preds = None
    with torch.no_grad():
        for images in tqdm(test_loader, desc='Predicting. '):
            images = images.to(device)
            st = time.time()
            predictions = model(images)
            et = time.time()
            predictions = predictions.softmax(1).to('cpu').numpy()
            if temp_preds is None:
                temp_preds = predictions
            else:
                temp_preds = np.vstack((temp_preds, predictions))

    # Accumulate per-model probabilities for the ensemble average below.
    if predicted_labels is None:
        predicted_labels = temp_preds
    else:
        predicted_labels += temp_preds

# Average the summed probabilities over the number of checkpoints.
predicted_labels /= len(glob.glob(models_dir + '/*.pth'))
test_set['predicted'] = predicted_labels.argmax(1)
final_test_accuracy = get_score(test_set['target'], test_set['predicted'])
print("========================================= PERFORMANCE =============================================")
print_size_of_model(model)
print(f'Final test accuracy = {final_test_accuracy} ')
# NOTE(review): st/et only time the last batch of the last model, not the
# whole run — move the timers if end-to-end latency is wanted.
print('Elapsed time = {:0.4f} milliseconds'.format((et - st) * 1000))
print("====================================================================================================")