I am currently training a model in pytorch for regression purposes(the model consists of some dense layers).
I have used sklearn’s standard scaler on the data which consists of an image and the ground truth regression targets.
How can I use the scaler’s inverse transform on the predicted output from the model during evaluation stage.
The training code is shown below
for epoch in range(num_epochs):
model.train()
for batch_idx,(img,gt_pose) in enumerate(train_loader):
img=img.to(device)
gt_pose=gt_pose.to(device)
pred_pose=model(img)
train_loss=loss(pred_pose,gt_pose)
train_loss=train_loss/accum_iter
train_loss.backward()
if((batch_idx+1)%accum_iter==0) or (batch_idx+1==len(train_loader)):
optimizer.step()
optimizer.zero_grad()
print ('Epoch: %03d/%03d | Batch %04d/%04d | Training loss: %.4f'
%(epoch, num_epochs, batch_idx,
len(train_loader), train_loss))
train_losses.append(train_loss.item())
model.eval()
for batch_idx,(img_val,gt_pose_val) in enumerate(val_loader):
img_val=img_val.to(device)
gt_pose_val=gt_pose.to(device)
pred_pose_val=model(img_val)
loss_val=loss(pred_pose_val,gt_pose_val)
print ('Epoch: %03d/%03d | Batch %04d/%04d | Validation loss: %.4f'
%(epoch, num_epochs, batch_idx,
len(train_loader), loss_val))
valid_losses.append(loss_val.item())
Model evaluation
sc=StandardScaler()
img=img.to(device)#same transformations applied on image as during training
torch.manual_seed(123)
with torch.no_grad():
pred=model(img)
pred=pred.detach().cpu().numpy()
#sc.inverse_transform(pred) gives an error
if i do sc.inverse_transform(pred),i get an error of the form scalar needs to be fit on the data.