Transposing a NumPy image array for matplotlib

I get this error: Invalid shape (120, 120, 1) for image data
I am sending a grayscale image with this transformation

transforms.Grayscale(num_output_channels=1)

but the error is raised by this line of my code:

plt.imshow(np.transpose(im_inv.numpy(), (1, 2, 0)))

If you post more (or the full) code, then maybe we can help.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms # add models to the list
import os
import seaborn as sn  # for heatmaps
from sklearn.metrics import confusion_matrix
from sklearn import metrics 

import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd

# ignore harmless warnings
import warnings
warnings.filterwarnings("ignore")

root = '../'

# Training-time augmentation + preprocessing pipeline.
# NOTE: Grayscale(num_output_channels=1) produces a single-channel tensor,
# so Normalize must receive 1-element mean/std lists. The previous
# 3-channel ImageNet stats ([0.485, 0.456, 0.406] / [0.229, 0.224, 0.225])
# do not match a 1-channel image and cause a channel/broadcast error.
train_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.RandomRotation(10, fill=(0,)),
        transforms.Resize(224),             # resize shortest side to 224 pixels
        transforms.CenterCrop(224),         # crop longest side to 224 pixels at center
        transforms.ToTensor(),
        # single-channel stats, consistent with test_transform below
        transforms.Normalize([0.4161], [0.1688])
    ])
# Evaluation-time preprocessing: no augmentation, smaller 120x120 crop.
test_transform = transforms.Compose([
      transforms.Grayscale(num_output_channels=1),
      transforms.Resize(120),
      transforms.CenterCrop(120),
      transforms.ToTensor(),
      transforms.Normalize([0.4161,], [0.1688,])
    ])
# Rough inverse normalization used only for visualization.
# NOTE(review): an exact inverse of Normalize([0.4161],[0.1688]) would be
# mean=[-0.4161/0.1688], std=[1/0.1688]; the [0.5]/[0.5] values here are an
# approximation for display purposes only — confirm intent.
inv_normalize = transforms.Normalize(
     mean=[0.5], std=[0.5]
    )

train_data = datasets.ImageFolder(os.path.join(root, 'train_real'), transform=train_transform)
test_data = datasets.ImageFolder(os.path.join(root, 'validation'), transform=test_transform)



torch.manual_seed(42)

# The loaders stream the images into memory incrementally, 10 images
# per batch in parallel — i.e. roughly one image per class/folder of
# the dataset is processed at a time.
train_loader = DataLoader(train_data, batch_size=10, shuffle=True)
test_loader = DataLoader(test_data, batch_size=10, shuffle=True)

# obtain the labels (class names) of the dataset
class_names = train_data.classes

print(class_names)
print(f'Training images available: {len(train_data)}')
print(f'Testing images available:  {len(test_data)}')



    
    # predicting new input images
# --- Segment figures out of a new input image for prediction ---
im = cv2.imread('../imagenes/2.jpg')
if im is None:
    # cv2.imread returns None (no exception) on a missing/unreadable file;
    # fail loudly instead of crashing later inside cvtColor
    raise FileNotFoundError("Could not read image '../imagenes/2.jpg'")

# convert to grayscale
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

# Preprocessing: inverted binary threshold, then dilate heavily so that
# nearby strokes merge into single external contours.
_, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(thresh, kernel, iterations=13)
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

path = '../test/1'
# cv2.imwrite fails silently if the folder is missing — create it up front
os.makedirs(path, exist_ok=True)

idx = 0
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    # discard very small shapes (noise) — guard clause instead of else-branch
    if h < 40 or w < 40:
        continue
    # crop each remaining segmented figure and save it for prediction
    idx += 1
    roi = im[y:y + h, x:x + w]
    cv2.imwrite(os.path.join(path, str(idx) + '.jpg'), roi)
    
    
    #esto es para leer las imagenes de la carpeta test para prediccion
# Read the saved crops from the 'test' folder for prediction.
idx = 0
test_data1 = datasets.ImageFolder(os.path.join(root, 'test'), transform=test_transform)
test_loader1 = DataLoader(test_data1, batch_size=12, shuffle=True)

# Obtain one batch of test images.
# NOTE: `dataiter.next()` was removed in modern PyTorch; the builtin
# next() is the supported way to pull a batch from a DataLoader iterator.
dataiter = iter(test_loader1)
images, labels = next(dataiter)
    

    
train_on_gpu = torch.cuda.is_available()

# Display the batch of (approximately de-normalized) test images.
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(12):
    # add_subplot requires integer rows/cols; 12/2 is a float in Python 3
    # and raises a TypeError in recent matplotlib versions
    ax = fig.add_subplot(2, 12 // 2, idx + 1, xticks=[], yticks=[])
    im_inv = inv_normalize(images[idx])
    # imshow cannot render an (H, W, 1) array — this is the source of the
    # "Invalid shape (120, 120, 1) for image data" error. Drop the
    # singleton channel axis and render with a gray colormap instead of
    # transposing to channels-last.
    ax.imshow(im_inv.numpy().squeeze(), cmap='gray')
    ax.set_title("hola")