Axes don't match array/size mismatch, m1: [132096 x 344], m2: [118336 x 128]

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
from torch.autograd import Variable

EPOCH = 20
BATCH_SIZE = 128
LR = 0.005          # learning rate

torch.cuda.empty_cache()
data_transforms = torchvision.transforms.Compose([
    torchvision.transforms.RandomResizedCrop(344),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor()])
path1 = 'drive/My Drive/Colab/image/test/'
train_data = torchvision.datasets.ImageFolder(path1, transform=data_transforms)
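
The code further down also uses N_TEST_IMG and train_loader, which are not shown in the snippet above; presumably they were defined earlier, something like:

N_TEST_IMG = 5   # assumed value, just the number of preview images
train_loader = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)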

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()

        self.encoder = nn.Sequential(
            nn.Linear(3*344*344, 128),
            nn.Tanh(),          # activation
            nn.Linear(128, 64),
            nn.Tanh(),
            nn.Linear(64, 12),
            nn.Tanh(),
            nn.Linear(12, 3),   # compress to 3 features which can be visualized in plt
        )
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.Tanh(),
            nn.Linear(12, 64),
            nn.Tanh(),
            nn.Linear(64, 128),
            nn.Tanh(),
            nn.Linear(128, 3*344*344),
            nn.Sigmoid(),       # compress to a range (0, 1)
        )

    def forward(self, x):
        x = x.view(x.size(0), -1)   # flatten each image to a vector of length 3*344*344

        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded
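
For reference, a quick shape check on a random batch (just to illustrate what forward returns with the layers above):

x = torch.rand(2, 3, 344, 344)
enc, dec = AutoEncoder()(x)
print(enc.shape)   # torch.Size([2, 3])
print(dec.shape)   # torch.Size([2, 355008]) -- flat, not (2, 3, 344, 344)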

autoencoder = AutoEncoder()

optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()

# initialize figure
f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
plt.ion()   # continuously plot

# original data (first row) for viewing
view_data = train_data.train_data[:N_TEST_IMG].view(-1, 344*344).type(torch.FloatTensor)/255.

for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (344, 344)), cmap='rainbow')
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())

for epoch in range(EPOCH):
    for step, (x, b_label) in enumerate(train_loader):
        b_x = x.view(-1, 3*344*344)   # batch x, shape (batch, 3*344*344)
        b_y = x.view(-1, 3*344*344)   # batch y, shape (batch, 3*344*344)

        encoded, decoded = autoencoder(b_x)

        loss = loss_func(decoded, b_y)      # mean square error
        optimizer.zero_grad()               # clear gradients for this training step
        loss.backward()                     # backpropagation, compute gradients
        optimizer.step()                    # apply gradients

The code above is the training process.

with torch.no_grad():
    for img, label in train_loader:
        fig = plt.figure()

        print('img', img.shape)

        imggg = np.transpose(img[0], (1, 2, 0))

        print('imggg', imggg.shape)

        ax1 = fig.add_subplot(121)
        ax1.imshow(imggg)

        if torch.cuda.is_available():
            img = Variable(img.to())
        else:
            img = Variable(img)

        encoded, decoded = autoencoder(img)

        print(decoded.shape)
        print(decoded)

        decodeddd = np.transpose(decoded.cpu()[0], (1, 2, 0))

        print(decodeddd.shape)
        print(decodeddd)

        ax2 = fig.add_subplot(122)
        ax2.imshow(decodeddd)

This is the code I used to plot my decoded picture, but it returns the following error:

TypeError                                 Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
     55     try:
---> 56         return getattr(obj, method)(*args, **kwds)
     57

TypeError: transpose(): argument 'dim0' (position 1) must be int, not tuple

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
3 frames
/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in _wrapit(obj, method, *args, **kwds)
     44     except AttributeError:
     45         wrap = None
---> 46     result = getattr(asarray(obj), method)(*args, **kwds)
     47     if wrap:
     48         if not isinstance(result, mu.ndarray):

ValueError: axes don't match array

How can I solve this problem, and what should I do to display my decoded picture?