Neural style transfer: stack expects each tensor to be equal size

Need help: this is a simple neural style transfer script. Training works, but after saving and loading the model state I get this error:

RuntimeError: stack expects each tensor to be equal size, but got [1, 64, 256, 256] at entry 0 and [1, 128, 128, 128] at entry 1
at save_image(new_img, 'output.png')
I do not understand what is wrong with it; I did transform the images to the same size …

import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.utils import save_image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

image_size = 256

class VGG(nn.Module):

  def __init__(self):
    super(VGG, self).__init__()
    # indices of the vgg19 feature layers whose activations feed the losses
    self.chosen_features = ["0", "5", "10", "19", "28"]
    self.model = models.vgg19(pretrained=True).features[:29]

  def forward(self, x):
    # collect the activations of the chosen layers;
    # the output is a list of tensors, one per chosen layer
    features = []
    for layer_num, layer in enumerate(self.model):
      x = layer(x)
      if str(layer_num) in self.chosen_features:
        features.append(x)
    return features

def load_image(name):
  image = Image.open(name)
  # resize to image_size x image_size and add a batch dimension
  image = loader(image).unsqueeze(0).to(device)
  return image


loader = transforms.Compose([
    transforms.Resize((image_size,image_size)),
    transforms.ToTensor(),
])
model = VGG().to(device).eval()

original_img = load_image("and.png")
style_img = load_image("style.jpg")
generated = original_img.clone().requires_grad_(True)
learning_rate = 0.001
alpha = 1
beta = 0.01
optimizer = optim.Adam([generated], lr=learning_rate)

for step in range(200):
  optimizer.zero_grad()
  generated_features = model(generated)
  original_img_features = model(original_img)
  style_features = model(style_img)
  style_loss = original_loss = 0
  for gen_feature, orig_feature, style_feature in zip(
      generated_features, original_img_features, style_features
  ):
      batch_size, channel, height, width = gen_feature.shape
      # content loss: match the generated features to the original image's
      original_loss += torch.mean((gen_feature - orig_feature)**2)
      # Gram matrices capture the style as feature correlations
      G = gen_feature.view(channel, height * width) @ gen_feature.view(channel, height * width).T
      A = style_feature.view(channel, height * width) @ style_feature.view(channel, height * width).T
      style_loss += torch.mean((G - A)**2)
  total_loss = alpha * original_loss + beta * style_loss
  total_loss.backward()
  optimizer.step()
  if step % 10 == 0:
    print(total_loss)
    save_image(generated, "gen.png")  # works
  torch.save({'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()},
            "saved_model_for_styletransfer.pth")

  
with torch.no_grad(): 
  checkpoint = torch.load("saved_model_for_styletransfer.pth")
  model = VGG().to(device).eval()
  model.load_state_dict(checkpoint['model_state_dict'])
  new_img = model(load_image("and.png"))
  print(new_img)
  save_image(new_img, 'output.png')  # fails

Your model returns a list of intermediate activations with different shapes, so storing this list as a single image won’t work.
You can check the shape of each tensor by iterating over the output:

for i in new_img:
    print(i.shape)

Thanks for the reply. Well, this is what I get:
torch.Size([1, 64, 256, 256])
torch.Size([1, 128, 128, 128])
torch.Size([1, 256, 64, 64])
torch.Size([1, 512, 32, 32])
torch.Size([1, 512, 16, 16])

I tried saving the model after calling eval() and moving it to the CPU:

# model.eval().cpu()          
# torch.save({"optimizer":copy.deepcopy(optimizer.state_dict()), 
#             "model":copy.deepcopy(model.state_dict())
#             },"saved_model_for_styletransfer_cpu.pth")

import re

with torch.no_grad():
  checkpoint = torch.load("saved_model_for_styletransfer_cpu.pth")
  # drop any running-statistics keys before loading the state dict
  for k in list(checkpoint.keys()):
    if re.search(r'in\d+\.running_(mean|var)$', k):
      del checkpoint[k]
  model = VGG().cpu()
  model.load_state_dict(checkpoint['model'])
  ad = load_image("and.png").cpu()
  new_img = model(ad)
  for i in new_img:
    print(i.shape)
  save_image(new_img, 'output.png')

Well, training works and the loss keeps getting lower. But when I load the model and only want to do a forward pass, I get that error… I do not understand why …

The forward pass is not raising the error; save_image is, because it is applied to a list of tensors with different shapes.
Remove

save_image(new_img, 'output.png')

and it should work, or alternatively store each intermediate activation separately, e.g. along the lines of the sketch below.
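
Something like this rough sketch could do that: each entry of new_img has shape [1, C, H, W] with C > 3, so it cannot be written as one RGB image, but each channel can be saved as its own grayscale image (the file names here are just placeholders):

for idx, feat in enumerate(new_img):
    # feat has shape [1, C, H, W]; save each channel as a grayscale image
    for c in range(feat.shape[1]):
        save_image(feat[:, c:c + 1], f"feature_{idx}_channel_{c}.png")

Note this writes one file per channel (1472 files for your five activations), so you might want to limit it to the first few channels.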

It is because I’m using optimizer = optim.Adam([generated], lr=learning_rate), i.e. optimizing the image and not the model parameters. But I do not know how to make it work without training, just to style an image; I think I need something like the sketch below.
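
A rough sketch of what I mean, assuming all I want to keep is the styled result itself (the file name is a placeholder):

# the optimizer updates `generated`, not the VGG weights,
# so the styled result lives in the image tensor itself
torch.save({"generated": generated.detach().cpu()}, "styled_image.pth")

# later, to get the styled image back without re-running the optimization:
restored = torch.load("styled_image.pth")["generated"]
save_image(restored, "output.png")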

I’m not sure I understand the explanation. The code fails when saving the list of tensors, so how does that relate to the model training and the optimizer?