Hi Patrick,

Apologies for the confusion — this is my entire training loop.

### Below is my code

```
# Best validation loss seen so far (carried in from a previous run/checkpoint).
valid_loss_min = valid_loss_min_input
# NOTE(review): the lines below appear to belong inside this loop, but the
# pasted code has lost its indentation — confirm the nesting against the
# original source.
for epoch in range(start_epochs, n_epochs+1):
# initialize variables to monitor training and validation loss
train_loss = 0.0
###################
# train the model #
###################
model.train()
# Iterate over batches of RGB images; img[0] is the image tensor of the batch.
for batch_idx,img in enumerate(loaders['rgbimage_dataloader']):
optimizer.zero_grad()
# move to GPU
# NOTE(review): both branches are identical — `.to(device)` already covers the
# CPU case, so the `use_cuda` check is redundant here.
if use_cuda:
real = img[0].to(device)
sketch_image = cycle_model(real)
else:
real = img[0].to(device)
sketch_image = cycle_model(real)
###Getting output from main model#####
styleGan_output = model(real,sketch_image,Target_color.float(),noise)
###Feeding to the pretrained model########
fakeEdge_image = cycle_model(styleGan_output)
```

#
### Un-normalizing the image so it can be fed to PIL

```
# Rescale the generator output per image to [0, 255] so it can be converted
# to PIL images.
# NOTE(review): Normalize(mean=0.5, std=0.5) maps x -> (x - 0.5) / 0.5, i.e. it
# normalizes AGAIN rather than inverting a previous normalization (the inverse
# for data in [-1, 1] would be Normalize(mean=[-1,-1,-1], std=[2,2,2])) —
# confirm this is intended.
x_tensor = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(styleGan_output)
# Per-image min/max reduced over dims 1 then 2 (keepdim preserved for
# broadcasting below).
# NOTE(review): if x_tensor is NCHW this reduces channels and height but NOT
# width, so min_i/max_i retain a W axis — verify the intended reduction dims.
min_i = x_tensor.min(dim=(1), keepdim=True).values.min(dim=(2), keepdim=True).values
max_i = x_tensor.max(dim=(1), keepdim=True).values.max(dim=(2), keepdim=True).values
x_tensor = ((x_tensor-min_i) / (max_i - min_i)) * 255
# NOTE(review): the uint8 cast (and the PIL conversion) is non-differentiable,
# so anything computed from these images is detached from the autograd graph.
styleGAN_transformedimage = [transforms.ToPILImage()(x_) for x_ in x_tensor.type(torch.uint8)]
```

#
###Get Dominant Color

```
# Extract the dominant colour of each generated image (PIL-based, off-graph).
styleGAN_transformedimage = [get_dominant_color(x) for x in styleGAN_transformedimage]
# NOTE(review): these tensors are rebuilt from plain Python values with
# requires_grad=False, so the colour loss computed from them CANNOT
# backpropagate into the model — the model receives no gradient from it.
# Also note dtype=float is float64, which may mismatch a float32 Target_color.
Avg_color = [torch.tensor(x,dtype=float,requires_grad=False) for x in styleGAN_transformedimage]
Avg_color = [color.to(device) for color in Avg_color]
Avg_color = torch.stack(Avg_color)
# Repeat the single target colour once per image in the batch.
Target_color_match = torch.cat(len(Avg_color)*[Target_color])
Avg_color = torch.unsqueeze(Avg_color,1)
```

#
###Loss calculation########

```
# Edge-consistency loss: the pretrained cycle model applied to the generated
# image should reproduce the sketch derived from the real image.
Edge_loss = criterion(fakeEdge_image,sketch_image)
# Colour loss: dominant colour of the output vs. the requested target colour.
# NOTE(review): Avg_color was built with requires_grad=False from PIL-derived
# values, so this term is a constant w.r.t. the model parameters and
# contributes no gradient to the update.
color_loss = criterion(Avg_color,Target_color_match)
total_loss = Edge_loss + color_loss
total_loss.backward()
optimizer.step()
# Running mean of the loss over the batches seen so far in this epoch.
# NOTE(review): `.data` is legacy autograd API — `.item()` is the modern way
# to pull a Python number off the graph.
train_loss = train_loss + ((1 / (batch_idx + 1)) * (total_loss.data - train_loss))
# print training/validation statistics
print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch,train_loss))
del total_loss
del styleGan_output
del fakeEdge_image
del Edge_loss
# Checkpoint every epoch (model weights only — optimizer state is NOT saved).
torch.save(model.state_dict(),f"model_history/mu_stylegan{epoch}.pth")
```

This is my entire training loop

#
The steps below in the above code do not come from the model output, i.e. sketch_image, Avg_color and Target_color_match.

Edge_loss = criterion(fakeEdge_image,sketch_image)

color_loss = criterion(Avg_color,Target_color_match)

#
Do the states of all of these get saved by the line below?

torch.save(model.state_dict(),f"model_history/mu_stylegan{epoch}.pth")

#
Below is how I am evaluating:

# ---- Evaluation -------------------------------------------------------------
# Request a pure-blue target colour: a (1, 1, 3) tensor with the B channel set.
Target_color = torch.zeros(1, 1, 3, dtype=torch.float)
Target_color[:, :, 2] = 255
Target_color_blue = Target_color.to(device)

# Load the epoch-178 checkpoint saved during training.
# FIX: the original used curly "smart" quotes (e.g. “…” and ‘cuda’), which are
# a SyntaxError in Python — replaced with straight ASCII quotes.
model.load_state_dict(
    torch.load(
        os.path.join("model_history/mu_stylegan178.pth"),
        map_location=torch.device('cuda'),
    )
)

# progress_bar = tqdm(enumerate(trainA_dataloader,trainB_dataloader),total=len(trainA_dataloader))

model.eval()

outputs = []  # NOTE(review): never appended to below — dead unless used later.

# FIX: inference needs no autograd graph; torch.no_grad() avoids building one
# (the original kept grads alive and only detached at the end).
with torch.no_grad():
    for batch_idx, img in enumerate(loaders['test_rgbimage_dataloader']):
        if use_cuda:
            real = img[0].to(device)
            # Pretrained CycleGAN produces the sketch conditioning input.
            sketch_image = Cyclegan_model(real)
            recon = mu_stylegan(real, sketch_image, Target_color_blue.float(), noise)
            sketch_fake_test = recon.detach().cpu().numpy()
            # FIX: renamed the inner loop variable (was `img`), which shadowed
            # the batch variable of the outer loop.
            # NOTE(review): nesting reconstructed from the unindented paste —
            # confirm this inner loop belongs inside the `use_cuda` branch.
            for sample in sketch_fake_test:
                sample = sample / 2 + 0.5  # undo [-1, 1] normalisation for display
                plt.imshow(np.transpose(sample, (1, 2, 0)))  # CHW -> HWC
                plt.show()