I’m trying to implement this work in PyTorch: https://arxiv.org/pdf/1812.02342v1.pdf. The problem is the identity loss: I don’t understand how it should be computed — can anyone explain it to me?
My code
# -*- coding: utf-8 -*-
!pip3 install https://download.pytorch.org/whl/cu80/torch-0.4.0-cp36-cp36m-linux_x86_64.whl
!wget -c https://s3.amazonaws.com/xunhuang-public/adain/vgg_normalised.t7
from __future__ import print_function
import argparse
from functools import reduce
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.serialization import load_lua
class LambdaBase(nn.Sequential):
    """Base container for function-wrapping modules: runs every child module
    on the input and hands the collected outputs to `lambda_func` inside the
    concrete subclass's `forward`."""

    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        # Apply each registered child to the same input. With no children the
        # list is empty (falsy), so the raw input is passed through instead.
        outputs = [child(input) for child in self._modules.values()]
        return outputs if outputs else input
class Lambda(LambdaBase):
    """Wraps an arbitrary function as a module producing a single output."""

    def forward(self, input):
        prepared = self.forward_prepare(input)
        return self.lambda_func(prepared)
class LambdaMap(LambdaBase):
    """Applies `lambda_func` element-wise; the result is a list of outputs."""

    def forward(self, input):
        return [self.lambda_func(item) for item in self.forward_prepare(input)]
class LambdaReduce(LambdaBase):
    """Folds `lambda_func` over the prepared outputs, yielding one value."""

    def forward(self, input):
        parts = self.forward_prepare(input)
        return reduce(self.lambda_func, parts)
def copy_param(m, n):
    """Copy weight/bias (and BatchNorm running stats, when present on `n`)
    from Lua/Torch7 module `m` into the freshly built PyTorch module `n`."""
    if m.weight is not None:
        n.weight.data.copy_(m.weight)
    if m.bias is not None:
        n.bias.data.copy_(m.bias)
    for stat in ('running_mean', 'running_var'):
        if hasattr(n, stat):
            getattr(n, stat).copy_(getattr(m, stat))
def add_submodule(seq, *args):
    """Append each module in `args` to `seq`, named by its insertion index."""
    for child in args:
        index = len(seq._modules)
        seq.add_module(str(index), child)
def lua_recursive_model(module, seq):
    """Recursively translate a loaded Lua/Torch7 module tree into equivalent
    PyTorch modules appended to `seq` (an nn.Sequential), copying trained
    parameters where the layer type has any.

    Unrecognized layer types are reported to stdout and skipped.
    """
    for m in module.modules:
        name = type(m).__name__
        real = m
        # cudnn-accelerated layers arrive wrapped as TorchObject; unwrap to
        # the underlying module and strip the 'cudnn.' prefix for dispatch.
        if name == 'TorchObject':
            name = m._typename.replace('cudnn.', '')
            m = m._obj
        if name == 'SpatialConvolution':
            if not hasattr(m, 'groups'): m.groups = 1
            n = nn.Conv2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
                          (m.dW, m.dH), (m.padW, m.padH), 1, m.groups,
                          bias=(m.bias is not None))
            copy_param(m, n)
            add_submodule(seq, n)
        elif name == 'SpatialBatchNormalization':
            n = nn.BatchNorm2d(m.running_mean.size(0), m.eps, m.momentum,
                               m.affine)
            copy_param(m, n)
            add_submodule(seq, n)
        elif name == 'ReLU':
            n = nn.ReLU()
            add_submodule(seq, n)
        elif name == 'SpatialMaxPooling':
            n = nn.MaxPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
                             ceil_mode=m.ceil_mode)
            add_submodule(seq, n)
        elif name == 'SpatialAveragePooling':
            n = nn.AvgPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
                             ceil_mode=m.ceil_mode)
            add_submodule(seq, n)
        elif name == 'SpatialUpSamplingNearest':
            n = nn.UpsamplingNearest2d(scale_factor=m.scale_factor)
            add_submodule(seq, n)
        elif name == 'View':
            n = Lambda(lambda x: x.view(x.size(0), -1))
            add_submodule(seq, n)
        elif name == 'Linear':
            # Linear in pytorch only accept 2D input
            n1 = Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x)
            n2 = nn.Linear(m.weight.size(1), m.weight.size(0),
                           bias=(m.bias is not None))
            copy_param(m, n2)
            n = nn.Sequential(n1, n2)
            add_submodule(seq, n)
        elif name == 'Dropout':
            m.inplace = False
            n = nn.Dropout(m.p)
            add_submodule(seq, n)
        elif name == 'SoftMax':
            n = nn.Softmax()
            add_submodule(seq, n)
        elif name == 'Identity':
            n = Lambda(lambda x: x)  # do nothing
            add_submodule(seq, n)
        elif name == 'SpatialFullConvolution':
            # NOTE(review): unlike the conv/linear branches, no copy_param
            # here, so deconv weights are NOT transferred — harmless for the
            # VGG encoder this script converts, but verify if converting a
            # model that actually contains SpatialFullConvolution.
            n = nn.ConvTranspose2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
                                   (m.dW, m.dH), (m.padW, m.padH))
            add_submodule(seq, n)
        elif name == 'SpatialReplicationPadding':
            n = nn.ReplicationPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
            add_submodule(seq, n)
        elif name == 'SpatialReflectionPadding':
            n = nn.ReflectionPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
            add_submodule(seq, n)
        elif name == 'Copy':
            n = Lambda(lambda x: x)  # do nothing
            add_submodule(seq, n)
        elif name == 'Narrow':
            # narrow() args are bound as a lambda default to avoid the
            # late-binding-closure pitfall inside this loop.
            n = Lambda(
                lambda x, a=(m.dimension, m.index, m.length): x.narrow(*a))
            add_submodule(seq, n)
        elif name == 'SpatialCrossMapLRN':
            # Wraps the legacy LRN object; lrn is bound per-layer via default.
            lrn = torch.legacy.nn.SpatialCrossMapLRN(m.size, m.alpha, m.beta,
                                                     m.k)
            n = Lambda(lambda x, lrn=lrn: lrn.forward(x))
            add_submodule(seq, n)
        elif name == 'Sequential':
            n = nn.Sequential()
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'ConcatTable':  # output is list
            n = LambdaMap(lambda x: x)
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'CAddTable':  # input is list
            n = LambdaReduce(lambda x, y: x + y)
            add_submodule(seq, n)
        elif name == 'Concat':
            dim = m.dimension
            n = LambdaReduce(lambda x, y, dim=dim: torch.cat((x, y), dim))
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'TorchObject':
            print('Not Implement', name, real._typename)
        else:
            print('Not Implement', name)
def lua_recursive_source(module):
    """Recursively emit PyTorch source-code lines for a Lua/Torch7 module tree.

    Returns a list of strings, one tab-indented line per layer; nested
    containers contribute their own (further indented) sub-lists. The output
    is meant to be post-processed by `simplify_source`.
    """
    s = []
    for m in module.modules:
        name = type(m).__name__
        real = m
        # cudnn layers are wrapped in a TorchObject; unwrap before dispatch.
        if name == 'TorchObject':
            name = m._typename.replace('cudnn.', '')
            m = m._obj
        if name == 'SpatialConvolution':
            if not hasattr(m, 'groups'): m.groups = 1
            s += ['nn.Conv2d({},{},{},{},{},{},{},bias={}),#Conv2d'.format(
                m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH),
                (m.padW, m.padH), 1, m.groups, m.bias is not None)]
        elif name == 'SpatialBatchNormalization':
            s += ['nn.BatchNorm2d({},{},{},{}),#BatchNorm2d'.format(
                m.running_mean.size(0), m.eps, m.momentum, m.affine)]
        elif name == 'ReLU':
            s += ['nn.ReLU()']
        elif name == 'SpatialMaxPooling':
            s += ['nn.MaxPool2d({},{},{},ceil_mode={}),#MaxPool2d'.format(
                (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
        elif name == 'SpatialAveragePooling':
            s += ['nn.AvgPool2d({},{},{},ceil_mode={}),#AvgPool2d'.format(
                (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
        elif name == 'SpatialUpSamplingNearest':
            s += ['nn.UpsamplingNearest2d(scale_factor={})'.format(
                m.scale_factor)]
        elif name == 'View':
            s += ['Lambda(lambda x: x.view(x.size(0),-1)), # View']
        elif name == 'Linear':
            # Linear in pytorch only accepts 2D input, hence the view wrapper.
            s1 = 'Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )'
            s2 = 'nn.Linear({},{},bias={})'.format(
                m.weight.size(1), m.weight.size(0), (m.bias is not None))
            s += ['nn.Sequential({},{}),#Linear'.format(s1, s2)]
        elif name == 'Dropout':
            s += ['nn.Dropout({})'.format(m.p)]
        elif name == 'SoftMax':
            s += ['nn.Softmax()']
        elif name == 'Identity':
            s += ['Lambda(lambda x: x), # Identity']
        elif name == 'SpatialFullConvolution':
            s += ['nn.ConvTranspose2d({},{},{},{},{})'.format(
                m.nInputPlane, m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH),
                (m.padW, m.padH))]
        elif name == 'SpatialReplicationPadding':
            s += ['nn.ReplicationPad2d({})'.format(
                (m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
        elif name == 'SpatialReflectionPadding':
            s += ['nn.ReflectionPad2d({})'.format(
                (m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
        elif name == 'Copy':
            s += ['Lambda(lambda x: x), # Copy']
        elif name == 'Narrow':
            s += ['Lambda(lambda x,a={}: x.narrow(*a))'.format(
                (m.dimension, m.index, m.length))]
        elif name == 'SpatialCrossMapLRN':
            lrn = 'torch.legacy.nn.SpatialCrossMapLRN(*{})'.format(
                (m.size, m.alpha, m.beta, m.k))
            s += ['Lambda(lambda x,lrn={}: Variable(lrn.forward(x)))'.format(
                lrn)]
        elif name == 'Sequential':
            s += ['nn.Sequential( # Sequential']
            s += lua_recursive_source(m)
            s += [')']
        elif name == 'ConcatTable':
            s += ['LambdaMap(lambda x: x, # ConcatTable']
            s += lua_recursive_source(m)
            s += [')']
        elif name == 'CAddTable':
            s += ['LambdaReduce(lambda x,y: x+y), # CAddTable']
        elif name == 'Concat':
            dim = m.dimension
            s += ['LambdaReduce(lambda x,y,dim={}: torch.cat((x,y),dim), # Concat'.format(
                m.dimension)]
            s += lua_recursive_source(m)
            s += [')']
        else:
            # BUG FIX: the original did `s += '# ' + name + ' Not Implement,\n'`,
            # which extends the list character-by-character (a str is
            # iterable). Wrap in a list so one whole comment line is appended.
            s += ['# ' + name + ' Not Implement,']
    # Indent every emitted line. Return a concrete list rather than a lazy
    # map object: callers both iterate the result and `+=` it into lists.
    return ['\t{}'.format(line) for line in s]
def simplify_source(s):
    """Collapse generated source lines into one code string.

    Strips default arguments and the trailing `#<Layer>` markers that
    `lua_recursive_source` emits, appends ',\\n' to each line, removes the
    leading tab, and concatenates everything.
    """
    rewrites = (
        (',(1, 1),(0, 0),1,1,bias=True),#Conv2d', ')'),
        (',(0, 0),1,1,bias=True),#Conv2d', ')'),
        (',1,1,bias=True),#Conv2d', ')'),
        (',bias=True),#Conv2d', ')'),
        ('),#Conv2d', ')'),
        (',1e-05,0.1,True),#BatchNorm2d', ')'),
        ('),#BatchNorm2d', ')'),
        (',(0, 0),ceil_mode=False),#MaxPool2d', ')'),
        (',ceil_mode=False),#MaxPool2d', ')'),
        ('),#MaxPool2d', ')'),
        (',(0, 0),ceil_mode=False),#AvgPool2d', ')'),
        (',ceil_mode=False),#AvgPool2d', ')'),
        (',bias=True)),#Linear', ')), # Linear'),
        (')),#Linear', ')), # Linear'),
    )
    pieces = []
    for line in s:
        # Longest patterns first (tuple order matters), same as the original
        # chain of map/replace calls.
        for old, new in rewrites:
            line = line.replace(old, new)
        # Append ',\n' then drop the first character (the leading tab).
        pieces.append('{},\n'.format(line)[1:])
    return reduce(lambda x, y: x + y, pieces)
def torch_to_pytorch(t7_filename, outputname=None):
    """Convert a Torch7 `.t7` model file into PyTorch.

    Writes two files: `<outputname>.py` containing generated architecture
    source (with the Lambda helper classes prepended) and `<outputname>.pth`
    containing the converted state dict. If `outputname` is None, a variable
    name derived from the t7 filename is used for both.
    """
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict': model = model.model
    model.gradInput = None
    # Wrap in a legacy Sequential so the recursion has a uniform root.
    slist = lua_recursive_source(torch.legacy.nn.Sequential().add(model))
    s = simplify_source(slist)
    # Boilerplate prepended to the generated .py so it is self-contained.
    header = '''
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn
    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input
class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input))
'''
    # Derive a valid Python identifier from the filename.
    varname = t7_filename.replace('.t7', '').replace('.', '_').replace('-',
                                                                       '_')
    # s[:-2] trims the trailing ',\n' simplify_source leaves on the last line.
    s = '{}\n\n{} = {}'.format(header, varname, s[:-2])
    if outputname is None: outputname = varname
    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)
    # Second pass: build the actual module tree and save its weights.
    n = nn.Sequential()
    lua_recursive_model(model, n)
    torch.save(n.state_dict(), outputname + '.pth')
# Command-line interface for the t7 -> PyTorch converter.
parser = argparse.ArgumentParser(
    description='Convert torch t7 model to pytorch')
parser.add_argument('--model', '-m', type=str, default='./vgg_normalised.t7',
                    help='torch model file in t7 format')
parser.add_argument('--output', '-o', type=str, default='./vgg_normalised',
                    help='output file name prefix, xxx.py xxx.pth')
# parse_args('') supplies an empty argument list, so the defaults above are
# used (avoids reading the notebook kernel's sys.argv).
args = parser.parse_args('')
torch_to_pytorch(args.model, args.output)
"""Prepare datasets"""
!pip install kaggle
"""Open Kaggle.json from PC"""
# Colab helper: prompt the user to upload kaggle.json (Kaggle API credentials)
# and report each uploaded file's size.
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(
        name=fn, length=len(uploaded[fn])))
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle
!kaggle competitions download painter-by-numbers -f train.zip
!wget http://images.cocodataset.org/zips/train2014.zip
!unzip -q train.zip
!unzip -q train2014.zip
!rm train.zip
!rm train2014.zip
"""Implementation"""
import torch
def calc_mean_std(feat, eps=1e-5):
    """Per-sample, per-channel mean and std of a 4D (N, C, H, W) feature map.

    `eps` is added to the variance before the square root to avoid
    divide-by-zero downstream. Both returns are shaped (N, C, 1, 1) so they
    broadcast/expand over the spatial dimensions.
    """
    size = feat.size()
    assert (len(size) == 4)
    batch, channels = size[0], size[1]
    flat = feat.view(batch, channels, -1)
    std = (flat.var(dim=2) + eps).sqrt().view(batch, channels, 1, 1)
    mean = flat.mean(dim=2).view(batch, channels, 1, 1)
    return mean, std
def adaptive_instance_normalization(content_feat, style_feat):
    """AdaIN: re-normalize content features to carry the style features'
    per-channel mean and std (used by the commented-out AdaIN path)."""
    assert (content_feat.size()[:2] == style_feat.size()[:2])
    size = content_feat.size()
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    centered = content_feat - content_mean.expand(size)
    normalized = centered / content_std.expand(size)
    return normalized * style_std.expand(size) + style_mean.expand(size)
def _calc_feat_flatten_mean_std(feat):
# takes 3D feat (C, H, W), return mean and std of array within channels
assert (feat.size()[0] == 3)
assert (isinstance(feat, torch.FloatTensor))
feat_flatten = feat.view(3, -1)
mean = feat_flatten.mean(dim=-1, keepdim=True)
std = feat_flatten.std(dim=-1, keepdim=True)
return feat_flatten, mean, std
def _mat_sqrt(x):
U, D, V = torch.svd(x)
return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())
def coral(source, target):
    """CORAL transfer: whiten the (3, H, W) `source` channels and re-color
    them with `target`'s per-channel statistics and covariance."""

    def _flat_norm_cov(img):
        # Flatten, standardize per channel, and build the covariance matrix
        # (+I for numerical stability), as in the original computation.
        flat, mean, std = _calc_feat_flatten_mean_std(img)
        norm = (flat - mean.expand_as(flat)) / std.expand_as(flat)
        cov_eye = torch.mm(norm, norm.t()) + torch.eye(3)
        return mean, std, norm, cov_eye

    _, _, source_norm, source_cov = _flat_norm_cov(source)
    target_mean, target_std, _, target_cov = _flat_norm_cov(target)
    # Whiten the source channels, then color them with the target covariance.
    transfer_norm = torch.mm(
        _mat_sqrt(target_cov),
        torch.mm(torch.inverse(_mat_sqrt(source_cov)), source_norm))
    transfer = transfer_norm * target_std.expand_as(source_norm) + \
        target_mean.expand_as(source_norm)
    return transfer.view(source.size())
import torch.nn as nn
"""
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(),
"""
# Decoder: inverts the VGG encoder from relu4_1 back to RGB. Every conv is a
# 3x3 on reflection-padded input (spatial size preserved); three
# nearest-neighbor upsamplings undo the 8x downsampling while channels go
# 512 -> 256 -> 128 -> 64 -> 3.
decoder = nn.Sequential(
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 256, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 128, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 64, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 3, (3, 3)),  # final conv: raw RGB, no activation
)
# VGG-19 encoder (the 'vgg_normalised' layout converted from t7 above):
# a 1x1 conv first, then reflection-padded 3x3 convs with ceil-mode max
# pooling. The relu*_1 layers are the split points used by Net below.
vgg = nn.Sequential(
    nn.Conv2d(3, 3, (1, 1)),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(3, 64, (3, 3)),
    nn.ReLU(),  # relu1-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, (3, 3)),
    nn.ReLU(),  # relu1-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 128, (3, 3)),
    nn.ReLU(),  # relu2-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, (3, 3)),
    nn.ReLU(),  # relu2-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 256, (3, 3)),
    nn.ReLU(),  # relu3-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 512, (3, 3)),
    nn.ReLU(),  # relu4-1, this is the last layer used
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU()  # relu5-4
)
class SANet(nn.Module):
    """Style-Attentional module (arXiv:1812.02342): content features act as
    queries over style features; the attended style values are projected back
    to `in_planes` channels and added residually onto the content."""

    def __init__(self, in_planes):
        super(SANet, self).__init__()
        # Creation order is kept identical to preserve parameter layout.
        self.f = nn.Conv2d(in_planes, in_planes // 2, (1, 1))  # query (content)
        self.g = nn.Conv2d(in_planes, in_planes // 2, (1, 1))  # key (style)
        self.h = nn.Conv2d(in_planes, in_planes // 2, (1, 1))  # value (style)
        self.sm = nn.Softmax(dim=-1)
        self.out_conv = nn.Conv2d(in_planes // 2, in_planes, (1, 1))

    def forward(self, content, style):
        query = self.f(content)
        key = self.g(style)
        value = self.h(style)
        qb, _, qh, qw = query.size()
        query = query.view(qb, -1, qw * qh).permute(0, 2, 1)
        kb, _, kh, kw = key.size()
        key = key.view(kb, -1, kw * kh)
        # Attention over style positions for every content position.
        attention = self.sm(torch.bmm(query, key))
        vb, _, vh, vw = value.size()
        value = value.view(vb, -1, vw * vh)
        attended = torch.bmm(value, attention.permute(0, 2, 1))
        b, c, h, w = content.size()
        attended = attended.view(b, c // 2, h, w)
        return self.out_conv(attended) + content
class Transform(nn.Module):
    """Fuses SANet outputs from two encoder levels: the relu5_1 result is
    upsampled x2, summed with the relu4_1 result, then merged through a
    reflection-padded 3x3 conv + ReLU."""

    def __init__(self, in_planes):
        super(Transform, self).__init__()
        self.sanet4_1 = SANet(in_planes=in_planes)
        self.sanet5_1 = SANet(in_planes=in_planes)
        self.upsample5 = nn.Upsample(scale_factor=2, mode='nearest')
        self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
        self.merge_conv = nn.Conv2d(in_planes, in_planes, (3, 3))
        self.merge_relu = nn.ReLU()

    def forward(self, content4_1, style4_1, content5_1, style5_1):
        stylized4 = self.sanet4_1(content4_1, style4_1)
        stylized5 = self.upsample5(self.sanet5_1(content5_1, style5_1))
        merged = self.pad(stylized4 + stylized5)
        return self.merge_relu(self.merge_conv(merged))
class Net(nn.Module):
    """SANet training network: frozen VGG encoder + Transform + decoder.

    forward(content, style) returns (content_loss, style_loss) computed on
    VGG features of the decoded stylization.

    NOTE(review): the SANet paper (arXiv:1812.02342) additionally defines an
    identity loss — stylize each image with itself, i.e. run
    transform/decoder on (Ic, Ic) and (Is, Is), and penalize both the pixel
    reconstruction error ||g(Ic,Ic) - Ic|| (likewise for Is) and the
    encoder-feature differences of those reconstructions. That term is not
    implemented in this class; confirm the exact weighting against the paper.
    """
    def __init__(self, encoder, decoder):
        super(Net, self).__init__()
        # Split the encoder at the relu*_1 boundaries; the slice indices
        # match the `vgg` Sequential defined above.
        enc_layers = list(encoder.children())
        self.enc_1 = nn.Sequential(*enc_layers[:4])  # input -> relu1_1
        self.enc_2 = nn.Sequential(*enc_layers[4:11])  # relu1_1 -> relu2_1
        self.enc_3 = nn.Sequential(*enc_layers[11:18])  # relu2_1 -> relu3_1
        self.enc_4 = nn.Sequential(*enc_layers[18:31])  # relu3_1 -> relu4_1
        self.enc_5 = nn.Sequential(*enc_layers[31:44])  # relu4_1 -> relu5_1
        self.transform = Transform(in_planes = 512)
        self.decoder = decoder
        self.mse_loss = nn.MSELoss()
        # fix the encoder: only transform and decoder receive gradients.
        for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4', 'enc_5']:
            for param in getattr(self, name).parameters():
                param.requires_grad = False

    # extract relu1_1, relu2_1, relu3_1, relu4_1, relu5_1 from input image
    def encode_with_intermediate(self, input):
        results = [input]
        for i in range(5):
            func = getattr(self, 'enc_{:d}'.format(i + 1))
            results.append(func(results[-1]))
        return results[1:]

    # extract relu4_1, relu5_1 from input image
    def encode(self, input):
        conv1 = getattr(self, 'enc_1')(input)
        conv2 = getattr(self, 'enc_2')(conv1)
        conv3 = getattr(self, 'enc_3')(conv2)
        conv4 = getattr(self, 'enc_4')(conv3)
        conv5 = getattr(self, 'enc_5')(conv4)
        return (conv4, conv5)

    def calc_content_loss(self, input, target):
        # Plain MSE between feature maps; target must carry no grad so
        # gradients flow only through `input`.
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        return self.mse_loss(input, target)

    def calc_style_loss(self, input, target):
        # AdaIN-style statistics matching: MSE on channel-wise mean and std.
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        input_mean, input_std = calc_mean_std(input)
        target_mean, target_std = calc_mean_std(target)
        return self.mse_loss(input_mean, target_mean) + \
            self.mse_loss(input_std, target_std)

    def forward(self, content, style, alpha=1.0):
        # alpha is only asserted here; it belonged to the commented-out
        # AdaIN interpolation path below and is unused by the SANet path.
        assert 0 <= alpha <= 1
        style_feats = self.encode_with_intermediate(style)
        content_feats = self.encode(content)
        """
        t4_1 = adaptive_instance_normalization(content_feats[0], style_feats[3])
        t5_1 = adaptive_instance_normalization(content_feats[1], style_feats[4])
        t4_1 = alpha * t4_1 + (1 - alpha) * content_feats[0]
        t5_1 = alpha * t5_1 + (1 - alpha) * content_feats[1]
        """
        # Stylize at relu4_1/relu5_1, decode back to an image, re-encode.
        stylized = self.transform(content_feats[0], style_feats[3], content_feats[1], style_feats[4])
        g_t = self.decoder(stylized)
        g_t_feats = self.encode_with_intermediate(g_t)
        # Content loss at relu4_1 and relu5_1; style loss at all five levels.
        loss_c = self.calc_content_loss(g_t_feats[3], content_feats[0]) + self.calc_content_loss(g_t_feats[4], content_feats[1])
        loss_s = self.calc_style_loss(g_t_feats[0], style_feats[0])
        for i in range(1, 5):
            loss_s += self.calc_style_loss(g_t_feats[i], style_feats[i])
        return loss_c, loss_s
import numpy as np
from torch.utils import data
def InfiniteSampler(n):
    """Endlessly yield indices in [0, n), reshuffling after each pass.

    Starts at position n-1 of the first permutation (as in the original), so
    exactly one index is yielded before the first reshuffle.
    """
    pos = n - 1
    order = np.random.permutation(n)
    while True:
        yield order[pos]
        pos += 1
        if pos >= n:
            # Re-seed from OS entropy before drawing the next permutation.
            np.random.seed()
            order = np.random.permutation(n)
            pos = 0
class InfiniteSamplerWrapper(data.sampler.Sampler):
    """torch Sampler adapter over InfiniteSampler; reports an effectively
    unbounded length so the DataLoader never exhausts."""

    def __init__(self, data_source):
        self.num_samples = len(data_source)

    def __iter__(self):
        yield from InfiniteSampler(self.num_samples)

    def __len__(self):
        return 2 ** 31
!mkdir experiments
!mkdir logs
!pip3 install tqdm
!pip3 install TensorboardX
import argparse
import os
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image
from PIL import ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
ImageFile.LOAD_TRUNCATED_IMAGES = True # Disable OSError: image file is truncated
def train_transform():
    """Training pipeline: resize to 512x512, random 256x256 crop, to tensor."""
    return transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.RandomCrop(256),
        transforms.ToTensor(),
    ])
class FlatFolderDataset(data.Dataset):
    """Dataset over every file directly inside `root` (no class subfolders).

    Each item is the file at that index, opened as an RGB PIL image and
    passed through `transform`.
    """

    def __init__(self, root, transform):
        super(FlatFolderDataset, self).__init__()
        self.root = root
        self.paths = os.listdir(self.root)
        self.transform = transform

    def __getitem__(self, index):
        full_path = os.path.join(self.root, self.paths[index])
        image = Image.open(full_path).convert('RGB')
        return self.transform(image)

    def __len__(self):
        return len(self.paths)

    def name(self):
        return 'FlatFolderDataset'
def adjust_learning_rate(optimizer, iteration_count):
    """Imitating the original implementation: inverse-decay schedule
    lr = args.lr / (1 + args.lr_decay * iteration), applied to every group."""
    new_lr = args.lr / (1.0 + args.lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
# ----- training configuration and setup -----
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--content_dir', type=str, default='./train2014',
                    help='Directory path to a batch of content images')
parser.add_argument('--style_dir', type=str, default='./train',
                    help='Directory path to a batch of style images')
parser.add_argument('--vgg', type=str, default='./vgg_normalised.pth')
# training options
parser.add_argument('--save_dir', default='./experiments',
                    help='Directory to save the model')
parser.add_argument('--log_dir', default='./logs',
                    help='Directory to save the log')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_decay', type=float, default=0)
parser.add_argument('--max_iter', type=int, default=160000)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--style_weight', type=float, default=3.0)
parser.add_argument('--content_weight', type=float, default=1.0)
parser.add_argument('--n_threads', type=int, default=16)
parser.add_argument('--save_model_interval', type=int, default=1000)
# parse_args('') supplies an empty argument list, so the defaults above are
# used (avoids reading the notebook kernel's sys.argv).
args = parser.parse_args('')
device = torch.device('cuda')
if not os.path.exists(args.save_dir):
    os.mkdir(args.save_dir)
if not os.path.exists(args.log_dir):
    os.mkdir(args.log_dir)
writer = SummaryWriter(log_dir=args.log_dir)
# No-op rebinds kept from the original script (decoder/vgg defined above).
decoder = decoder
vgg = vgg
vgg.load_state_dict(torch.load(args.vgg))
# Keep children 0..43 — i.e. up to relu5_1, all the stages Net slices.
vgg = nn.Sequential(*list(vgg.children())[:44])
network = Net(vgg, decoder)
network.train()
network.to(device)
content_tf = train_transform()
style_tf = train_transform()
content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)
# Infinite samplers let the loop draw batches forever with next().
content_iter = iter(data.DataLoader(
    content_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(content_dataset),
    num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
    style_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(style_dataset),
    num_workers=args.n_threads))
# Only the decoder and transform are optimized; the encoder is frozen in Net.
optimizer = torch.optim.Adam([
    {'params': network.decoder.parameters()},
    {'params': network.transform.parameters()}], lr=args.lr)
# ----- main training loop -----
# NOTE(review): the SANet paper (arXiv:1812.02342) also trains with an
# identity-loss term built from network(content, content) and
# network(style, style); this loop optimizes only weighted content + style
# losses — confirm against the paper if reproducing its results.
for i in tqdm(range(args.max_iter)):
    adjust_learning_rate(optimizer, iteration_count=i)
    content_images = next(content_iter).to(device)
    style_images = next(style_iter).to(device)
    loss_c, loss_s = network(content_images, style_images)
    loss_c = args.content_weight * loss_c
    loss_s = args.style_weight * loss_s
    loss = loss_c + loss_s
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    writer.add_scalar('loss_content', loss_c.item(), i + 1)
    writer.add_scalar('loss_style', loss_s.item(), i + 1)
    # Periodically checkpoint decoder and transform weights, moved to CPU.
    if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
        state_dict = decoder.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].to(torch.device('cpu'))
        torch.save(state_dict,
                   '{:s}/decoder_iter_{:d}.pth.tar'.format(args.save_dir,
                                                           i + 1))
        state_dict = network.transform.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].to(torch.device('cpu'))
        torch.save(state_dict,
                   '{:s}/transformer_iter_{:d}.pth.tar'.format(args.save_dir,
                                                               i + 1))
writer.close()