import itertools

import torch
import torch.nn as nn
import torch.optim as optim

# model class
class Framework(nn.Module):
    def __init__(self, input_shape, representation_size, output_shape,
                 fc1, fc21, fc22, fc3, fc4):
        super(Framework, self).__init__()
        self.input_shape = input_shape
        # the layer modules are passed in, so several Framework instances
        # can reuse (share) the same underlying parameters
        self.fc1 = fc1
        self.fc21 = fc21
        self.fc22 = fc22
        self.fc3 = fc3
        self.fc4 = fc4
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.elu = nn.ELU()

    def encoder(self, x):
        """Encode a batch of samples and return posterior parameters for each point."""
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.elu(self.fc22(h1)) + 1

    def decoder(self, z):
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))

    def reparam(self, mu, logvar):
        """Reparameterisation trick to sample z values.
        Stochastic during training; returns the mode during evaluation."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = torch.randn_like(std)  # noise with the same shape and device as std
            return eps.mul(std).add_(mu)
        else:
            return mu

    def get_z(self, x):
        """Encode a batch of data points, x, into their z representations."""
        mu, logvar = self.encoder(x.view(-1, self.input_shape))
        return self.reparam(mu, logvar)

    def forward(self, x):
        """Take a batch of samples, encode them, then decode them again to compare."""
        mu, logvar = self.encoder(x.view(-1, self.input_shape))
        z = self.reparam(mu, logvar)
        return self.decoder(z), mu, logvar, z
# parameter sharing: the same layer objects are handed to every model
fc1 = nn.Linear(input_shape, 512, bias=True)
torch.nn.init.xavier_uniform_(fc1.weight)
fc21 = nn.Linear(512, representation_size, bias=True)
torch.nn.init.xavier_uniform_(fc21.weight)
fc22 = nn.Linear(512, representation_size, bias=True)
torch.nn.init.xavier_uniform_(fc22.weight)
fc3 = nn.Linear(representation_size, 512, bias=True)
torch.nn.init.xavier_uniform_(fc3.weight)
fc4 = nn.Linear(512, output_shape, bias=True)
torch.nn.init.xavier_uniform_(fc4.weight)

# complete models
model1 = Framework(input_shape, representation_size, output_shape, fc1, fc21, fc22, fc3, fc4)
model2 = Framework(input_shape, representation_size, output_shape, fc1, fc21, fc22, fc3, fc4)
model3 = Framework(input_shape, representation_size, output_shape, fc1, fc21, fc22, fc3, fc4)
model1.to(device)
model2.to(device)
model3.to(device)
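Because the same nn.Linear objects are passed to every constructor, the three models should reference identical modules rather than copies. This is the quick sanity check I use to convince myself the sharing works (just a sketch based on the code above):

# sanity check (sketch): all three models should hold the very same layer objects,
# so each parameter tensor appears only once across them
assert model1.fc1 is model2.fc1 is model3.fc1
shared = {p.data_ptr() for p in itertools.chain(model1.parameters(),
                                                model2.parameters(),
                                                model3.parameters())}
print(len(shared), len(list(model1.parameters())))  # both should be 10 (5 layers x weight + bias)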
# define optimizers
f_params = model1.parameters()
s_params = model2.parameters()
t_params = model3.parameters()
dvne_params = itertools.chain(f_params, s_params, t_params)
optimizer = optim.RMSprop(f_params, lr=learning_rate)
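Since all three models expose the same underlying tensors through .parameters(), chaining them as in dvne_params yields each shared tensor three times. An alternative I was considering (only a sketch, the variable name is my own) is to deduplicate before constructing the optimizer:

# sketch: keep each shared parameter tensor only once before handing it to the optimizer
unique_params = list({id(p): p for p in itertools.chain(model1.parameters(),
                                                        model2.parameters(),
                                                        model3.parameters())}.values())
optimizer = optim.RMSprop(unique_params, lr=learning_rate)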
Is this the correct way to share parameters between the three model classes?