The discriminator and generator are as follows:

### Discriminator

```python
import torch
import torch.nn as nn

# ndf, ngf, nz are defined elsewhere in my script (feature-map sizes; nz = 100)
class Discriminator(nn.Module):
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.label_embedding = nn.Embedding(10, 10)
        # input is (nc) x 64 x 64
        self.l1 = nn.Sequential(nn.Conv2d(1, ndf, 4, 2, 1, bias=False),
                                nn.LeakyReLU(0.2, inplace=True))
        # state size. (ndf) x 32 x 32
        self.l2 = nn.Sequential(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                                nn.BatchNorm2d(ndf * 2),
                                nn.LeakyReLU(0.2, inplace=True))
        self.drop_out2 = nn.Dropout(0.5)
        # state size. (ndf*2) x 16 x 16
        self.l3 = nn.Sequential(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                                nn.BatchNorm2d(ndf * 4),
                                nn.LeakyReLU(0.2, inplace=True))
        self.drop_out3 = nn.Dropout(0.5)
        # state size. (ndf*4) x 8 x 8
        self.l4 = nn.Sequential(nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),
                                nn.Sigmoid())
        # self.l4 = nn.Sequential(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
        #                         nn.BatchNorm2d(ndf * 8),
        #                         nn.LeakyReLU(0.2, inplace=True), nn.Sigmoid())
        # # state size. (ndf*8) x 4 x 4
        # self.l5 = nn.Sequential(nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False), nn.Sigmoid())

    def forward(self, x, Real_volume):
        c = self.label_embedding(Real_volume)
        print(c.shape)
        print(x.shape)
        x = torch.cat([x, c], 1)
        out = self.l1(x)
        # print("outsize1", out.shape)
        out = self.l2(out)
        out = self.drop_out2(out)
        # print("outsize2", out.shape)
        out = self.l3(out)
        out = self.drop_out3(out)
        out = self.l4(out)
        return out
```
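From what I have read about conditional GANs, the label embedding is usually broadcast to a spatial map and concatenated as extra input channels, since `torch.cat` needs tensors of matching rank. This is only a minimal sketch of that idea (the batch size, 21x21 single-channel input, and 10-dim embedding match my setup; everything else is assumed, not taken from my training code):

```python
import torch
import torch.nn as nn

batch, H, W, emb_dim = 64, 21, 21, 10

emb = nn.Embedding(100, emb_dim)          # num_embeddings must exceed the largest index
x = torch.randn(batch, 1, H, W)           # image batch, 1 channel
labels = torch.randint(0, 90, (batch,))   # integer class indices (LongTensor)

c = emb(labels)                           # (batch, emb_dim)
c = c.view(batch, emb_dim, 1, 1).expand(batch, emb_dim, H, W)
x = torch.cat([x, c], dim=1)              # (batch, 1 + emb_dim, H, W)
# the first Conv2d would then need 1 + emb_dim input channels, e.g.
# nn.Conv2d(1 + emb_dim, ndf, 4, 2, 1, bias=False)
```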
### Generator

```python
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.label_embedding = nn.Embedding(10, 10)
        self.l1 = nn.Sequential(nn.ConvTranspose2d(nz + 10, ngf * 8, 3, 1, 0, bias=False),
                                nn.BatchNorm2d(ngf * 8),
                                nn.ReLU(True))
        # state size. (ngf*8) x 4 x 4
        self.l2 = nn.Sequential(nn.ConvTranspose2d(ngf * 8, ngf * 4, 3, 1, 0, bias=False),
                                nn.BatchNorm2d(ngf * 4),
                                nn.ReLU(True))
        # state size. (ngf*4) x 8 x 8
        self.l3 = nn.Sequential(nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 1, 0, bias=False),
                                nn.BatchNorm2d(ngf * 2),
                                nn.ReLU(True))
        # state size. (ngf*2) x 16 x 16
        self.l4 = nn.Sequential(nn.ConvTranspose2d(ngf * 2, ngf, 3, 2, 1, bias=False),
                                nn.BatchNorm2d(ngf),
                                nn.ReLU(True))
        # state size. (ngf) x 32 x 32
        self.l5 = nn.Sequential(nn.ConvTranspose2d(ngf, 1, 3, 2, 3, bias=False),
                                nn.Sigmoid())  # nn.Tanh()
        # state size. (nc) x 64 x 64

    def forward(self, x, Real_volume):
        c = self.label_embedding(Real_volume)
        print(c.shape)
        x = torch.cat([x, c], 1)
        output = self.l1(x)
        output = self.l2(output)
        output = self.l3(output)
        output = self.l4(output)
        output = self.l5(output)
        return output
```
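For the generator, the usual pattern seems to be concatenating the embedding with the flat noise vector and only then reshaping to `(batch, nz + 10, 1, 1)` for the first `ConvTranspose2d`. A minimal sketch of what I think that looks like (again assumptions, not my exact code):

```python
import torch
import torch.nn as nn

batch, nz, emb_dim = 64, 100, 10

emb = nn.Embedding(100, emb_dim)
z = torch.randn(batch, nz)                # noise as a flat vector
labels = torch.randint(0, 90, (batch,))

c = emb(labels)                           # (batch, emb_dim)
z = torch.cat([z, c], dim=1)              # (batch, nz + emb_dim)
z = z.view(batch, nz + emb_dim, 1, 1)     # spatial input for ConvTranspose2d
# this matches the nz + 10 input channels of l1 above
```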
The error occurs when I call the discriminator with `output = netD(real_cpu, Real_volume.squeeze(1)).view(-1)`, and it points to the line `c = self.label_embedding(Real_volume)`.

`real_cpu` is the training set (64x1x21x21) and `Real_volume` is a 64x1 tensor of volumes (float numbers from 2 to 89); `nz = 100`.

Following a suggestion, I changed the embedding to

```python
self.label_embedding = nn.Embedding(100, 10)
```

With this change I no longer get any error, but I am not sure whether it works correctly or not!?
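If I understand the `nn.Embedding` docs correctly, `num_embeddings` must be strictly greater than the largest index you look up, and the lookup indices must be integers (a LongTensor). That would explain why 100 rows work for volumes up to 89 while 10 rows did not. A quick check of that reading (the cast from float volumes to `long` is my own guess, not something from my training code):

```python
import torch
import torch.nn as nn

vol = torch.tensor([2.0, 45.0, 89.0])     # float volumes, as in my data
emb10 = nn.Embedding(10, 10)
emb100 = nn.Embedding(100, 10)

idx = vol.long()                          # Embedding needs integer indices
# emb10(idx)  -> IndexError: index out of range for indices >= 10
print(emb100(idx).shape)                  # torch.Size([3, 10]) -- no error
```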