I am using a GAN model and working with CT scans, and I would like to crop them with transforms, but with a crop size different from the size the network expects. When I do, I get this error:
RuntimeError: shape '[-1, 3, 64, 64]' is invalid for input of size 120000
How can I solve the problem?
Discriminator Code:
class GoodDiscriminator(nn.Module):
    """Residual discriminator producing one scalar score per image.

    The original version hard-coded the input resolution to the global
    ``DIM`` (64): ``extract_feature`` reshaped to ``(-1, 3, DIM, DIM)``
    and ``ln1`` was sized as ``4*4*8*dim``.  Feeding crops of any other
    size therefore failed with
    ``RuntimeError: shape '[-1, 3, 64, 64]' is invalid for input of size N``.

    The spatial size is now a constructor parameter ``hw`` (default
    ``DIM``, so existing callers are unaffected).  ``hw`` must be
    divisible by 16 because the four residual blocks each halve the
    spatial resolution, leaving an ``(hw // 16) x (hw // 16)`` map that
    is flattened into the final linear layer.

    Parameters
    ----------
    dim : int
        Base channel width; deeper blocks use multiples of it.
    hw : int
        Height/width of the (square) input crops. Must be divisible by 16.
    """

    def __init__(self, dim=DIM, hw=DIM):
        super(GoodDiscriminator, self).__init__()
        if hw % 16 != 0:
            # Four stride-2 downsamples -> spatial size must divide by 16.
            raise ValueError(
                "hw must be divisible by 16, got {}".format(hw))
        self.dim = dim
        self.hw = hw
        self.conv1 = MyConvo2d(3, self.dim, 3, he_init=False)
        self.rb1 = ResidualBlock(self.dim, 2 * self.dim,
                                 3, resample='down', hw=hw)
        self.rb2 = ResidualBlock(
            2 * self.dim, 4 * self.dim, 3, resample='down', hw=hw // 2)
        self.rb3 = ResidualBlock(
            4 * self.dim, 8 * self.dim, 3, resample='down', hw=hw // 4)
        self.rb4 = ResidualBlock(
            8 * self.dim, 8 * self.dim, 3, resample='down', hw=hw // 8)
        # Feature-map side length after the four downsampling blocks.
        self.final_hw = hw // 16
        self.ln1 = nn.Linear(self.final_hw * self.final_hw * 8 * self.dim, 1)
        self.initialize()

    def initialize(self):
        """Kaiming init for convs, unit/zero for norms, Xavier for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)

    def extract_feature(self, input):
        """Return the flattened feature vector, shape (B, (hw//16)^2 * 8*dim)."""
        output = input.contiguous()
        # Reshape against the configured crop size instead of the global DIM,
        # so crops of size hw x hw (not just 64 x 64) are accepted.
        output = output.view(-1, 3, self.hw, self.hw)
        output = self.conv1(output)
        output = self.rb1(output)
        output = self.rb2(output)
        output = self.rb3(output)
        output = self.rb4(output)
        output = output.view(-1, self.final_hw * self.final_hw * 8 * self.dim)
        return output

    def forward(self, input):
        """Map a batch of images to a flat tensor of per-image scores."""
        output = self.extract_feature(input)
        output = self.ln1(output)
        output = output.view(-1)
        return output
Generator Code:
class GoodGenerator(nn.Module):
    """Residual generator mapping a 128-d latent vector to an image.

    The original version hard-coded a 4x4 starting feature map, which
    (after four upsampling blocks) pins the output resolution to 64x64.
    The target resolution is now a constructor parameter ``hw`` (default
    ``DIM``, so existing callers are unaffected); ``hw`` must be
    divisible by 16 since each of the four residual blocks doubles the
    spatial size from an initial ``(hw // 16) x (hw // 16)`` map.

    Parameters
    ----------
    dim : int
        Base channel width; earlier blocks use multiples of it.
    output_dim : int
        Kept for interface compatibility; not used in the visible code
        (the flatten to OUTPUT_DIM at the end is commented out).
    hw : int
        Height/width of the generated (square) images. Must be divisible
        by 16.
    """

    def __init__(self, dim=DIM, output_dim=OUTPUT_DIM, hw=DIM):
        super(GoodGenerator, self).__init__()
        if hw % 16 != 0:
            # Four stride-2 upsamples -> target size must divide by 16.
            raise ValueError(
                "hw must be divisible by 16, got {}".format(hw))
        self.dim = dim
        # Side length of the initial low-resolution feature map.
        self.start_hw = hw // 16
        self.ln1 = nn.Linear(128, self.start_hw * self.start_hw * 8 * self.dim)
        self.rb1 = ResidualBlock(8 * self.dim, 8 * self.dim, 3, resample='up')
        self.rb2 = ResidualBlock(8 * self.dim, 4 * self.dim, 3, resample='up')
        self.rb3 = ResidualBlock(4 * self.dim, 2 * self.dim, 3, resample='up')
        self.rb4 = ResidualBlock(2 * self.dim, 1 * self.dim, 3, resample='up')
        self.bn = nn.BatchNorm2d(self.dim)
        self.conv1 = MyConvo2d(1 * self.dim, 3, 3)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.initialize()

    def initialize(self):
        """Kaiming init for convs, unit/zero for norms, Xavier for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)

    def forward(self, input):
        """Generate a batch of images in [-1, 1] from latent vectors.

        `input` is expected to be a (B, 128) latent batch; output is
        (B, 3, hw, hw) after four 2x upsamplings from start_hw.
        """
        output = self.ln1(input.contiguous())
        # Unflatten into the starting (hw//16) x (hw//16) feature map.
        output = output.view(-1, 8 * self.dim, self.start_hw, self.start_hw)
        output = self.rb1(output)
        output = self.rb2(output)
        output = self.rb3(output)
        output = self.rb4(output)
        output = self.bn(output)
        output = self.relu(output)
        output = self.conv1(output)
        output = self.tanh(output)
        return output