I am building a CNN model with residual network achidecture for input images with different dimension. At the same time, I want to make sure the output has the same dimension as the input.
In the forward pass of the Generator object, I declare a list object size_list
to store the dimension of the tensors during its down-sampling steps. So that when the tensor is unsampling, I can pass the required size from the list to the forward function of ConvTranspose2D in the argument output_size
.
However, once I added this list object, I run into this error even though I explicitly pass both input and model into my cuda device.
RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same
The error smply would not show up if I don’t use the list object, or run the model in cpu device. I don’t know how the list object influence my model getting into the cuda device. Any help is appreciated. Thank you.
class Transpose_Block(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, output_size, padding=1):
super(Transpose_Block, self).__init__()
self.output_size = output_size
self.block = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
def centercrop(self, x):
dimension = x.shape
top = 0 + math.floor((dimension[-2] - self.output_size[0]) / 2)
bottom = dimension[-2] - math.ceil((dimension[-2] - self.output_size[0]) / 2)
left = 0 + math.floor((dimension[-1] - self.output_size[1]) / 2)
right = dimension[-1] - math.ceil((dimension[-1] - self.output_size[1]) / 2)
print(top, bottom, left, right)
return x[:,:, top:bottom, left:right]
def forward(self, x):
try:
out = self.block(x, output_size=self.output_size)
except ValueError:
out = self.block(x)
out = self.centercrop(out)
print('manual crop')
return out
class Generator(nn.Module):
def __init__(self, input_channels=INPUT_CHANNELS, channels_list=[64, 128, 256, 512], n_res=NUM_RES):
super(Generator, self).__init__()
self.input_channels = input_channels
self.channels_list = channels_list
self.list = []
self.initial = nn.Sequential(
nn.Conv2d(input_channels, channels_list[0], kernel_size=7, stride=1, padding=3, padding_mode='reflect'),
nn.InstanceNorm2d(channels_list[0]),
nn.ReLU(),
)
self.down_layers = nn.ModuleList()
self.res_layers = nn.ModuleList()
self.down_layers.append(self.initial)
for down in range(len(channels_list)-1):
self.down_layers.append(Gen_Block(channels_list[down], channels_list[down + 1]))
for res in range(n_res):
self.res_layers.append(Res_Block(channels_list[-1]))
def forward(self, x):
# size_tensor = torch.empty((len(self.channels_list),2))
for (index, down) in enumerate(self.down_layers):
# size_tensor[index] = torch.tensor(x.size()[-2:])
self.list.append(x.size()[-2:])
x = down(x)
for res in self.res_layers:
x = res(x)
for up in range(len(self.channels_list)-1, 0, -1):
x = Transpose_Block(self.channels_list[up], self.channels_list[up-1], kernel_size=3, stride=2, output_size=self.list[up], padding=1)(x)
x = nn.InstanceNorm2d(self.channels_list[up-1])(x)
x = nn.ReLU()(x)
x = Transpose_Block(self.channels_list[0], self.input_channels, kernel_size=7, stride=1, output_size=self.list[0], padding=3)(x)
x = nn.Tanh()(x)
return x
def gen_test():
x = torch.randn(1, 1, 100, 150).to(DEVICE)
model = Generator().to(DEVICE)
print(model)
print(model(x).shape)
if __name__ == '__main__':
gen_test()
pass