Model not going properly to GPU

I’m getting

RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same

when trying to train my model using GPU. My data is properly inserted at GPU, but my model is not going there and I don’t really know how to do it. I’ve read that you have to use nn.ModuleList instead of lists to push all parameters to the device. I’m not using lists but also I wrote the model slightly different than I usually do:

class AutoEncoder(nn.Module):

    def __init__(self):
        self.maxpool = nn.MaxPool2d(kernel_size=(2,2))
        self.conv1 = nn.Conv2d(in_channels=3 ,  out_channels=64,  kernel_size=(5,5), stride=1, padding=2)
        self.conv2 = nn.Conv2d(in_channels=64 , out_channels=64,  kernel_size=(5,5), stride=1, padding=2)
        self.conv3 = nn.Conv2d(in_channels=64 , out_channels=128, kernel_size=(3,3), stride=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3,3), stride=1, padding=1)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3,3), stride=1, padding=1)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3,3), stride=1, padding=1)
        self.conv7 = nn.Conv2d(in_channels=128, out_channels=64,  kernel_size=(5,5), stride=1, padding=2)
        self.conv8 = nn.Conv2d(in_channels=64,  out_channels=64,  kernel_size=(5,5), stride=1, padding=2)
        self.conv9 = nn.Conv2d(in_channels=64,  out_channels=3,   kernel_size=(3,3), stride=1, padding=1)
    def convtransposed(self, x, in_filters: int, out_filters: int):
        deconv = nn.ConvTranspose2d(in_channels=in_filters,out_channels=out_filters,kernel_size=(2,2), stride=2)
        return deconv(x)

    def encoder(self, x):
        self._keep_conv1 = self.conv1(x)
        x = F.relu(self.maxpool(self._keep_conv1))
        self._keep_conv2 = self.conv2(x)
        x = F.relu(self.maxpool(self._keep_conv2))
        self._keep_conv3 = self.conv3(x)
        x = F.relu(self.maxpool(self._keep_conv3))
        x = F.relu(self.maxpool(self.conv4(x)))
        x = F.relu(self.conv5(x))
        return x
    def decoder(self, x):
        upsampling1 = self.convtransposed(x, in_filters=128, out_filters=128)
        conv6 = F.relu(self.conv6(upsampling1))
        upsampling2 = self.convtransposed(conv6, in_filters=128, out_filters=128)
        skip1 = upsampling2 + self._keep_conv3
        conv7 = F.relu(self.conv7(skip1))
        upsampling3 = self.convtransposed(conv7, in_filters=64, out_filters=64)
        skip2 = upsampling3 + self._keep_conv2
        conv8 = F.relu(self.conv8(skip2))
        upsampling4 = self.convtransposed(conv8, in_filters=64, out_filters=64)
        skip3 = upsampling4 + self._keep_conv1
        conv9 = F.relu(self.conv9(skip3))
        return conv9
    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

How do I push all parameters properly to the GPU?

I believe it is because nn.ConvTranspose2d is not defined in __init__. Any reason why it is not defined there but defined when forward is called?

1 Like

No particular reason. I wrote them as

self.convtransposed64 = nn.ConvTranspose2d(in_channels=64,out_channels=64,kernel_size=(2,2), stride=2)
self.convtransposed128 = nn.ConvTranspose2d(in_channels=128,out_channels=128,kernel_size=(2,2), stride=2)

In init and it worked. Thank you!