Runtime error in my implementation of skip connections in a U-Net generator

I am trying to build a U-Net generator model for a CycleGAN architecture. I want to save the tensors from the down-sampling layers that are to be concatenated into the up-sampling layers, storing them in another tensor I create using torch.empty. The tensors to be saved are of different sizes depending on which down-sampling layer they come from. My code is as follows:

class Gen_Block(nn.Module):
    def __init__(self, in_channels, out_channels, stride, padding):
        super(Gen_Block, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=stride, padding=padding, padding_mode='reflect'),
            nn.InstanceNorm2d(out_channels),
            nn.LeakyReLU(0.2, inplace=True),
        )
    def forward(self, x):
        return self.block(x)


class Generator(nn.Module):
    def __init__(self, input_channels=INPUT_CHANNELS, channels_list=[32,64,128,256,512]):
        super(Generator, self).__init__()
        self.skip = torch.empty(4, dtype=torch.float32, device=DEVICE)
        self.down_layers = nn.ModuleList()
        self.down_layers.append(nn.Sequential(
            Gen_Block(input_channels, channels_list[0], stride=1, padding='same'),
            Gen_Block(channels_list[0], channels_list[0], stride=2, padding=1),
        ))
        for down in range(len(channels_list)-1):
            self.down_layers.append(nn.Sequential(
                Gen_Block(channels_list[down], channels_list[down+1], stride=1, padding='same'),
                Gen_Block(channels_list[down+1], channels_list[down+1], stride=2, padding=1),
            ))
        self.up_layers = nn.ModuleList()
        self.up_layers.append(nn.Sequential(
            nn.Upsample(scale_factor=2, mode='nearest'),
            Gen_Block(channels_list[-1], channels_list[-2], stride=1, padding=1),
        ))
        for up in range(-2, -1*len(channels_list), -1):
            self.up_layers.append(nn.Sequential(
                Gen_Block(channels_list[up+1], channels_list[up], stride=1, padding='same'),
                nn.Upsample(scale_factor=2, mode='nearest'),
                Gen_Block(channels_list[up], channels_list[up-1], stride=1, padding='same'),
            ))
        self.up_layers.append(nn.Sequential(
            Gen_Block(channels_list[1], channels_list[0], stride=1, padding='same'),
            nn.Upsample(scale_factor=2, mode='nearest'),
            Gen_Block(channels_list[0], channels_list[0], stride=1, padding='same'),
        ))
        self.up_layers.append(nn.Conv2d(channels_list[0], input_channels, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        out = x
        for index, down in enumerate(self.down_layers):
            out = down(out)
            if index in range(4):
                self.skip[index] = out[0]

        for index, up in enumerate(self.up_layers):
            out = up(out)
            if index in range(4):
                out = torch.cat((out, self.skip[-1*(index+1)]), dim=0)
        return out


def gen_test():
    x = torch.randn(1, 1, 512, 512)
    model = Generator()
    model(x)

However, a runtime error occurs:

Traceback (most recent call last):
File "C:\Users\AlanLam\PycharmProjects\dissertation\model2.py", line 127, in <module>
gen_test()
File "C:\Users\AlanLam\PycharmProjects\dissertation\model2.py", line 124, in gen_test
model(x)
File "C:\Users\AlanLam\anaconda3\envs\notebook\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\AlanLam\PycharmProjects\dissertation\model2.py", line 109, in forward
self.skip[index] = out[0]
RuntimeError: expand(torch.FloatTensor{[32, 256, 256]}, size=[]): the number of sizes provided (0) must be greater or equal to the number of dimensions in the tensor (3)

Can anyone tell me what the problem is?