ModuleList not working in forward method and outputs same dimension

Hi!
I’m trying to implement a multi-stream CNN. Since I don’t know in advance how many streams will be needed (it depends on the number of views), I decided to use nn.ModuleList, but it doesn’t seem to work as expected.
Here is my code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class MultiStreamModel(nn.Module):
    def __init__(self, n_classe, n_view):
        super(MultiStreamModel, self).__init__()
        self.n_classe = n_classe
        self.n_view = n_view

        self.conv1 = nn.ModuleList([nn.Conv2d(
            in_channels=3, out_channels=64,
            kernel_size=(3, 3), stride=(1, 1))
            for _ in range(n_view)])

        self.conv2 = nn.ModuleList([nn.Conv2d(
            in_channels=64, out_channels=128,
            kernel_size=(3, 3), stride=(1, 1))
            for _ in range(n_view)])

        self.conv3 = nn.ModuleList([nn.Conv2d(
            in_channels=128, out_channels=256,
            kernel_size=(3, 3), stride=(1, 1))
            for _ in range(n_view)])

        self.conv4 = nn.ModuleList([nn.Conv2d(
            in_channels=256, out_channels=512,
            kernel_size=(3, 3), stride=(1, 1))
            for _ in range(n_view)])
        # Here, the views are concatenated along axis 1 and flattened
        self.fc1 = nn.Linear(512 * n_view, 1024)
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(1024, n_classe)

    def forward(self, x):
        x = [F.relu(conv(x[i])) for i, conv in enumerate(self.conv1)]  # 8 4 4 4 4
        x = [F.relu(conv(x[i])) for i, conv in enumerate(self.conv2)]  # 8 4 4 4 4
        x = [F.relu(conv(x[i])) for i, conv in enumerate(self.conv3)]  # 8 4 4 4 4
        x = [F.relu(conv(x[i])) for i, conv in enumerate(self.conv4)]  # 8 4 4 4 4
        # Concat and flatten
        #x = self.fc1(x)
        #x = self.dropout(x)
        #x = self.fc2(x)
        #return x

With the following input:

model = MultiStreamModel(n_classe=1327, n_view=8)
batch = torch.randn(8, 4, 3, 128, 64)  # n_view x B x C x H x W
model = model.to('cuda:0')
batch = batch.to('cuda:0')
logit = model(batch)

In the forward pass, I always get a list of size (8, 4, 4, 4) after each conv. At worst, I was expecting a list of tensors with the same number of output channels as specified in my ModuleList.

Since it’s a list, does this operation happen on the CPU or on the GPU? And how can I make sure I get a tensor of shape torch.Size([B, C, H, W]) and not a list?
Thank you
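
Regarding the CPU/GPU question: a Python list is only a container of references, so the convolutions themselves still run on the GPU as long as the input tensors and the module parameters live there; only the iteration over the list happens in Python. A quick way to verify, reusing the model and batch from above (a sketch, assuming everything is already on cuda:0):

out = [F.relu(conv(batch[i])) for i, conv in enumerate(model.conv1)]
print(type(out))       # <class 'list'>: a plain Python container
print(out[0].device)   # cuda:0, each tensor still lives on the GPU
print(out[0].shape)    # torch.Size([4, 64, 126, 62]) for the 128x64 inputs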

One solution is to concatenate the tensors in the list after adding a new dimension:

    def forward(self, x):
        x = torch.cat([F.relu(conv(x[i])).unsqueeze(dim=0) for i, conv in enumerate(self.conv1)], dim=0)
        x = torch.cat([F.relu(conv(x[i])).unsqueeze(dim=0) for i, conv in enumerate(self.conv2)], dim=0)
        x = torch.cat([F.relu(conv(x[i])).unsqueeze(dim=0) for i, conv in enumerate(self.conv3)], dim=0)
        x = torch.cat([F.relu(conv(x[i])).unsqueeze(dim=0) for i, conv in enumerate(self.conv4)], dim=0)
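
Note that after the first torch.cat line, x is no longer a list but a single tensor of shape (n_view, B, 64, H', W'), and indexing x[i] still selects the i-th view, so the remaining stages work unchanged. Equivalently, torch.stack performs the unsqueeze-and-cat in one call:

        x = torch.stack([F.relu(conv(x[i])) for i, conv in enumerate(self.conv1)], dim=0)

To reach the commented-out classifier, the spatial dimensions still have to be collapsed, because fc1 expects 512 * n_view input features. Here is a minimal sketch of the rest of forward; the global average pooling is an assumption on my part, since the original post does not say how the feature maps are reduced:

        # x: (n_view, B, 512, H', W') after the conv4 stage
        x = x.mean(dim=(3, 4))                                   # assumed global average pool: (n_view, B, 512)
        x = x.permute(1, 0, 2).reshape(-1, 512 * self.n_view)    # views side by side: (B, 512 * n_view)
        x = self.fc1(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x

With that in place, logit in the snippet above should come out as torch.Size([4, 1327]).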