Conv1d not supporting the input

Hi,

I am working on a UNet architecture. My input to the model consists of 3 series of floating-point values.

I take 400 such rows at a time, and my batch size is 32, so one batch has shape 32x3x400.

The structure is shown below.

index   0    1    2    3   ...  399  ...  799
s1      v1   ...
s2      v2   ...
s3      v3   ...
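
In tensor terms, one such batch therefore looks like, for example:

x = torch.randn(32, 3, 400)   # [batch, channels (s1, s2, s3), sequence_length]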

Using the architecture below, I am able to generate my desired output.

import torch
import torch.nn as nn
import torch.nn.functional as F


class DoubleConv(nn.Module):
    """(Conv1d => BatchNorm1d => ReLU) twice"""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv1d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm1d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm1d(out_channels),  # must match out_channels of the preceding conv
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.double_conv(x)


class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool1d(2),
            DoubleConv(in_channels, out_channels)
        )

    def forward(self, x):
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, linear=True):
        super().__init__()

        # if linear, use upsampling and normal convolutions to reduce the number of channels
        if linear:
            self.up = nn.Upsample(scale_factor=2, mode='linear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose1d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is [batch, channels, length]; pad x1 along the length dim to match x2
        diffL = x2.size()[2] - x1.size()[2]
        x1 = F.pad(x1, [diffL // 2, diffL - diffL // 2])
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class OutConv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)

class UNet1D(nn.Module):
    def __init__(self, n_channels=1, n_classes=1, channel_sizes=[8, 16, 32, 64, 128], linear=True):
        super(UNet1D, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.linear = linear
        # channel sizes are specified in the main program:
        # n_channels=3, n_classes=3, channel_sizes=[32, 64, 128, 256, 512]
        self.inc = DoubleConv(n_channels, channel_sizes[0])
        self.down1 = Down(channel_sizes[0], channel_sizes[1])
        self.down2 = Down(channel_sizes[1], channel_sizes[2])
        self.down3 = Down(channel_sizes[2], channel_sizes[3])
        factor = 2 if linear else 1
        self.down4 = Down(channel_sizes[3], channel_sizes[4] // factor)
        self.up1 = Up(channel_sizes[4], channel_sizes[3] // factor, linear)
        self.up2 = Up(channel_sizes[3], channel_sizes[2] // factor, linear)
        self.up3 = Up(channel_sizes[2], channel_sizes[1] // factor, linear)
        self.up4 = Up(channel_sizes[1], channel_sizes[0], linear)
        self.outc = OutConv(channel_sizes[0], n_classes)

    def forward(self, x):             # x:  torch.Size([32, 3, 400])
        x1 = self.inc(x)              # x1: torch.Size([32, 32, 400])
        x2 = self.down1(x1)           # x2: torch.Size([32, 64, 200])
        x3 = self.down2(x2)           # x3: torch.Size([32, 128, 100])
        x4 = self.down3(x3)           # x4: torch.Size([32, 256, 50])
        x5 = self.down4(x4)           # x5: torch.Size([32, 256, 25])
        x = self.up1(x5, x4)          # x:  torch.Size([32, 128, 50])
        x = self.up2(x, x3)           # x:  torch.Size([32, 64, 100])
        x = self.up3(x, x2)           # x:  torch.Size([32, 32, 200])
        x = self.up4(x, x1)           # x:  torch.Size([32, 32, 400])
        logits = self.outc(x)         # torch.Size([32, 3, 400])
        return logits
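
For reference, a minimal sketch of how the model is instantiated and called (using the channel sizes noted in the comments above):

model = UNet1D(n_channels=3, n_classes=3, channel_sizes=[32, 64, 128, 256, 512])
x = torch.randn(32, 3, 400)    # [batch, channels, sequence_length]
logits = model(x)              # torch.Size([32, 3, 400])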


However, when I instead use the input laid out as shown below, i.e. as 32x400x3, the above architecture fails.

index   s1   s2   s3
0       v1   v2   v3
...
399
...
799
...
My question: what changes should I make to the architecture so that it supports this input layout?
Please help!

nn.Conv1d expects an input of size [batch, channels, sequence_length] and slides its kernels along the sequence_length dim. So if you swap channels and sequence_length, you will likely not get the desired output. If your data has shape [batch, sequence_length, channels], you can simply use:

data = data.permute(0, 2, 1)  # [batch, seq_len, channels] -> [batch, channels, seq_len]
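
For example, a minimal sketch using the UNet1D above (model is assumed to be the instance defined earlier; the shapes match the ones printed in the question):

data = torch.randn(32, 400, 3)   # your layout: [batch, sequence_length, channels]
data = data.permute(0, 2, 1)     # -> [32, 3, 400], the layout Conv1d expects
logits = model(data)             # -> [32, 3, 400]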

Yes, that’s true.
But I want to compute the loss between the target and the logits, which are of size 400x3. What can I do for that?

The point of a Conv1d is to detect edges within a sequential set of data. That means the data must contain spatially relevant information along the sequence dimension, which is not the case for the channels dimension.

However, if you believe your data does have spatially relevant information in the channels dimension too, then you would want to use a Conv2d and add a channels dim at dim=1 via data.unsqueeze(1). That will detect edges that are relevant across both dimensions.
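
A minimal sketch of that idea (the Conv2d layer here is only illustrative; the 8 output channels are an arbitrary choice):

data = torch.randn(32, 3, 400)   # [batch, channels, sequence_length]
data = data.unsqueeze(1)         # -> [32, 1, 3, 400]: a single input channel for Conv2d
conv = nn.Conv2d(1, 8, kernel_size=3, padding=1)
out = conv(data)                 # -> [32, 8, 3, 400]; kernels now span both dims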