Fixing the number of filters in Conv2d layers

Hi,

I am designing an encoder-decoder network with four convolutional layers and four transposed-convolutional layers. My input size is 8 x 1 x 256 x 256: 8 is the batch size, 1 is the number of channels (grayscale image), and H = W = 256. The kernel size is 3 x 3, the stride is 2 x 2, and the padding is 1. However, I have to fix the number of filters at 96 for each layer… I don’t understand how to do that.

96 filters will always produce 96 output channels.

The first time through, the input channels will match your image (1 for grayscale) and the output will be 96. After that, it will be input 96, output 96.

You should try it and use torchinfo to understand how the dimensions change.
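For example, here is a minimal sketch (just the four convolutions with the sizes you described, not your full network) showing that each layer keeps 96 output channels while only H and W change:

```python
# sketch only: four Conv2d layers, each with out_channels=96 (i.e. 96 filters per layer)
import torch.nn as nn
from torchinfo import summary

convs = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=3, stride=2, padding=1),   # grayscale: 1 -> 96 channels
    nn.Conv2d(96, 96, kernel_size=3, stride=2, padding=1),  # 96 -> 96
    nn.Conv2d(96, 96, kernel_size=3, stride=2, padding=1),  # 96 -> 96
    nn.Conv2d(96, 96, kernel_size=3, stride=2, padding=1),  # 96 -> 96
)

# torchinfo prints the output shape of every layer for a given input size
summary(convs, input_size=(8, 1, 256, 256))
```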

The first convolutional layer will have in_channels = 1, out_channels = 96, since the image is grayscale.
The second convolutional layer will have in_channels = 96, out_channels = ?
#------------------------------------------------------------------------------------------------------#
I have four convolutional layers… and each layer must have 96 filters…
#--------------------------------------------------------------------------------------------------------#
I do not understand how this can be done.

import cv2
import torch
import torch.nn as nn
from torchvision import transforms
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
class conv_block(nn.Module):

    def __init__(self, in_c, out_c):
        super().__init__()
        # out_c is the number of filters, i.e. the number of output channels
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size=3, padding=1)
        self.bn1   = nn.InstanceNorm2d(out_c)
        self.relu  = nn.ReLU(inplace=True)

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.bn1(x)
        x = self.relu(x)
        return x

class encoder_block(nn.Module):

    def __init__(self, in_c, out_c):
        super().__init__()
        self.conv = conv_block(in_c, out_c)
        self.pool = nn.MaxPool2d((2, 2))

    def forward(self, inputs):
        x = self.conv(inputs)
        p = self.pool(x)    # each pooling halves H and W: 256 -> 128 -> 64 -> 32 -> 16
        return x, p         # x is kept as the skip connection, p goes to the next stage

class decoder_block(nn.Module):

    def __init__(self, in_c, out_c):
        super().__init__()
        # with kernel_size=3, stride=2, padding=1 and the default output_padding=0,
        # ConvTranspose2d maps a feature map of size H to 2*H - 1 (e.g. 16 -> 31)
        self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=3, stride=2, padding=1)
        self.conv = conv_block(out_c + out_c, out_c)

    def forward(self, inputs, skip):
        x = self.up(inputs)
        x = torch.cat([x, skip], axis=1)
        x = self.conv(x)
        return x

class build_unet(nn.Module):

    def __init__(self):
        super().__init__()
        #-----------------------------------------------------------------------------#
        """ Encoder """
        #-----------------------------------------------------------------------------#
        self.e1 = encoder_block(1, 96)
        self.e2 = encoder_block(96, 96)
        self.e3 = encoder_block(96, 96)
        self.e4 = encoder_block(96, 96)
        #-----------------------------------------------------------------------------#
        """ Bottleneck """
        #-----------------------------------------------------------------------------#
        self.b = conv_block(96, 96)
        #-----------------------------------------------------------------------------#
        """ Decoder """
        #-----------------------------------------------------------------------------#
        self.d1 = decoder_block(96, 96)
        self.d2 = decoder_block(96, 96)
        self.d3 = decoder_block(96, 96)
        self.d4 = decoder_block(96, 3)
        #----------------------------------------------------#
        """ Classifier """
        #----------------------------------------------------#
        self.outputs = nn.Conv2d(96, 1, kernel_size=1, padding=0)

    def forward(self, inputs):
        #-----------------------------------------------------------------------------#
        """ Encoder """
        #-----------------------------------------------------------------------------#
        s1, p1 = self.e1(inputs)
        s2, p2 = self.e2(p1)
        s3, p3 = self.e3(p2)
        s4, p4 = self.e4(p3)
        #-----------------------------------------------------------------------------#
        """ Bottleneck """
        #-----------------------------------------------------------------------------#
        b = self.b(p4)
        #-----------------------------------------------------------------------------#
        """ Decoder """
        #-----------------------------------------------------------------------------#
        d1 = self.d1(b, s4)
        d2 = self.d2(d1, s3)
        d3 = self.d3(d2, s2)
        d4 = self.d4(d3, s1)
        #-------------------------------------------#
        """ Classifier """
        #-------------------------------------------#
        outputs = self.outputs(d4)
        return outputs
#-------------------------------------------------------------------------------#

if __name__ == "__main__":
    # inputs = torch.randn([1, 1, 256, 256])
    image = cv2.imread(r'C:\Users\Idrees Bhat\Desktop\Research\Insha\Dataset\o_gray\1.png', 0)
    convert_tensor = transforms.ToTensor()
    inputs = convert_tensor(image)
    # print(inputs.shape)
    # print(type(inputs))
    model = build_unet()
    y = model(inputs)
    print(y.shape)
Error : RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 31 but got size 32 for tensor number 1 in the list.

Note: the image is grayscale and its size is [256, 256]… I want to fix the number of filters per layer, i.e. 96 filters in every layer.
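The 31 vs 32 mismatch is not about the filter count: with kernel_size=3, stride=2, padding=1 and the default output_padding=0, nn.ConvTranspose2d turns the 16x16 bottleneck into 31x31 ((16 - 1) * 2 - 2 * 1 + 3 = 31), while the skip tensor it is concatenated with is 32x32. A minimal sketch of one possible fix (my assumption, not the only option) is to add output_padding=1 so each decoder stage exactly doubles H and W, and to keep 96 filters in the last decoder block so the 1x1 classifier conv still receives 96 channels; the 3x3 conv / InstanceNorm / ReLU from the conv_block above is inlined here just to keep the sketch self-contained:

```python
# sketch of one way to fix it (assumptions: output_padding=1 and d4 kept at 96 filters)
import torch
import torch.nn as nn

class decoder_block(nn.Module):
    def __init__(self, in_c, out_c):
        super().__init__()
        # output_padding=1 gives (H - 1) * 2 - 2 * 1 + 3 + 1 = 2 * H, so 16 -> 32, 32 -> 64, ...
        self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=3, stride=2,
                                     padding=1, output_padding=1)
        # after concatenating with the skip tensor there are out_c + out_c channels
        self.conv = nn.Sequential(
            nn.Conv2d(out_c + out_c, out_c, kernel_size=3, padding=1),
            nn.InstanceNorm2d(out_c),
            nn.ReLU(inplace=True),
        )

    def forward(self, inputs, skip):
        x = self.up(inputs)              # e.g. [N, 96, 16, 16] -> [N, 96, 32, 32]
        x = torch.cat([x, skip], dim=1)  # spatial sizes now match the encoder skip
        return self.conv(x)

# quick check of the sizes behind the reported error
up_as_posted = nn.ConvTranspose2d(96, 96, kernel_size=3, stride=2, padding=1)
up_fixed     = nn.ConvTranspose2d(96, 96, kernel_size=3, stride=2, padding=1, output_padding=1)
x = torch.randn(1, 96, 16, 16)
print(up_as_posted(x).shape)  # torch.Size([1, 96, 31, 31]) -> mismatches the 32x32 skip
print(up_fixed(x).shape)      # torch.Size([1, 96, 32, 32]) -> matches

# in build_unet.__init__ the last decoder stage would then also keep 96 filters,
# so the classifier nn.Conv2d(96, 1, kernel_size=1) still receives 96 channels:
# self.d4 = decoder_block(96, 96)
```

With those two changes the decoder goes 16 -> 32 -> 64 -> 128 -> 256, every concatenation sees matching spatial sizes, and all convolutional and transposed-convolutional layers keep 96 filters.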