Hello, I got this awkward error message and I still do not understand why there is an issue with torch.cat.
This is my code, many thanks for your support:
import torch
import torch.nn as nn

class DoubleConv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
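For reference, a quick shape check on DoubleConv with a dummy input (the 64x64 size is arbitrary). Each 3x3 conv with padding=2 grows every spatial dimension by 2 (H_out = H + 2*2 - 3 + 1 = H + 2), so the block as a whole adds 4:

probe = DoubleConv(3, 64)           # dummy instance, just for the shape check
x = torch.randn(1, 3, 64, 64)
print(probe(x).shape)               # torch.Size([1, 64, 68, 68]) -- 64 + 4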
class DownBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        self.double_conv = DoubleConv(in_channels, out_channels)
        self.down_sample = nn.MaxPool2d(2)

    def forward(self, x):
        skip_out = self.double_conv(x)
        down_out = self.down_sample(skip_out)
        return (down_out, skip_out)
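The same kind of check on DownBlock (again with an arbitrary 64x64 input) shows that the skip tensor keeps the grown size while the pooled output is half of it:

probe = DownBlock(3, 64)
down, skip = probe(torch.randn(1, 3, 64, 64))
print(skip.shape)  # torch.Size([1, 64, 68, 68])
print(down.shape)  # torch.Size([1, 64, 34, 34]) -- MaxPool2d(2) halves 68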
class UpBlock(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, up_sample_mode):
        super(UpBlock, self).__init__()
        if up_sample_mode == 'conv_transpose':
            self.up_sample = nn.ConvTranspose2d(in_channels - out_channels, in_channels - out_channels, kernel_size=2, stride=2, padding=2)
        elif up_sample_mode == 'bilinear':
            self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            raise ValueError("Unsupported `up_sample_mode` (can take one of `conv_transpose` or `bilinear`)")
        self.double_conv = DoubleConv(in_channels, out_channels)

    def forward(self, down_input, skip_input):
        x = self.up_sample(down_input)
        x = torch.cat([x, skip_input], dim=1)
        return self.double_conv(x)
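As I understand it, torch.cat(..., dim=1) only lets the tensors differ in dimension 1 (the channels); batch, height and width have to match exactly. A minimal sketch with made-up shapes:

a = torch.randn(1, 1024, 39, 39)
b = torch.randn(1, 512, 39, 39)
print(torch.cat([a, b], dim=1).shape)  # torch.Size([1, 1536, 39, 39]) -- OK, H and W match
c = torch.randn(1, 512, 42, 42)
# torch.cat([a, c], dim=1)  # fails: sizes must match except in dimension 1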
class Siamese_UNet_Assement(nn.Module):
    def __init__(self, out_classes, up_sample_mode='conv_transpose'):
        super(Siamese_UNet_Assement, self).__init__()
        self.up_sample_mode = up_sample_mode
        # Downsampling path
        self.down_conv1 = DownBlock(3, 64)
        self.down_conv2 = DownBlock(64, 128)
        self.down_conv3 = DownBlock(128, 256)
        self.down_conv4 = DownBlock(256, 512)
        # Bottleneck
        self.double_conv = DoubleConv(512, 1024)
        # Upsampling path
        self.up_conv4 = UpBlock(512 + 1024, 512, self.up_sample_mode)
        self.up_conv3 = UpBlock(256 + 512, 256, self.up_sample_mode)
        self.up_conv2 = UpBlock(128 + 256, 128, self.up_sample_mode)
        self.up_conv1 = UpBlock(64 + 128, 64, self.up_sample_mode)
        # Final convolution
        self.conv_last = nn.Conv2d(64, out_classes, kernel_size=1)

    def forward(self, input1, input2):
        # UNet 1
        # Encoder
        enc1_1, skip1_out1 = self.down_conv1(input1)
        enc1_2, skip2_out1 = self.down_conv2(enc1_1)
        enc1_3, skip3_out1 = self.down_conv3(enc1_2)
        enc1_4, skip4_out1 = self.down_conv4(enc1_3)
        bottleneck_1 = self.double_conv(enc1_4)
        # Decoder
        dec4_1 = self.up_conv4(bottleneck_1, skip4_out1)
        dec3_1_ = self.up_conv3(dec4_1, skip3_out1)
        dec2_1 = self.up_conv2(dec3_1_, skip2_out1)
        dec1_1 = self.up_conv1(dec2_1, skip1_out1)
        dec1_1 = self.conv_last(dec1_1)
        # UNet 2
        # Encoder
        enc2_1, skip1_out2 = self.down_conv1(input2)
        enc2_2, skip2_out2 = self.down_conv2(enc2_1)
        enc2_3, skip3_out2 = self.down_conv3(enc2_2)
        enc2_4, skip4_out2 = self.down_conv4(enc2_3)
        bottleneck_2 = self.double_conv(enc2_4)
        # Decoder
        dec4_2 = self.up_conv4(bottleneck_2, skip4_out2)
        dec3_2_ = self.up_conv3(dec4_2, skip3_out2)
        dec2_2 = self.up_conv2(dec3_2_, skip2_out2)
        dec1_2 = self.up_conv1(dec2_2, skip1_out2)
        dec1_2 = self.conv_last(dec1_2)
        # Siamese: concatenate the two decoder outputs along the channel dimension
        output = torch.cat((dec1_1, dec1_2), 1)
        return output
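This is how I trigger the error with dummy tensors; the 256x256 input size and out_classes=2 are just example values for the test, but as far as I can tell any input size fails here, only with different numbers in the message:

model = Siamese_UNet_Assement(out_classes=2)  # out_classes=2 is arbitrary
x1 = torch.randn(1, 3, 256, 256)
x2 = torch.randn(1, 3, 256, 256)
out = model(x1, x2)  # raises the RuntimeError below, inside up_conv4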
And here is the error message:
File ~/codes/Buldings_damage_assessment/models/testUNet_Assement.py:135, in Siamese_UNet_Assement.forward(self, input1, input2)
    132 bottleneck_1 = self.double_conv(enc1_4)
    134 # Decoder
--> 135 dec4_1 = self.up_conv4(bottleneck_1, skip4_out1)
    136 dec3_1_ = self.up_conv3(dec4_1, skip3_out1)
    137 dec2_1 = self.up_conv2(dec3_1_, skip2_out1)

File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
   1496 # If we don't have any hooks, we want to skip the rest of the logic in
   1497 # this function, and just call forward.
   1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1499         or _global_backward_pre_hooks or _global_backward_hooks
   1500         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501     return forward_call(*args, **kwargs)
   1502 # Do not call functions when jit is used
   1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/codes/Buldings_damage_assessment/models/testUNet_Assement.py:94, in UpBlock.forward(self, down_input, skip_input)
     92 def forward(self, down_input, skip_input):
     93     x = self.up_sample(down_input)
---> 94     x = torch.cat([x, skip_input], dim=1)
     95     return self.double_conv(x)

RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 42 but got size 39 for tensor number 1 in the list.
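For what it's worth, if I trace the shapes by hand for a 256x256 input, up_sample(bottleneck_1) comes out at 42x42 while skip4_out1 is 39x39, so the failing torch.cat can be reproduced in isolation (the channel counts follow from the model: 1024 after the transposed conv, 512 in the skip):

x = torch.randn(1, 1024, 42, 42)    # up-sampled bottleneck
skip = torch.randn(1, 512, 39, 39)  # skip connection from down_conv4
torch.cat([x, skip], dim=1)
# RuntimeError: Sizes of tensors must match except in dimension 1.
# Expected size 42 but got size 39 for tensor number 1 in the list.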