I have grayscale images of size 256 x 256. I am using four convolutional layers with 96 filters per layer. However, I am getting an error. Please see my code:
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
import torch
import torch.nn as nn
import cv2
from torchvision import transforms
from torchviz import make_dot
import numpy as np
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
class conv_block(nn.Module):
    def __init__(self, in_c, out_c, kernel_size=3):
        super().__init__()
        # padding=0: a 3x3 convolution shrinks H and W by 2
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size, padding=0)
        self.in1 = nn.InstanceNorm2d(out_c)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.in1(x)
        x = self.relu(x)
        return x
class encoder_block(nn.Module):
    def __init__(self, in_c, out_c):
        super().__init__()
        self.conv = conv_block(in_c, out_c)
        self.pool = nn.MaxPool2d((2, 2))

    def forward(self, inputs):
        x = self.conv(inputs)
        p = self.pool(x)
        return x, p
class decoder_block(nn.Module):
    def __init__(self, in_c, out_c):
        super().__init__()
        self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=3, stride=2, padding=0)
        self.conv = conv_block(out_c + out_c, out_c)

    def forward(self, inputs, skip):
        x = self.up(inputs)
        # requires x and skip to have the same H x W; this is where it fails
        x = torch.cat([x, skip], axis=1)
        x = self.conv(x)
        return x
class build_unet(nn.Module):
    def __init__(self):
        super().__init__()
        #---------------------------------------------------------------------#
        #                               ENCODER                                #
        #---------------------------------------------------------------------#
        self.e1 = encoder_block(1, 96)
        self.e2 = encoder_block(96, 96)
        self.e3 = encoder_block(96, 96)
        self.e4 = encoder_block(96, 96)
        #---------------------------------------------------------------------#
        #                              BOTTLENECK                              #
        #---------------------------------------------------------------------#
        self.b = conv_block(96, 96, 1)
        #---------------------------------------------------------------------#
        #                               DECODER                                #
        #---------------------------------------------------------------------#
        self.d1 = decoder_block(96, 96)
        self.d2 = decoder_block(96, 96)
        self.d3 = decoder_block(96, 96)
        self.d4 = decoder_block(96, 3)
        #---------------------------------------------------------------------#
        # """ Classifier """ (currently unused)                                #
        #---------------------------------------------------------------------#
        # self.outputs = nn.Conv2d(1, 96, kernel_size=1, padding=1)
    def forward(self, inputs):
        #---------------------------------------------------------------------#
        # """ Encoder """                                                      #
        #---------------------------------------------------------------------#
        s1, p1 = self.e1(inputs)
        s2, p2 = self.e2(p1)
        s3, p3 = self.e3(p2)
        s4, p4 = self.e4(p3)
        #---------------------------------------------------------------------#
        # """ Bottleneck """                                                   #
        #---------------------------------------------------------------------#
        b = self.b(p4)
        #---------------------------------------------------------------------#
        # """ Decoder """                                                      #
        #---------------------------------------------------------------------#
        d1 = self.d1(b, s4)
        d2 = self.d2(d1, s3)
        d3 = self.d3(d2, s2)
        d4 = self.d4(d3, s1)
        return d4
        #---------------------------------------------------------------------#
        # """ Classifier """ (currently unused)                                #
        #---------------------------------------------------------------------#
        # outputs = self.outputs(d4)
        # return outputs
#-------------------------------------------------------------------------------#
if __name__ == "__main__":
    # inputs = torch.randn([1, 1, 256, 256])
    image = cv2.imread(r'C:\Users\Idrees Bhat\Desktop\Research\Insha\Dataset\o_gray\1.png', 0)
    convert_tensor = transforms.ToTensor()
    inputs = convert_tensor(image)
    # print(inputs.shape)    # torch.Size([1, 256, 256])
    # print(type(inputs))
    model = build_unet()
    y = model(inputs)
    # print(y.shape)
#-----------------------------------------------------------------------------------------------------------------------
ERROR:
File ~\AppData\Local\anaconda3\Lib\site-packages\spyder_kernels\py3compat.py:356 in compat_exec
exec(code, globals, locals)
File c:\users\idrees bhat\desktop\insha\generator_aiwa.py:113
y = model (inputs)
File ~\AppData\Local\anaconda3\Lib\site-packages\torch\nn\modules\module.py:1518 in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File ~\AppData\Local\anaconda3\Lib\site-packages\torch\nn\modules\module.py:1527 in _call_impl
return forward_call(*args, **kwargs)
File c:\users\idrees bhat\desktop\insha\generator_aiwa.py:93 in forward
d1 = self.d1(b, s4)
File ~\AppData\Local\anaconda3\Lib\site-packages\torch\nn\modules\module.py:1518 in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File ~\AppData\Local\anaconda3\Lib\site-packages\torch\nn\modules\module.py:1527 in _call_impl
return forward_call(*args, **kwargs)
File c:\users\idrees bhat\desktop\insha\generator_aiwa.py:47 in forward
x = torch.cat([x, skip], axis=1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 29 but got size 28 for tensor number 1 in the list.
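The mismatch is a spatial-size problem, not a channel problem. With padding=0, every 3x3 convolution shrinks H and W by 2, and MaxPool2d floors odd sizes, so the encoder path goes 256 → 254 → 127 → 125 → 62 → 60 → 30 → 28 → 14. The skip s4 is therefore 28 x 28, while ConvTranspose2d(kernel_size=3, stride=2, padding=0) maps 14 to (14 - 1) * 2 + 3 = 29, which is exactly the "Expected size 29 but got size 28" in the traceback.

Below is a minimal sketch of one common way to keep the sizes aligned (the "same-padding" U-Net variant): padding=1 on the 3x3 convolutions so they preserve H and W, and kernel_size=2, stride=2 transposed convolutions so each upsample exactly doubles. This is one fix among several (the original unpadded U-Net instead crops the skips before concatenation), so treat it as a sketch rather than a drop-in patch:
#-----------------------------------------------------------------------------#
import torch
import torch.nn as nn

class conv_block(nn.Module):
    def __init__(self, in_c, out_c, kernel_size=3):
        super().__init__()
        # padding=kernel_size // 2 keeps H and W unchanged for odd kernels
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size, padding=kernel_size // 2)
        self.in1 = nn.InstanceNorm2d(out_c)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, inputs):
        return self.relu(self.in1(self.conv1(inputs)))

class decoder_block(nn.Module):
    def __init__(self, in_c, out_c):
        super().__init__()
        # kernel_size=2, stride=2 doubles H and W exactly, so the upsampled
        # map lines up with the skip whenever the pooled sizes were even
        self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=2, stride=2)
        self.conv = conv_block(out_c + out_c, out_c)

    def forward(self, inputs, skip):
        x = self.up(inputs)
        x = torch.cat([x, skip], dim=1)   # both are now the same H x W
        return self.conv(x)

if __name__ == "__main__":
    d = decoder_block(96, 96)
    bottleneck = torch.randn(1, 96, 16, 16)   # 256 -> 128 -> 64 -> 32 -> 16
    skip = torch.randn(1, 96, 32, 32)         # s4 from the encoder
    print(d(bottleneck, skip).shape)          # torch.Size([1, 96, 32, 32])
#-----------------------------------------------------------------------------#
With these changes the encoder sizes become 256 → 128 → 64 → 32 → 16, and every torch.cat sees matching H x W. You may also want inputs = convert_tensor(image).unsqueeze(0) so the model sees an explicit [1, 1, 256, 256] batch, although recent PyTorch versions also accept the unbatched [1, 256, 256] input.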