Sure! I will also include the code for the model architecture here.
####################################
############ MODEL ################
####################################
import torch
import torch.nn as nn
from torchvision import models
class ScaleLayer(nn.Module):
    """Multiply the input tensor by a single learnable scalar.

    The scalar starts at ``init_value`` and is trained jointly with the
    rest of the network.
    """

    def __init__(self, init_value=1e-3):
        super().__init__()
        # One-element float32 parameter holding the scale factor.
        self.scale = nn.Parameter(torch.full((1,), float(init_value)))

    def forward(self, input):
        # Broadcast the learnable scalar over the whole input tensor.
        return self.scale * input
def weights_init(model):
    """Initialize conv/linear layers: Xavier-normal weights, biases set to 0.1.

    Intended to be used via ``nn.Module.apply``.  Uses ``isinstance`` (so
    subclasses are handled too), additionally covers ``nn.ConvTranspose2d``
    — the upsampling layer in ``Color_model`` was previously skipped — and
    tolerates layers constructed with ``bias=False``.
    """
    if isinstance(model, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
        nn.init.xavier_normal_(model.weight.data)
        # Layers built with bias=False have model.bias is None.
        if model.bias is not None:
            nn.init.constant_(model.bias.data, 0.1)
class Color_model(nn.Module):
    """Colorization network in the style of Zhang et al., "Colorful Image
    Colorization".

    Takes a 1-channel L (lightness) image and predicts, per spatial
    location, unnormalised scores over 313 quantised ab colour bins.

    Input : (N, 1, H, W) with H and W divisible by 8
    Output: (N, 313, H/4, W/4) — three stride-2 downsamplings followed by
            one stride-2 transposed convolution.
    """

    def __init__(self):
        super(Color_model, self).__init__()
        self.features = nn.Sequential(
            # conv1 (downsample x2)
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=64),
            # conv2 (downsample x2)
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=128),
            # conv3 (downsample x2)
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=256),
            # conv4
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=512),
            # conv5 (dilated — grows the receptive field without downsampling)
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=512),
            # conv6 (dilated).  FIX: the original had a stray nn.ReLU()
            # between conv5's BatchNorm and this convolution; no other
            # stage has one, so it was a copy-paste artifact and is removed.
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=2, dilation=2),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=512),
            # conv7 (back to dilation 1)
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=512),
            # conv8 (upsample x2 via transposed convolution)
            nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.ReLU(),
            # conv8_313: 1x1 conv producing the 313-way colour-bin logits.
            # FIX: no ReLU after this layer — these are logits for a
            # softmax/cross-entropy loss, and the original trailing ReLU
            # zeroed every negative logit, crippling training.
            nn.Conv2d(in_channels=256, out_channels=313, kernel_size=1, stride=1, dilation=1),
            # decoding (left disabled, as in the original)
            # nn.Conv2d(in_channels=313, out_channels=2, kernel_size=1, stride=1)
        )
        self.apply(weights_init)

    def forward(self, gray_image):
        """Return the (N, 313, H/4, W/4) colour-bin scores for a batch of
        L-channel images of shape (N, 1, H, W)."""
        features = self.features(gray_image)
        return features
I ran the training script with these parameters:
# Model parameters
# NOTE(review): `parser` (an argparse.ArgumentParser) and `main` are
# defined earlier in train.py and are not shown in this excerpt.
parser.add_argument('--num_epochs', type = int, default = 5)
parser.add_argument('--batch_size', type = int, default = 16)
parser.add_argument('--num_workers', type = int, default = 8)  # DataLoader worker processes
parser.add_argument('--learning_rate', type = float, default = 1e-3)
args = parser.parse_args()
print(args)
main(args)
The error I get is the following:
Traceback (most recent call last):
File "c:/Users/Matheus Santos/Desktop/Colorization-Epiphqny/code/train.py", line 102, in <module>
main(args)
File "c:/Users/Matheus Santos/Desktop/Colorization-Epiphqny/code/train.py", line 44, in main
for i, (images, img_ab, filename) in enumerate(data_loader):
File "C:\Python37\lib\site-packages\torch\utils\data\dataloader.py", line 801, in __next__
return self._process_data(data)
File "C:\Python37\lib\site-packages\torch\utils\data\dataloader.py", line 846, in _process_data
data.reraise()
File "C:\Python37\lib\site-packages\torch\_utils.py", line 369, in reraise
raise self.exc_type(msg)
ValueError: Caught ValueError in DataLoader worker process 2.
Original Traceback (most recent call last):
File "C:\Python37\lib\site-packages\torch\utils\data\_utils\worker.py", line 178, in _worker_loop
data = fetcher.fetch(index)
File "C:\Python37\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "C:\Python37\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "c:\Users\Matheus Santos\Desktop\Colorization-Epiphqny\code\data_loader.py", line 44, in __getitem__
img_lab_ori = rgb2lab(img_rgb_ori) # Convert to CIE Lab color space
File "C:\Python37\lib\site-packages\skimage\color\colorconv.py", line 1038, in rgb2lab
return xyz2lab(rgb2xyz(rgb), illuminant, observer)
File "C:\Python37\lib\site-packages\skimage\color\colorconv.py", line 681, in rgb2xyz
arr = _prepare_colorarray(rgb).copy()
File "C:\Python37\lib\site-packages\skimage\color\colorconv.py", line 152, in _prepare_colorarray
raise ValueError(msg)
ValueError: the input array must be have a shape == (.., ..,[ ..,] 3)), got (375, 500)