Hello Sir, @ptrblck Can you also help me with my code?
I too have same error “RuntimeError: Given groups=1, weight of size [3, 1, 1, 11], expected input[1, 3, 1014, 1024] to have 1 channels, but got 3 channels instead”.
Here is my code :
#For combining the data from Belgium for moisture content of 0.78
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as skio
from sklearn.model_selection import train_test_split
import glob
Scan1_22_09_2022_Box1_ref1 = glob.glob(path)
Scan1_22_09_2022_Box2_ref1 = glob.glob(path)
Scan1_30_09_2022_Box3_ref1 = glob.glob(path)
listOfAllImages = Scan1_22_09_2022_Box1_ref1 + Scan1_22_09_2022_Box2_ref1 + Scan1_30_09_2022_Box3_ref1
len(listOfAllImages)
from skimage import transform
def getData(path):
ArryImage = skio.imread(path,plugin=‘pil’)
ArryImage= ArryImage/np.max(ArryImage)
ArryImage = transform.resize(ArryImage, (1024, 1024))
return ArryImage
#Splitting of listOfAllImages into training and validation set
train_list, val_list = train_test_split(listOfAllImages, test_size = 0.2,random_state=42)
len(train_list)
len(val_list)
type(val_list)
#Defining the Neural network
import torch
import torch.optim as optim
device = torch.device(“cuda” if torch.cuda.is_available() else “cpu”)
device
import torch
import torch.nn as nn
class Net(nn.Module):
‘’’
Model that takes one 2D image as input and one 2D image as output.
The inputs are processed using 2 encoders, one for the 3D data and the other for the
2D image. The representations of the both inputs are concatenated.
Thereafter, the merged representations are upsampled in a decoder into a 2D image.
'''
def __init__(self):
super(Net, self).__init__()
# Layers going down
## Encoder 2D input branch
self.pool2d = nn.MaxPool2d(2) # 2D Pooling layer, max pooling
self.encoder2d_lvl_1 = nn.Sequential(
nn.Conv2d(1, 32, 3, padding=1), #inchannel;outchannel:number of filters; kernel size
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(32, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True)
)
self.encoder2d_lvl_2 = nn.Sequential(
nn.Conv2d(32, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(32, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True),
)
self.encoder2d_lvl_3 = nn.Sequential(
nn.Conv2d(32, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(True)
)
self.encoder2d_lvl_4 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.Conv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(True)
)
self.encoder2d_lvl_5 = nn.Sequential(
nn.Conv2d(128,256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.Conv2d(256,256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(True)
)
self.encoder2d_lvl_6 = nn.Sequential(
nn.Conv2d(256,512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.Conv2d(512,512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(True)
)
# Layers going up
self.decoder2d_lvl_5 = nn.Sequential(
nn.ConvTranspose2d(512,512, 4, stride=2, groups=512), #groups=in_channels-> each input channel is convolved with its own set of filters of size(out_channels/in_channels)
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.Conv2d(512,256, 3, padding=0),
nn.BatchNorm2d(256),
nn.ReLU(True)
)
self.decoder2d_lvl_4 = nn.Sequential(
nn.ConvTranspose2d(256,256, 4, stride=2, groups=256), #groups=in_channels-> each input channel is convolved with its own set of filters of size(out_channels/in_channels)
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.Conv2d(256,128, 3, padding=0),
nn.BatchNorm2d(128),
nn.ReLU(True)
)
self.decoder2d_lvl_3 = nn.Sequential(
nn.ConvTranspose2d(128, 128, 4, stride=2, groups=128), #groups=in_channels-> each input channel is convolved with its own set of filters of size(out_channels/in_channels)
nn.Conv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.Conv2d(128, 64, 3, padding=0),
nn.BatchNorm2d(64),
nn.ReLU(True)
)
self.decoder2d_lvl_2 = nn.Sequential(
nn.ConvTranspose2d(64, 64, 4, stride=2, groups=64), #groups=in_channels-> each input channel is convolved with its own set of filters of size(out_channels/in_channels)
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, padding=0),
nn.BatchNorm2d(32),
nn.ReLU(True)
)
self.decoder2d_lvl_1 = nn.Sequential(
nn.ConvTranspose2d(32, 32, 4, stride=2, groups=32),
nn.Conv2d(32, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(32, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(16, 1, 3, padding=0)
)
def forward(self, x):
# Encoding 2D input data # input ([1, 1, 128, 128])
out = self.encoder2d_lvl_1(x)
#print(out.size()) #torch.Size([1, 32, 128, 128])
out = self.encoder2d_lvl_2(self.pool2d(out))
#print(out.size()) #torch.Size([1, 128, 64, 64])
out = self.encoder2d_lvl_3(self.pool2d(out))
#print(out.size()) #torch.Size([1, 256, 32, 32])
out = self.encoder2d_lvl_4(self.pool2d(out))
#print(out.size()) ##torch.Size([1, 512,16, 16])
out = self.encoder2d_lvl_5(self.pool2d(out))
#print(out.size()) #torch.Size([1, 1024, 8, 8])
out = self.encoder2d_lvl_6(self.pool2d(out))
#print(out.size())
# Decoding to 2D data
out = self.decoder2d_lvl_5(out)
#print(out.size())
out = self.decoder2d_lvl_4(out)
#print(out.size()) #torch.Size([1, 512, 16, 16])
out = self.decoder2d_lvl_3(out)
#print(out.size()) #torch.Size([1, 256, 32, 32])
out = self.decoder2d_lvl_2(out)
#print(out.size()) #torch.Size([1, 128, 66, 66])
out = self.decoder2d_lvl_1(out)
#print(out.size()) #torch.Size([1, 1, 128, 128])
out = torch.sigmoid(out)
#print(out.size()) #torch.Size([1, 1, 128, 128])
return out
#loss function defining
from pytorch_msssim import SSIM
#loss function defining
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
criterion = SSIM()
model = model.to(device)
criterion = criterion.to(device)
#training the autoencoder network and print loss statistics
trainLos = []
ValLos = []
lenTrainingData = len(train_list)
lenValData = len(val_list)
counterTrain = 0
counterVal = 0
counter = 0
for iteration in range(100 * lenTrainingData):
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
sampleGT = getData(train_list[counterTrain])
sampleGT = torch.from_numpy(sampleGT).float().to(device)
sample = sampleGT.unsqueeze_(0).unsqueeze_(0)
outputs = model(sample)
loss = 1- criterion(outputs, sampleGT)
loss.backward()
optimizer.step()
counterTrain = counterTrain + 1
if counterTrain == lenTrainingData:
counterTrain = 0
# print statistics
trainLos.append(loss.item())
sampleGT = getData(val_list[counterVal])
sampleGT = torch.from_numpy(sampleGT).float().to(device)
sample = sampleGT.unsqueeze_(0).unsqueeze_(0)
outputs = model(sample)
loss = 1- criterion(outputs, sampleGT)
ValLos.append(loss.item())
counterVal = counterVal + 1
counter = counter + 1
if counterVal == lenValData:
counterVal = 0
if counter == 100:
plt.figure()
plt.plot(trainLos,label = "Training los")
plt.plot(ValLos,label = "validation los")
plt.ylabel('Iterations')
plt.ylabel('Los')
plt.legend()
#plt.show()
plt.savefig('./autoEncouder.png')
plt.close()
torch.save(model.state_dict(), "./autoEncouder.pth")
counter = 0
print(iteration)
print(loss.item())