This is the U-Net architecture for a customised semantic segmentation dataset, based on this paper. I am getting this error when num_workers = 2:

    File "C:\Users\Neda\Anaconda3\lib\multiprocessing\spawn.py", line 172, in get_preparation_data
        main_mod_name = getattr(main_module.__spec__, "name", None)
    AttributeError: module '__main__' has no attribute '__spec__'
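From what I can find, this seems to be about how Windows spawns the DataLoader worker processes rather than about the network itself. Below is only a rough sketch of the layout I understand is expected (create and iterate any DataLoader with num_workers > 0 inside the __main__ guard, and run the file directly with python from a terminal rather than an interactive console); in my script the DataLoaders are created at module level, and I am not sure whether that is the actual problem:

    # rough sketch, assuming the spawn/__spec__ issue comes from how the script
    # is launched; train_dataset is the CustomDataset defined further down
    from torch.utils.data import DataLoader

    if __name__ == '__main__':
        train_loader = DataLoader(train_dataset, batch_size=4,
                                  shuffle=True, num_workers=2)
        for t_image, mask in train_loader:   # worker processes are spawned here
            print(t_image.shape)
            break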
With num_workers = 0 I get this error instead:

        raise TypeError((error_msg.format(type(batch[0]))))
    TypeError: batch must contain tensors, numbers, dicts or lists; found <class 'PIL.TiffImagePlugin.TiffImageFile'>
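If I read that message correctly, the default collate function can only batch tensors, numbers, dicts or lists, and my __getitem__ below returns the mask as a raw PIL image. This is an untested sketch of the change I assume it is asking for (I am not sure a plain ToTensor is the right transform for a segmentation mask that should end up as class indices):

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = self.transforms(image)
        t_mask = self.transforms(mask)   # convert the mask to a tensor as well
        return t_image, t_mask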
The output of print(device) is cuda:0. I don't have any clue where I am going wrong. Thank you in advance.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data.dataset import Dataset # For custom data-sets
import torchvision.transforms as transforms
from PIL import Image
import glob
print(torch.__version__)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# get all the image and mask paths and the number of images
folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCdata\\data\\*.tif")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCmasks\\masks\\*.tif")
# split these paths using a certain percentage
len_data = len(folder_data)
print(len_data)
train_size = 0.6
train_image_paths = folder_data[:int(len_data*train_size)]
# print(train_image_paths)    # 25 images for training
test_image_paths = folder_data[int(len_data*train_size):]
# print(test_image_paths)     # 18 images for testing
train_mask_paths = folder_mask[:int(len_data*train_size)]
test_mask_paths = folder_mask[int(len_data*train_size):]
class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths):  # initial setup, e.g. the transforms
        self.image_paths = image_paths
        self.target_paths = target_paths
        self.transforms = transforms.ToTensor()

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = self.transforms(image)
        return t_image, mask

    def __len__(self):  # return the count of samples we have
        return len(self.image_paths)
train_dataset = CustomDataset(train_image_paths, train_mask_paths)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2)
test_dataset = CustomDataset(test_image_paths, test_mask_paths)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=2)
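# hypothetical sanity check (not in the original script): a single raw sample shows
# the image is already a tensor while the mask is still a PIL TiffImageFile,
# which is what the default collate_fn later complains about
sample_image, sample_mask = train_dataset[0]
print(type(sample_image), type(sample_mask))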
class ConvRelu(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, padding, stride):
        super(ConvRelu, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              padding=padding, stride=stride)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        return x
class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()
        # channels, height, width = in_shape

        self.down1 = nn.Sequential(
            ConvRelu(1, 64, kernel_size=(3, 3), stride=1, padding=0),
            ConvRelu(64, 64, kernel_size=(3, 3), stride=1, padding=0)
        )
        self.maxPool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)

        self.down2 = nn.Sequential(
            ConvRelu(64, 128, kernel_size=(3, 3), stride=1, padding=0),
            ConvRelu(128, 128, kernel_size=(3, 3), stride=1, padding=0)
        )
        self.maxPool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)

        self.down3 = nn.Sequential(
            ConvRelu(128, 256, kernel_size=(3, 3), stride=1, padding=0),
            ConvRelu(256, 256, kernel_size=(3, 3), stride=1, padding=0)
        )
        self.maxPool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)

        self.down4 = nn.Sequential(
            ConvRelu(256, 512, kernel_size=(3, 3), stride=1, padding=0),
            ConvRelu(512, 512, kernel_size=(3, 3), stride=1, padding=0)
        )
        self.maxPool4 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)

        self.center = nn.Sequential(
            ConvRelu(512, 1024, kernel_size=(3, 3), stride=1, padding=0),
            ConvRelu(1024, 1024, kernel_size=(3, 3), stride=1, padding=0)
        )

        self.upSample1 = nn.ConvTranspose2d(1024, 1024, 2, stride=2)
        self.up1 = nn.Sequential(
            ConvRelu(1024, 512, kernel_size=(2, 2), stride=1, padding=0),
            ConvRelu(512, 512, kernel_size=(2, 2), stride=1, padding=0)
        )

        self.upSample2 = nn.ConvTranspose2d(512, 512, 2, stride=2)
        self.up2 = nn.Sequential(
            ConvRelu(512, 256, kernel_size=(2, 2), stride=1, padding=0),
            ConvRelu(256, 256, kernel_size=(2, 2), stride=1, padding=0)
        )

        self.upSample3 = nn.ConvTranspose2d(256, 256, 2, stride=2)
        self.up3 = nn.Sequential(
            ConvRelu(256, 128, kernel_size=(2, 2), stride=1, padding=0),
            ConvRelu(128, 128, kernel_size=(2, 2), stride=1, padding=0)
        )

        self.upSample4 = nn.ConvTranspose2d(128, 128, 2, stride=2)
        self.up4 = nn.Sequential(
            ConvRelu(128, 64, kernel_size=(2, 2), stride=1, padding=0),
        )

        # 1x1 convolution at the last layer
        self.output_seg_map = nn.Conv2d(64, 2, kernel_size=(1, 1), padding=0, stride=1)
    def crop_concat(self, upsampled, bypass, crop=False):
        if crop:
            c = (bypass.size()[2] - upsampled.size()[2]) // 2
            # a negative pad crops: c pixels are removed from each side of bypass
            bypass = F.pad(bypass, (-c, -c, -c, -c))  # (padLeft, padRight, padTop, padBottom)
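            # e.g. if bypass is 64x64 and upsampled is 56x56, then c = 4 and
            # F.pad(bypass, (-4, -4, -4, -4)) trims 4 pixels from every border -> 56x56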
        return torch.cat((upsampled, bypass), 1)
    def forward(self, x):
        x = self.down1(x)
        out_down1 = x
        x = self.maxPool1(x)

        x = self.down2(x)
        out_down2 = x
        x = self.maxPool2(x)

        x = self.down3(x)
        out_down3 = x
        x = self.maxPool3(x)

        x = self.down4(x)
        out_down4 = x
        x = self.maxPool4(x)

        x = self.center(x)

        x = self.upSample1(x)
        x = self.up1(x)
        self.crop_concat(x, out_down4)

        x = self.upSample2(x)
        x = self.up2(x)
        self.crop_concat(x, out_down3)

        x = self.upSample3(x)
        x = self.up3(x)
        self.crop_concat(x, out_down2)

        x = self.upSample4(x)
        x = self.up4(x)
        self.crop_concat(x, out_down1)

        out = self.output_seg_map(x)
        return F.log_softmax(out, dim=1)  # applies log-softmax over the class channel
net = UNet()
net = net.to(device)
criterion = nn.NLLLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.99)
def main():
    for epoch in range(2):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            t_image, mask = data
            t_image, mask = t_image.to(device), mask.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(t_image)
            loss = criterion(outputs, mask)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')
if __name__ == '__main__':
    main()