How to make a customised dataset for semantic segmentation?

@ptrblck I have been trying to do augmentation with no luck.
This is the snippet for the custom dataset:


        
class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, transform_images):   

        self.image_paths = image_paths
        self.target_paths = target_paths

        self.transformm = transforms.Compose([tf.rotate(10),
                                              tf.affine(0.2,0.2)])                                                                                   
        self.transform = transforms.ToTensor()
        
        self.transform_images = transform_images
            
        self.mapping = {
            0: 0,
            255: 1              
        }
        
    def mask_to_class(self, mask):
        for k in self.mapping:
            mask[mask==k] = self.mapping[k]
        return mask
    
    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = image.convert('L')
        t_image = self.transform(t_image) # transform to tensor
        
        
        if any([img in image for img in self.transform_images]):
            t_image = self.transformm(t_image) #augmentation
                
        mask = torch.from_numpy(numpy.array(mask, dtype=numpy.uint8)) 
        mask = self.mask_to_class(mask)
        mask = mask.long()
        return t_image, mask, self.image_paths[index], self.target_paths[index] 
    
    def __len__(self):  

        return len(self.image_paths)

and here is the snippet for splitting the dataset and defining the dataloaders:

from custom_dataset import CustomDataset

folder_data = glob.glob("F:\\my_data\\imagesResized\\*.png")
folder_mask = glob.glob("F:\\my_data\\labelsResized\\*.png") 


folder_data.sort(key = len)
folder_mask.sort(key = len)
#print(folder_data)

len_data = len(folder_data)
print("count of dataset: ", len_data)
print(80 * '_')

test_image_paths = folder_data[794:] #793
print("count of test images is: ", len(test_image_paths)) 
test_mask_paths = folder_mask[794:]
print("count of test mask is: ", len(test_mask_paths)) 

assert len(folder_data) == len(folder_mask) 

indices = list(range(len(folder_data)))
#print(indices)
random.shuffle(indices)
#print(indices)
indices.copy()
#print(70 * '_')
#print(indices)

image_indices = [folder_data[i] for i in indices]

mask_indices = [folder_mask[i] for i in indices]
#print(mask_indices)


split_1 = int(0.6 * len(image_indices))
split_2 = int(0.8 * len(image_indices)+1)

train_image_paths = image_indices[:split_1]
print("count of training images is: ", len(train_image_paths)) 
train_mask_paths = mask_indices[:split_1]
print("count of training mask is: ", len(train_image_paths)) 

valid_image_paths = image_indices[split_1:split_2]
print("count of validation image is: ", len(valid_image_paths))
valid_mask_paths = mask_indices[split_1:split_2]
print("count of validation mask is: ", len(valid_image_paths))
#print(valid_mask_paths)
print(80* '_')
print(valid_image_paths)

transform_images = glob.glob("F:\\my_data\\imagesResized\\P164_ES_1.png")

#transform_images = list(folder_data['P164_ES_1', 'P164_ES_2','P164_ES_3','P165_ED_1',
#                        'P165_ED_2', 'P165_ED_3', 'P165_ES_1', 'P165_ES_2','P165_ES_3',
#                        'P166_ED_1', 'P166_ED_2']) 


train_dataset = CustomDataset(train_image_paths, train_mask_paths, transform_images)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)

valid_dataset = CustomDataset(valid_image_paths, valid_mask_paths, transform_images)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=True, num_workers=2)

test_dataset = CustomDataset(test_image_paths, test_mask_paths, transform_images)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)  

dataLoaders = {
        'train': train_loader,
        'valid': valid_loader,
         'test': test_loader,
        }

In the second snippet, transform_images = list(folder_data['P164_ES_1', 'P164_ES_2','P164_ES_3','P165_ED_1', 'P165_ED_2', 'P165_ED_3', 'P165_ES_1', 'P165_ES_2','P165_ES_3', 'P166_ED_1', 'P166_ED_2']) is the list of image names that I need to augment.

I would really appreciate it if you could point me in the right direction. At the moment it is giving the error __init__() takes 3 positional arguments but 4 were given.
I also don't have any idea how I can do it for the image masks (my task is segmentation).

It seems you are passing the torchvision.transforms.functional methods to transforms.Compose instead of the classes.
In case you would like to use the functional API, you could just apply these methods in your __getitem__ method:

if any([img in image for img in self.transform_images]):
    t_image = tf.rotate(t_image, 10)
    t_image = tf.affine(t_image, 0, (0, 0), 0.2, 0.2)

Or alternatively wrap them in transforms.Lambda:

transform = transforms.Compose([
    transforms.Lambda(lambda x: TF.rotate(x, 10)),
    transforms.Lambda(lambda x: TF.affine(x,
                                          angle=0,
                                          translate=(0, 0),
                                          scale=0.2,
                                          shear=0.2))
])

I just assumed you would like to set the scale and shear arguments to 0.2. If that's not correct, you should of course adjust the code.


@ptrblck Thanks a lot. Also, my main problem is applying augmentation to specific indices. In the second snippet above, which splits the dataset and defines the dataloaders, do I need to make a list of the indices which need to be augmented? Is there any example of this?

Try to pass a list containing all image names which should be transformed.
In __getitem__ you should compare the current image_path, not the image itself.
Here is a simple example for a folder containing 5 images:

class CustomDataset(Dataset):
    def __init__(self, image_paths, transform_images):   
        self.image_paths = image_paths
        self.transform_images = transform_images
        self.transformm = transforms.Lambda(lambda x: TF.affine(x,
                                                                angle=0,
                                                                translate=(0, 0),
                                                                scale=0.2,
                                                                shear=0.2))
    
    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])

        if any([img in self.image_paths[index] for img in self.transform_images]):
            print('applying special transformation')
            image = self.transformm(image) #augmentation
        
        image = TF.to_tensor(image)
            
        return image, self.image_paths[index]
    
    def __len__(self):  
        return len(self.image_paths)

image_paths = glob.glob('./data/fake_folders/class0/*.png')
print(image_paths)
> ['./data/fake_folders/class0/0.png', './data/fake_folders/class0/1.png', './data/fake_folders/class0/3.png', './data/fake_folders/class0/2.png', './data/fake_folders/class0/4.png']

transform_images = ['0.png', '1.png']  # apply special transformation only on first two images
print(transform_images)
> ['0.png', '1.png']

dataset = CustomDataset(image_paths, transform_images)
for data, path in dataset:
    print(path)
> applying special transformation
./data/fake_folders/class0/0.png
applying special transformation
./data/fake_folders/class0/1.png
./data/fake_folders/class0/3.png
./data/fake_folders/class0/2.png
./data/fake_folders/class0/4.png

Thank you very much @ptrblck.
As I am doing segmentation, I repeated the same thing for mask augmentation as well. Is that correct?

Could you please let me know how I can display some transformed images and the corresponding masks, and where these augmented images and masks are stored? Or is this transformation happening during training?

Here is the script:

import torch
from torch.utils.data.dataset import Dataset  # For custom data-sets
import torchvision.transforms as transforms
import torchvision.transforms.functional as tf
from PIL import Image
import numpy 
import glob
import matplotlib.pyplot as plt
from split_dataset import test_loader
import os

class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, transform_images, transform_masks):   

        self.image_paths = image_paths
        self.target_paths = target_paths
        
        self.transform_images = transform_images
        self.transform_masks = transform_masks

  
        self.transformm = transforms.Compose([
            transforms.Lambda(lambda x: tf.rotate(x, 10)),
            transforms.Lambda(lambda x: tf.affine(x,
                                                  angle=0,
                                                  translate=(0, 0),
                                                  scale=0.2,
                                                  shear=0.2))
        ])
        
        self.transform = transforms.ToTensor()
                   
        self.mapping = {
            0: 0,
            255: 1              
        }
        
    def mask_to_class(self, mask):
        for k in self.mapping:
            mask[mask==k] = self.mapping[k]
        return mask
        
    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])

        if any([img in self.image_paths[index] for img in self.transform_images]):
            print('applying special transformation')
            image = self.transformm(image) #augmentation
        
        if any([msk in self.target_paths[index] for msk in self.transform_masks]):
            print('applying special transformation')
            image = self.transformm(mask) #augmentation
        
        t_image = image.convert('L')
        t_image = self.transform(t_image) # transform to tensor for image
        mask = self.transform(mask) # transform to tensor for mask

            
        mask = torch.from_numpy(numpy.array(mask, dtype=numpy.uint8)) 
        mask = self.mask_to_class(mask)
        mask = mask.long()

        return t_image, mask, self.image_paths[index], self.target_paths[index] 
    
    def __len__(self):  # return count of sample we have

        return len(self.image_paths)


image_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\imagesResized\\*.png")
target_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\labelsResized\\*.png")


transform_images = ['image_981.png', 'image_982.png','image_983.png', 'image_984.png', 'image_985.png',
                    'image_986.png','image_987.png','image_988.png','image_989.png','image_990.png',
                    'image_991.png']  # apply special transformation only on these images
print(transform_images)
#['image_991.png', 'image_991.png']

transform_masks = ['image_labeled_981.png', 'image_labeled_982.png','image_labeled_983.png', 'image_labeled_984.png',
                    'image_labeled_985.png', 'image_labeled_986.png','image_labeled_987.png','image_labeled_988.png',
                    'image_labeled_989.png','image_labeled_990.png',
                    'image_labeled_991.png'] 

dataset = CustomDataset(image_paths, target_paths, transform_images, transform_masks)

for transform_images in dataset:
    
    #print(transform_images)        
    transform_images = Image.open(os.path.join(image_paths, transform_images))
    transform_images = numpy.array(transform_images)
    
    transform_masks = Image.open(os.path.join(target_paths, transform_masks))
    transform_masks = numpy.array(transform_masks)


    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize = (6,6))
                           
    img1 = ax1.imshow(transform_images, cmap='gray')
    ax1.axis('off')   

    img2 = ax2.imshow(transform_masks)
    ax2.axis('off')        
    plt.show()   

It is causing an error path = os.fspath(path) TypeError: expected str, bytes or os.PathLike object, not tuple. Also, I tried with cv2 and got another error, which is about a float image.

Another question: since I have now changed CustomDataset, do I need to change the way I was loading my dataloader for training?

Since self.transformm does not contain any random transformations, you can apply it on your image and mask without a problem.
Note that you have a small error in your code, i.e. you are assigning the transformed mask to image instead of mask:

if any([msk in self.target_paths[index] for msk in self.transform_masks]):
    print('applying special transformation')
    image = self.transformm(mask) #augmentation
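
A minimal sketch of the corrected block (keeping the rest of your __getitem__ as posted) would assign the result back to mask:

if any([msk in self.target_paths[index] for msk in self.transform_masks]):
    print('applying special transformation')
    mask = self.transformm(mask)  # assign the transformed mask back to mask, not to image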

The transformation is applied on the fly each time __getitem__ is called.
If you want to visualize some examples, you could either try to visualize the already transformed tensors:

x, y = dataset[0]
# "denormalize" if necessary
image, mask = TF.to_pil_image(x), TF.to_pil_image(y)

or alternatively if you need to visualize a lot of examples, you could also write another class method just for the augmentation:

class CustomDataset(Dataset):
    def __init__(self):
        pass

    def get_sample(self, index):
        # your loading and augmentation code
        return image, mask

    def __getitem__(self, index):
        image, mask = self.get_sample(index)
        # Transform to tensor
        image = self.transform(image)
        mask = ...

    def __len__(self):
        ...

image, mask = dataset.get_sample(0)

Note that you might have another error in your code, since you are applying ToTensor on your mask.
Since your mask should contain class indices, this would normalize the values to the range [0, 1], which might destroy it.
Could you check the values of your mask and make sure all pixel values contain a valid class index?
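
A quick way to check (a rough sketch, assuming dataset is the CustomDataset instance created in your script) is to print the unique values of a loaded mask:

t_image, mask, image_path, target_path = dataset[0]
print(torch.unique(mask))  # should only contain the class indices 0 and 1 after mask_to_class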

Regarding the last error message: you are trying to pass a complete list of file names to Image.open.
This method only loads a single image, so you should use a loop if you want to load multiple images.
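
For example, a small sketch (assuming image_paths and target_paths are the lists returned by glob) that loads each pair in a loop:

for img_path, msk_path in zip(image_paths, target_paths):
    image = Image.open(img_path)  # Image.open expects a single path, not a list of paths
    mask = Image.open(msk_path)
    # visualize or process the single image/mask pair here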


@ptrblck I really appreciate your help. You are right, I shouldn't apply that transform to the mask. Then if I remove that line mask = self.transform(mask) # transform to tensor for mask, would it be fine?
Even if I don't apply the transform to the mask, the mask is all zeros. No idea why; maybe I am doing something wrong. Also, the transformation on the image destroyed the image as well.

here is the code:

import torch
from torch.utils.data.dataset import Dataset  # For custom data-sets
import torchvision.transforms as transforms
import torchvision.transforms.functional as tf
from PIL import Image
import numpy 
import glob
import matplotlib.pyplot as plt
from split_dataset import test_loader

class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, transform_images, transform_masks):   

        self.image_paths = image_paths
        self.target_paths = target_paths
        
        self.transform_images = transform_images
        self.transform_masks = transform_masks

  
        self.transformm = transforms.Compose([
            transforms.Lambda(lambda x: tf.rotate(x, 10)),
            transforms.Lambda(lambda x: tf.affine(x,
                                                  angle=0,
                                                  translate=(0, 0),
                                                  scale=0.2,
                                                  shear=0.2))
        ])
        
        self.transform = transforms.ToTensor()
                   
        self.mapping = {
            0: 0,
            255: 1              
        }
        
    def mask_to_class(self, mask):
        for k in self.mapping:
            mask[mask==k] = self.mapping[k]
        return mask
        
    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])

        if any([img in self.image_paths[index] for img in self.transform_images]):
            print('applying special transformation on images')
            image = self.transformm(image) #augmentation
        
        if any([msk in self.target_paths[index] for msk in self.transform_masks]):
            print('applying special transformation on masks')
            mask = self.transformm(mask) #augmentation
        
        t_image = image.convert('L')
        t_image = self.transform(t_image) # transform to tensor for image
        #mask = self.transform(mask) # transform to tensor for mask

            
        mask = torch.from_numpy(numpy.array(mask, dtype=numpy.uint8)) 
        mask = self.mask_to_class(mask)
        mask = mask.long()

        return t_image, mask, self.image_paths[index], self.target_paths[index] 
    
    def __len__(self):  # return count of sample we have

        return len(self.image_paths)


image_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\imagesResized\\*.png")
target_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\labelsResized\\*.png")


transform_images = ['image_981.png', 'image_982.png','image_983.png', 'image_984.png', 'image_985.png',
                    'image_986.png','image_987.png','image_988.png','image_989.png','image_990.png',
                    'image_991.png']  # apply special transformation only on these images
print(transform_images)
#['image_991.png', 'image_991.png']

transform_masks = ['image_labeled_981.png', 'image_labeled_982.png','image_labeled_983.png', 'image_labeled_984.png',
                    'image_labeled_985.png', 'image_labeled_986.png','image_labeled_987.png','image_labeled_988.png',
                    'image_labeled_989.png','image_labeled_990.png',
                    'image_labeled_991.png'] 

dataset = CustomDataset(image_paths, target_paths, transform_images, transform_masks)


image, mask,ti,tm = dataset[981]
layer, height, width = (image.size())
print(layer, height, width)

image = torch.FloatTensor(1, height, width)
mask = torch.FloatTensor(1, height, width)
image, mask = tf.to_pil_image(image), tf.to_pil_image(mask) #Convert a tensor or an ndarray to PIL Image

min_img, max_img = image.getextrema()
print(min_img, max_img) #[0,243]

min_msk, max_msk = mask.getextrema()
print(min_msk, max_msk) # [0,255]

image = numpy.array(image) / 255

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize = (6,6))
                       
img1 = ax1.imshow(image, cmap='gray')
ax1.axis('on')   

img2 = ax2.imshow(mask)
ax2.axis('on')        
plt.show()                

here is what it produced:

The scale argument in your affine transformation is set to 0.2, which is quite small.
You could increase it to see if the images look like you would expect them to.

Could you check the unique values of image after you are dividing by 255?
I'm not sure if you are performing an integer division, thus losing all information.


@ptrblck I did set affine as follows:

        self.transformm = transforms.Compose([
            transforms.Lambda(lambda x: tf.rotate(x, 8)),
            transforms.Lambda(lambda x: tf.affine(x,
                                                  angle=0,
                                                  translate=(0.2, 0.2),
                                                  scale=1,
                                                  shear=0))
        ])

Here is what it produced. I don't know why the image is all zeros, and image_labeled (which is the mask) isn't what I expected; I assume it should transform my current mask from target_paths. The unique value for this after dividing by 255 is 0.

[attached screenshot: sample_trans_2]
Another question is why it produces a different image each time? Sometimes it looks like this:
[attached screenshot: weird]

I was expecting to get a transformed mask almost similar to the image below, just rotated (this is the actual mask):

[attached image: image_labeled_981]

If you want to divide the image by 255, transform the image to a floating point type before the division.
Could you upload a single image/mask pair somewhere so that I can have a look?
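
Something along these lines (a rough sketch, assuming image is the PIL image from your visualization script) makes the division explicit in floating point:

image = numpy.array(image, dtype=numpy.float32) / 255.0  # cast to float before scaling
print(numpy.unique(image))  # the values should now lie in [0, 1]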


@ptrblck please find the Dropbox link. Could you please let me know if you can't get the images.

Thanks for the data.
It looks like you are trying to visualize empty tensors, since you are recreating image and mask in these lines of code:

image = torch.FloatTensor(1, height, width)
mask = torch.FloatTensor(1, height, width)
image, mask = tf.to_pil_image(image), tf.to_pil_image(mask) #Convert a tensor or an ndarray to PIL Image

This code will show both the image and the corresponding mask:

image, mask,ti,tm = dataset[0]
layer, height, width = (image.size())
print(layer, height, width)

image = tf.to_pil_image(image)  #Convert a tensor or an ndarray to PIL Image

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize = (6,6))
                       
img1 = ax1.imshow(image, cmap='gray')
ax1.axis('on')   

img2 = ax2.imshow(mask)
ax2.axis('on')        
plt.show()   

Yes, it works. Thank you very much @ptrblck.

@ptrblck when I added self.transformm for data augmentation to the training pipeline, it's causing an error AttributeError: Can't pickle local object 'CustomDataset.__init__.<locals>.<lambda>'. It seems to be about the lambda? Is it because of Windows? Could you please let me know how I can fix this?

Are you trying to serialize your Dataset somehow or is this error thrown just using it?
Is this line of code running successfully:

image, mask,ti,tm = dataset[0]

@ptrblck that line runs successfully.

Here is the snippet for the dataloaders and the data split.
I import the CustomDataset class posted above from another module:

from custom_dataset_with_augmnetation import CustomDataset, transform_images, transform_masks
import torch
import glob

folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\imagesResized\\*.png")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\labelsResized\\*.png")

 # split these paths using a certain percentage
len_data = len(folder_data)
print("count of dataset: ", len_data)
# count of dataset:  992

split_1 = int(0.6 * len(folder_data))
split_2 = int(0.8 * len(folder_data))

folder_data.sort()

train_image_paths = folder_data[:split_1]
print("count of train images is: ", len(train_image_paths)) 
#count of train images is:  793

valid_image_paths = folder_data[split_1:split_2]
print("count of validation image is: ", len(valid_image_paths))
#count of validation image is:  99

test_image_paths = folder_data[split_2:]
print("count of test images is: ", len(test_image_paths)) 
#count of test images is:  100
#print(test_image_paths)

train_mask_paths = folder_mask[:split_1]
valid_mask_paths = folder_mask[split_1:split_2]
test_mask_paths = folder_mask[split_2:]


train_dataset = CustomDataset(train_image_paths, train_mask_paths, transform_images, transform_masks)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)

valid_dataset = CustomDataset(valid_image_paths, valid_mask_paths, transform_images, transform_masks)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=True, num_workers=2)

#test_image_paths= glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\CineLoop\\*.png")
#test_mask_paths = glob.glob("D:\\Neda\\Pytorch\\U-net\\my_data\\fake_mask_CineLoop\\*.png")

test_dataset = CustomDataset(test_image_paths, test_mask_paths, transform_images, transform_masks)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)  

dataLoaders = {
        'train': train_loader,
        'valid': valid_loader,
         'test': test_loader,
        }

Here is the snippet for training, which throws that error:

def train_valid_model():
    
    num_epochs=1
                      
    since = time.time()
    out_loss = open("history_loss_UNet_exp10.txt", "w")
    out_acc = open("history_acc_UNet_exp10.txt", "w")

    losses=[]
    ACCes =[]
    #losses = {}

       
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 50)
        
        
        if epoch % 10 == 9:
           torch.save({
            'epoch': epoch + 1,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
             'loss': loss
            }, 'UNet_exp10_epoch{}.pth'.format(epoch+1))
        
        # Each epoch has a training and validation phase
        for phase in ['train', 'valid', 'test']:
            if phase == 'train':
                
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
        
            train_loss = 0.0
            total_train = 0
            correct_train = 0

            #iterate over data
            for t_image, mask, image_paths, target_paths in dataLoaders[phase]:
                
                 
                # get the inputs
                t_image = t_image.to(device)
                mask = mask.to(device)
                #transform_images =  transform_images.to(device)
                #transform_masks =  transform_masks.to(device)
                                
                 # zeroes the gradient buffers of all parameters
                optimizer.zero_grad()
                
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(t_image) 
                    _, predicted = torch.max(outputs.data, 1)
                    loss = criterion(outputs, mask) # calculate the loss
            
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward() # back propagation
                        optimizer.step() # update gradients                        
            
                # accuracy
                train_loss += loss.item()
                total_train += mask.nelement()  # number of pixels in the batch
                correct_train += predicted.eq(mask.data).sum().item() # sum all correctly predicted pixels
                
            epoch_loss = train_loss / len(dataLoaders[phase].dataset)
            #losses[phase] = epoch_loss
            losses.append(epoch_loss)
                            
            epoch_acc = 100 * correct_train / total_train
            ACCes.append(epoch_acc)
                                             
            print('{} Loss: {:.4f} {} Acc: {:.4f}'.format(phase, epoch_loss, phase, epoch_acc))     

            out_loss.write('{} {} Loss: {:.4f}\n'.format(epoch, phase, epoch_loss))
            out_acc.write('{} {} ACC: {:.4f}\n'.format(epoch, phase, epoch_acc))

            #numpy.savetxt('loss.csv', (losses), "%.4f", header= 'loss', comments='', delimiter = ",")
            #numpy.savetxt('ACC.csv', (ACCes), "%.4f", header= 'accuracy', comments='', delimiter = ",")                    
            numpy.savetxt('loss_acc_UNet_exp10.csv', numpy.c_[losses, ACCes], fmt=['%.4f', '%.3f'], header= "loss, acc", comments='', delimiter = ",")                    
            
            
            
    print('-' * 70)        
    print()
    time_elapsed = time.time() - since
    
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))         

Which line of code throws this error?

@ptrblck it's the line for t_image, mask, image_paths, target_paths in dataLoaders[phase]:

class CustomDataset(Dataset):

Do I need to change the return statement in __getitem__?

If you are using multiple workers on a Windows machine, make sure to add the if-clause protection as described here.
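
For reference, a minimal sketch of that protection (assuming the DataLoader creation and the training call live in the same script):

if __name__ == '__main__':
    # only the main process should create the DataLoaders and start training;
    # the worker processes spawned on Windows re-import this module, so this
    # code must not run at import time
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1,
                                               shuffle=True, num_workers=2)
    train_valid_model()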

@ptrblck thank you. The error isn't about the if-clause protection; it is already in the code, but I forgot to paste it in the post above.