How to check the number of images after augmentation

I have used random cropping, rotation, and flipping as augmentation strategies during training. I want to know the number of images before and after augmentation. How can I do that?

from matplotlib import pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from PIL import Image
import numpy as np
from torch.utils import data
import os
import torchvision
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
import pandas as pd
from torch.optim.lr_scheduler import StepLR

torch.cuda.empty_cache()
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)

data_dir = "./BW"
train_dir = os.path.join(data_dir, 'Train')
valid_dir = os.path.join(data_dir, 'Valid')
test_dir = os.path.join(data_dir, 'Test')

# Models to choose from [resnet18, resnet50, alexnet, vgg, squeezenet, densenet, inception]
model_name = "resnet18_1"

# Number of classes in the dataset
num_classes = 2

# Batch size for training (change depending on how much memory you have)
batch_size = 16

# Number of epochs to train for
num_epochs = 10000

train_transforms = transforms.Compose([transforms.RandomResizedCrop(size=256),
                                      transforms.Resize((224,224)),
                                      transforms.RandomRotation(degrees=15),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])
                                      ])


test_transforms = transforms.Compose([
                    transforms.Resize((224,224)),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], 
                                         [0.229, 0.224, 0.225])
                    #transforms.Normalize((0.4914, 0.4822, 0.4465),(0.2023, 0.1994, 0.2010))
                    ])


validation_transforms = transforms.Compose([transforms.Resize((224,224)),
                                            transforms.ToTensor(),
                                            transforms.Normalize([0.485, 0.456, 0.406], 
                                                                 [0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=validation_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)

# targets = train_data.targets

num_workers = 0

print("Number of Samples in Train: ",len(train_data))
print("Number of Samples in Valid: ",len(valid_data))
print("Number of Samples in Test ",len(test_data))

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           num_workers=num_workers, shuffle=True)  # shuffle training batches each epoch
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size,
                                           num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          num_workers=num_workers, shuffle=False)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

The number of images stays the same after data augmentation, because the transforms are applied on the fly: each time a sample is loaded, a fresh random crop, rotation, or flip is generated, but len(train_data) never changes. The literal meaning of "augmentation" can be a little misleading here; for more background on why it is still called data augmentation, see this post: Data augmentation in PyTorch
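
If you want to verify this yourself, here is a minimal sketch (assuming the same ./BW/Train layout and the same transforms as in your code): the dataset length is fixed by the files on disk, the randomness shows up per access, and one pass over the DataLoader yields exactly len(train_data) samples.

import torch
from torchvision import datasets, transforms

train_transforms = transforms.Compose([transforms.RandomResizedCrop(size=256),
                                       transforms.Resize((224, 224)),
                                       transforms.RandomRotation(degrees=15),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

train_data = datasets.ImageFolder("./BW/Train", transform=train_transforms)

# The dataset length counts files on disk; attaching random transforms does not change it.
print("Images before/after augmentation:", len(train_data))

# The randomness happens per access: reading the same index twice gives
# two (almost certainly) different augmented tensors of the same source image.
img_a, _ = train_data[0]
img_b, _ = train_data[0]
print("Same file, different tensors:", not torch.equal(img_a, img_b))

# One full pass over the DataLoader still yields exactly len(train_data) samples.
loader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True)
print("Images seen in one epoch:", sum(images.size(0) for images, _ in loader))

So over 10 epochs the model effectively sees 10 differently augmented variants of each training image, even though the count reported by len() never grows.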