import os

import torch
from torch import nn
import torchvision
from torchvision import transforms
# Training-time augmentation pipeline: resize to the fixed network input,
# apply light geometric augmentation, then convert PIL -> float tensor.
# NOTE(review): TrivialAugmentWide already samples flips/rotations, so the
# explicit RandomHorizontalFlip/RandomRotation may be redundant — confirm
# this stacking is intentional.
train_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.TrivialAugmentWide(num_magnitude_bins=31),
        transforms.ToTensor(),
    ]
)
# Test-time transforms are deterministic only: resize + tensor conversion.
# The original applied RandomHorizontalFlip/RandomRotation at evaluation
# time, which makes test metrics noisy and non-reproducible; it also left
# the Compose list unclosed (the `])` appeared after the dataset code,
# a SyntaxError).
test_transforms = transforms.Compose([
    transforms.Resize(size=(224, 224)),
    transforms.ToTensor(),
])

from torchvision import datasets

# NOTE(review): train_dir / test_dir must be defined earlier in the full
# file — confirm; they are not visible in this chunk.
train_data = datasets.ImageFolder(
    root=train_dir,
    transform=train_transforms,
)
test_data = datasets.ImageFolder(
    root=test_dir,
    transform=test_transforms,
)
from torch.utils.data import DataLoader

# Fix: the original called os.cpu_count() without importing os (NameError);
# `import os` is added to the top-of-file imports. Hoist the worker count
# so both loaders agree.
NUM_WORKERS = os.cpu_count()

# Shuffle only the training set; evaluation order should be deterministic.
train_dataloader = DataLoader(
    dataset=train_data,
    batch_size=28,
    num_workers=NUM_WORKERS,
    shuffle=True,
)
test_dataloader = DataLoader(
    dataset=test_data,
    batch_size=28,
    num_workers=NUM_WORKERS,
    shuffle=False,
)

# Class names inferred from the ImageFolder directory layout.
class_list = train_data.classes
class tinyvgg1(nn.Module):
    """TinyVGG-style CNN: four identical conv blocks + linear classifier.

    Each block halves the spatial resolution (MaxPool2d stride 2), so a
    224x224 input reaches the classifier head at 14x14 (224 / 2**4).

    Fixes vs. the original: `classclass` typo, `init` -> `__init__`,
    `super().init()` -> `super().__init__()`, unreachable second return in
    forward() removed, and the four copy-pasted blocks deduplicated.

    Args:
        hidden: channel width used by every conv layer.
        input: number of input channels (3 for RGB). NOTE: shadows the
            builtin `input`; kept for backward compatibility with callers
            passing `input=...` by keyword.
        output: number of output classes.
    """

    def __init__(self, hidden, input, output):
        super().__init__()
        # Block 1 consumes `input` channels; the rest are hidden -> hidden.
        self.Conv_block1 = self._conv_block(input, hidden)
        self.Conv_block2 = self._conv_block(hidden, hidden)
        self.Conv_block3 = self._conv_block(hidden, hidden)
        self.Conv_block4 = self._conv_block(hidden, hidden)
        self.classifier = nn.Sequential(
            nn.Flatten(),
            # 224x224 input -> 14x14 feature map after four 2x downsamples.
            nn.Linear(in_features=hidden * 14 * 14, out_features=output),
        )

    @staticmethod
    def _conv_block(in_channels, out_channels):
        """One block: Conv-BN-ReLU-Dropout-Conv-BN-ReLU-MaxPool.

        Layer order matches the original inline Sequentials exactly, so
        saved state_dict keys (Conv_blockN.0.weight, ...) remain valid.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        """Return raw class logits of shape (batch, output)."""
        x = self.Conv_block1(x)
        x = self.Conv_block2(x)
        x = self.Conv_block3(x)
        x = self.Conv_block4(x)
        return self.classifier(x)
# Device selection: prefer CUDA when available.
# Fixes vs. the original: stray `_list` token (NameError) removed, and the
# curly "smart quotes" around the strings (a SyntaxError) replaced with
# straight quotes.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Build the detector: 3 input channels (RGB), 76 hidden channels,
# 2 output classes; place it on the selected device.
model_x = tinyvgg1(
    input=3,
    hidden=76,
    output=2,
).to(device)
def accuracy_fn(y_true, y_pred):
    """Return classification accuracy as a percentage in [0, 100].

    Args:
        y_true: ground-truth labels (tensor).
        y_pred: predicted labels (tensor, same shape as y_true).

    Returns:
        float percentage of matching elements; 0.0 for an empty batch
        (the original divided by len(y_pred) unguarded, raising
        ZeroDivisionError).
    """
    if len(y_pred) == 0:
        return 0.0
    correct = torch.eq(y_true, y_pred).sum().item()
    return correct / len(y_pred) * 100
# Multi-class classification loss: takes raw logits and class indices.
losfn = nn.CrossEntropyLoss()

# Adam with a small L2 penalty baked in via weight_decay.
optimizer = torch.optim.Adam(
    model_x.parameters(),
    lr=1e-4,
    weight_decay=1e-5,
)
import torch
from torch import nn
def train_step(model: torch.nn.Module,
               dataloader: torch.utils.data.DataLoader,
               optimizer: torch.optim.Optimizer,
               losfn: torch.nn.Module,
               accuracy_fn,
               device: torch.device,
               epoch=None):
    """Run one training epoch and print the mean loss/accuracy.

    Args:
        model: network to train.
        dataloader: yields (X, y) batches.
        optimizer: optimizer stepping `model`'s parameters.
        losfn: loss function taking (logits, targets).
        accuracy_fn: callable(y_true=..., y_pred=...) -> percent accuracy.
        device: device each batch is moved to. (The original annotated this
            parameter with the runtime string `device`, not a type.)
        epoch: optional epoch number for the progress line. The original
            read a module-level `epoch` global, which raises NameError when
            the function is called outside the training loop.
    """
    train_acc, train_loss = 0, 0
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        y_pred = model(X)
        loss = losfn(y_pred, y)
        # Accumulate the un-regularized loss for reporting.
        train_loss += loss.item()
        train_acc += accuracy_fn(y_true=y, y_pred=y_pred.argmax(dim=1))
        # NOTE(review): the original added a manual (un-squared) L2 penalty
        # here on top of the optimizer's weight_decay, double-counting
        # regularization; removed. Re-add deliberately if both were intended.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    train_acc /= len(dataloader)
    train_loss /= len(dataloader)
    prefix = f"Epoch {epoch} | " if epoch is not None else ""
    print(f"{prefix}Train acc: {train_acc:.2f}% | Train Loss {train_loss:.4f}")
def test_step(model: torch.nn.Module,
              dataloader: torch.utils.data.DataLoader,
              losfn: torch.nn.Module,
              accuracy_fn,
              device: torch.device):
    """Evaluate `model` over `dataloader` and print mean loss/accuracy.

    Runs with the model in eval() mode under torch.inference_mode(), so no
    gradients are tracked and BatchNorm/Dropout use inference behavior.

    Args:
        model: network to evaluate.
        dataloader: yields (X, y) batches.
        losfn: loss function taking (logits, targets).
        accuracy_fn: callable(y_true=..., y_pred=...) -> percent accuracy.
        device: device each batch is moved to. (The original annotated this
            parameter with the runtime string `device`, not a type.)
    """
    test_acc, test_loss = 0, 0
    model.eval()
    with torch.inference_mode():
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(device), y.to(device)
            test_pred = model(X)
            loss1 = losfn(test_pred, y)
            test_loss += loss1.item()
            test_acc += accuracy_fn(y_true=y, y_pred=test_pred.argmax(dim=1))
    test_acc /= len(dataloader)
    test_loss /= len(dataloader)
    print(f"Test Acc {test_acc:.2f}% | Test Loss {test_loss:.4f}")
from tqdm.auto import tqdm
from timeit import default_timer as timer

# Fix the seed so weight init / augmentation order is reproducible.
torch.manual_seed(42)
epochs = 1000

# Move the model once up front; nn.Module.to() is in-place and a no-op on
# subsequent calls, so this matches the original per-iteration .to(device).
model_x = model_x.to(device)

start = timer()
for epoch in tqdm(range(epochs)):
    # One pass over the training set, then one evaluation pass.
    train_step(model=model_x,
               dataloader=train_dataloader,
               optimizer=optimizer,
               losfn=losfn,
               accuracy_fn=accuracy_fn,
               device=device)
    test_step(model=model_x,
              dataloader=test_dataloader,
              losfn=losfn,
              accuracy_fn=accuracy_fn,
              device=device)
end = timer()
print(f"Total time {end-start}")
from pathlib import Path

# Persist the trained weights (state_dict only, not the whole module —
# the recommended, architecture-independent format).
# Fix vs. the original: the curly "smart quotes" around the string
# literals were a SyntaxError; replaced with straight quotes.
MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)
MODEL_NAME = "aI_genrtd_img_dectr.pth"
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME
print(f"Saving model to: {MODEL_SAVE_PATH}")
torch.save(obj=model_x.state_dict(),
           f=MODEL_SAVE_PATH)
# Rebuild the architecture and load the saved weights.
# Fixes vs. the original:
#  * tinyvgg1(3, 76, 2) bound hidden=3, input=76 positionally — a different
#    architecture than the trained model (hidden=76, input=3); arguments are
#    now passed by keyword.
#  * smart quotes around the path were a SyntaxError.
#  * map_location=device lets a GPU-saved checkpoint load on a CPU host.
model = tinyvgg1(hidden=76, input=3, output=2).to(device)
saved_model_path = "/content/aI_genrtd_img_dectr (2).pth"
model.load_state_dict(torch.load(saved_model_path, map_location=device))