import os
from PIL import Image
# import numpy as np
from torchvision import transforms
import torch.nn.functional as F
import pandas as pd
import torch
import torch.utils.data as utils
import torch.nn as nn
from torchsummary import summary
## Lists of (tensor, label) pairs.
############### IF IT'S A CAT, it's '0' and IF IT'S A DOG, it's '1' #######################
Images_train = []
Images_val = []

## Preprocessing: random crop/resize to 64x64, convert to tensor,
## normalize with ImageNet channel statistics.
data_transforms = transforms.Compose([
    transforms.RandomResizedCrop(64),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


def _load_class_images(directory, label, dest):
    """Load every image under *directory*, apply data_transforms, and append
    [tensor, label] pairs to *dest*.

    Hidden files such as .DS_Store are skipped by filtering instead of
    list.remove(), which would raise ValueError when the file is absent.
    """
    files = [f for f in os.listdir(directory) if not f.startswith(".")]
    if files:
        print(files[0])
    for name in files:
        with Image.open(os.path.join(directory, name)) as im:
            # Force 3 channels: grayscale or RGBA files would otherwise
            # crash Normalize, whose mean/std are 3-channel.
            dest.append([data_transforms(im.convert("RGB")), label])


#### after applying the transforms, input shape: 3 x 64 x 64
_load_class_images("dataset/train/cats", 0, Images_train)
_load_class_images("dataset/train/dogs", 1, Images_train)

print("Number of training samples: ", len(Images_train), "\n")

batch_size = 20
# Default collate stacks the [tensor, label] pairs into batched tensors.
train_loader = torch.utils.data.DataLoader(dataset=Images_train,
                                           batch_size=batch_size,
                                           shuffle=True)
class VGG(torch.nn.Module):
    """VGG-16-style network for 2-class (cat vs. dog) classification.

    Expects input batches of shape (N, 3, 64, 64).  Five 2x2/stride-2
    max-pools halve the spatial size each time: 64 -> 32 -> 16 -> 8 -> 4 -> 2,
    so the flattened feature vector entering fc1 has 512 * 2 * 2 = 2048
    elements.  (The textbook value 512*7*7 applies only to 224x224 inputs
    and causes a matmul shape-mismatch RuntimeError with this pipeline.)
    """

    def __init__(self):
        super(VGG, self).__init__()
        # Block 1: 3 -> 64 channels.
        self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Block 2: 64 -> 128 channels.
        self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv4 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.pool2 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Block 3: 128 -> 256 channels.
        self.conv5 = torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv6 = torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv7 = torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.pool3 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Block 4: 256 -> 512 channels.
        self.conv8 = torch.nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv9 = torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv10 = torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.pool4 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Block 5: 512 -> 512 channels.
        self.conv11 = torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv12 = torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv13 = torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.pool5 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # FIX: 512*2*2 (not 512*7*7) for 64x64 inputs — this was the source
        # of the reported RuntimeError.
        self.fc1 = torch.nn.Linear(512 * 2 * 2, 4096)
        self.fc2 = torch.nn.Linear(4096, 4096)
        self.fc3 = torch.nn.Linear(4096, 2)  # 2 logits: cat / dog

    def forward(self, x):
        """Run the network; returns raw class logits of shape (N, 2)."""
        x = self.pool1(F.relu(self.conv2(F.relu(self.conv1(x)))))
        x = self.pool2(F.relu(self.conv4(F.relu(self.conv3(x)))))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = self.pool3(F.relu(self.conv7(x)))
        x = F.relu(self.conv8(x))
        x = F.relu(self.conv9(x))
        x = self.pool4(F.relu(self.conv10(x)))
        x = F.relu(self.conv11(x))
        x = F.relu(self.conv12(x))
        x = self.pool5(F.relu(self.conv13(x)))
        x = torch.flatten(x, 1)
        # FIX: standard VGG applies ReLU on the hidden FC layers; the original
        # chained three Linear layers with no nonlinearity between them.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
model = VGG()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = nn.CrossEntropyLoss()  # applies log-softmax + NLL on the logits
# FIX: 0.1 routinely diverges for a deep VGG trained from scratch without
# batch norm; 0.01 is a safer starting point.
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# FIX: the original referenced `test_loader` whose definition was commented
# out, producing a NameError at iteration 200.  Build it only when validation
# data was actually loaded; otherwise skip the periodic evaluation entirely
# (which also avoids a ZeroDivisionError on an empty validation set).
test_loader = (torch.utils.data.DataLoader(dataset=Images_val,
                                           batch_size=batch_size,
                                           shuffle=False)
               if Images_val else None)

iteration = 0  # renamed from `iter`, which shadowed the builtin
for epoch in range(20):
    print("epoch: ", epoch)
    for images, labels in train_loader:
        # Inputs do not need requires_grad_(); only parameters need gradients.
        images = images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()                 # clear parameter gradients
        outputs = model(images)               # forward pass -> logits
        loss = criterion(outputs, labels)     # cross-entropy loss
        loss.backward()                       # gradients w.r.t. parameters
        optimizer.step()                      # parameter update

        iteration += 1
        if iteration % 200 == 0 and test_loader is not None:
            # Periodic validation-set accuracy.
            model.eval()
            correct = 0
            total = 0
            with torch.no_grad():             # no autograd bookkeeping in eval
                for images, labels in test_loader:
                    outputs = model(images.to(device))
                    # Predicted class = index of the max logit.
                    _, predicted = torch.max(outputs, 1)
                    total += labels.size(0)
                    # Compare on CPU so it works whether or not CUDA is used.
                    correct += (predicted.cpu() == labels).sum()
            accuracy = 100 * correct / total
            print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iteration, loss.item(), accuracy))
            model.train()                     # back to training mode
# RuntimeError: as explained in the heading.
# I have no clue about the error. I am a student, learning. Please help me to
# understand where I went wrong. Thank you.
#
# (Reviewer note: the three lines above were pasted question text, not Python,
# and have been commented out so the file can parse.  The RuntimeError itself
# is a shape mismatch at fc1: a 64x64 input yields a 512x2x2 feature map
# (2048 values) after five 2x2 max-pools, while fc1 was declared with
# in_features = 512*7*7, which is only correct for 224x224 inputs.)