%reload_ext autoreload
%autoreload 2
%matplotlib inline
import math
import time
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
# Our libraries
from train import train_model
from model_utils import *
from predict_utils import *
from vis_utils import *
# some initial setup
np.set_printoptions(precision=2)
use_gpu = torch.cuda.is_available()
np.random.seed(1234)
use_gpu
# Dataset location and basic hyperparameters.
# (Fixed: curly “smart quotes” replaced with plain quotes — the originals
# were a SyntaxError in real Python.)
DATA_DIR = "G:/0-HusheMasnuei-DeepLearning-Machine Learning/Dataset/ACNP/Dev/"
sz = 224          # side length images are resized to before entering the net
batch_size = 8
os.listdir(DATA_DIR)

trn_dir = f'{DATA_DIR}train'
val_dir = f'{DATA_DIR}valid'
os.listdir(trn_dir)
os.listdir(val_dir)

# ImageFolder layout is <split>/<class>/<file>, so match one level of class
# directories. (Fixed: the original pattern '//.jpg' contains no wildcards
# and could never match a file.)
trn_fnames = glob.glob(f'{trn_dir}/*/*.jpg')
trn_fnames[:2]

img = plt.imread(trn_fnames[6])
plt.imshow(img);

# Same listing for .png files.
trn_fnames = glob.glob(f'{trn_dir}/*/*.png')
trn_fnames[:2]
# Inspect the raw (untransformed) training set: class names, label mapping,
# root directory, and the (path, label) file list.
train_ds = datasets.ImageFolder(trn_dir)
train_ds.classes
train_ds.class_to_idx
train_ds.root
train_ds.imgs

# Preprocessing: resize to the network's expected input, convert to a CHW
# float tensor, then normalize with the ImageNet channel statistics.
# (Fixed: the blue-channel std is 0.225, not 0.255 — the typo silently
# mis-scales the blue channel of every input.)
tfms = transforms.Compose([
    transforms.Resize((sz, sz)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Rebuild both splits with the transform pipeline attached.
train_ds = datasets.ImageFolder(trn_dir, transform=tfms)
valid_ds = datasets.ImageFolder(val_dir, transform=tfms)
len(train_ds), len(valid_ds)
type(train_ds.transforms)
# Wrap both datasets in shuffling mini-batch loaders.
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size,
                                       shuffle=True, num_workers=5)
valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=batch_size,
                                       shuffle=True, num_workers=5)

# Sanity check: visualize one training batch as an image grid.
# (Fixed: curly quotes around the title string were a SyntaxError.
# imshow comes from vis_utils — TODO confirm it de-normalizes before display.)
inputs, targets = next(iter(train_dl))
out = torchvision.utils.make_grid(inputs, padding=3)
plt.figure(figsize=(16, 12))
imshow(out, title='Random images from training data')
class SimpleCNN(nn.Module):
    """Two conv blocks (conv -> batchnorm -> ReLU -> 2x2 maxpool) followed by
    a single linear layer producing 2 class logits.

    Expects 3x224x224 inputs: the two 2x2 pools halve the spatial size
    twice (224 -> 112 -> 56), so the flattened feature map is 56*56*32.
    """

    def __init__(self):
        super(SimpleCNN, self).__init__()
        # Block 1: 3 -> 16 channels; padding=2 with a 5x5 kernel keeps the
        # spatial size, the pool halves it (224 -> 112).
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Block 2: 16 -> 32 channels (112 -> 56).
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.fc = nn.Linear(56 * 56 * 32, 2)

    def forward(self, x):
        # Fixed: the original called self.cov1 / self.cov2, attributes that
        # do not exist (the submodules are conv1 / conv2) — this is what made
        # model(inputs) blow up in the pasted traceback.
        out = self.conv1(x)
        out = self.conv2(out)
        out = out.view(out.size(0), -1)  # flatten each sample for the FC layer
        out = self.fc(out)
        return out
model = SimpleCNN()

# Select the compute device once, then move the model onto it.
# (Fixed: the half-commented original assigned device = 'cuda' and then
# unconditionally overwrote it with 'cpu'; the curly quotes were also a
# SyntaxError.)
device = 'cuda' if use_gpu else 'cpu'
if use_gpu:
    model = model.cuda()
model
# Cross-entropy over the 2 class logits; plain SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.002, momentum=0.9)

num_epochs = 10
losses = []  # per-mini-batch training loss, for plotting later
for epoch in range(num_epochs):
    for i, (inputs, targets) in enumerate(train_dl):
        # to_var comes from model_utils — presumably wraps tensors (and moves
        # them to the GPU when available); confirm against its definition.
        inputs = to_var(inputs)
        targets = to_var(targets)

        # Forward pass.
        optimizer.zero_grad()
        outputs = model(inputs)

        # Loss. (Fixed: loss.data[0] was removed after PyTorch 0.4 —
        # loss.item() is the supported way to read a scalar tensor.)
        loss = criterion(outputs, targets)
        losses.append(loss.item())

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        # Progress report every 50 mini-batches.
        if (i + 1) % 50 == 0:
            print('Epoch [%2d/%2d], Step [%3d/%3d], Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1,
                     len(train_ds) // batch_size, loss.item()))
# --- Pasted traceback, kept for reference (commented out: not Python) -----
# Root cause: SimpleCNN.forward referenced self.cov1/self.cov2 (typos for
# conv1/conv2) and, with the flattened indentation, forward was never
# registered as a method — so nn.Module's base forward raised
# NotImplementedError when model(inputs) was called.
#
# NotImplementedError                       Traceback (most recent call last)
# <ipython-input> in <module>
#       8 # forward pass
#       9 optimizer.zero_grad()
# ---> 10 outputs = model(inputs)
#      11
#      12 # loss
# ~\anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
#     530     result = self._slow_forward(*input, **kwargs)
#     531 else:
# --> 532     result = self.forward(*input, **kwargs)
#     533 for hook in self._forward_hooks.values():
#     534     hook_result = hook(self, input, result)
# ~\anaconda3\lib\site-packages\torch\nn\modules\module.py in forward(self, *input)
#      94     registered hooks while the latter silently ignores them.
#      95     """
# ---> 96     raise NotImplementedError
#      97
#      98 def register_buffer(self, name, tensor):
# NotImplementedError: