Step 1) Model definition
import torch
import torch.nn as nn
import torch.nn.functional as F

dropout_value = 0.1

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Input Block
        self.convblock1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Dropout(dropout_value)
        )  # output_size = 26

        # CONVOLUTION BLOCK 1
        self.convblock2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Dropout(dropout_value)
        )  # output_size = 24

        # TRANSITION BLOCK 1
        self.convblock3 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=10, kernel_size=(1, 1), padding=0, bias=False),
        )  # output_size = 24
        self.pool1 = nn.MaxPool2d(2, 2)  # output_size = 12

        # CONVOLUTION BLOCK 2
        self.convblock4 = nn.Sequential(
            nn.Conv2d(in_channels=10, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Dropout(dropout_value)
        )  # output_size = 10
        self.convblock5 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=10, kernel_size=(3, 3), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(10),
            nn.Dropout(dropout_value)
        )  # output_size = 8
        self.convblock6 = nn.Sequential(
            nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(10),
            nn.Dropout(dropout_value)
        )  # output_size = 6
        self.convblock7 = nn.Sequential(
            nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(10),
            nn.Dropout(dropout_value)
        )  # output_size = 6

        # OUTPUT BLOCK
        self.gap = nn.Sequential(
            nn.AvgPool2d(kernel_size=6)
        )  # output_size = 1
        self.convblock8 = nn.Sequential(
            nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(1, 1), padding=0, bias=False),
            # no BatchNorm/ReLU/Dropout after the final 1x1 projection to logits
        )

    def forward(self, x):
        x = self.convblock1(x)
        x = self.convblock2(x)
        x = self.convblock3(x)
        x = self.pool1(x)
        x = self.convblock4(x)
        x = self.convblock5(x)
        x = self.convblock6(x)
        x = self.convblock7(x)
        x = self.gap(x)
        x = self.convblock8(x)
        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
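A quick sanity check (illustrative, not part of the original post) confirms the model maps a batch of 28x28 grayscale images to a (batch, 10) output and reports the trainable-parameter count:

model = Net()
out = model(torch.randn(2, 1, 28, 28))  # two dummy MNIST-sized images
print(out.shape)                         # expected: torch.Size([2, 10])
print(sum(p.numel() for p in model.parameters() if p.requires_grad))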
Step 2) Train and test loops

from tqdm import tqdm

train_losses = []
test_losses = []
train_acc = []
test_acc = []

def train(model, device, train_loader, optimizer, epoch):
    model.train()
    pbar = tqdm(train_loader)
    correct = 0
    processed = 0
    for batch_idx, (data, target) in enumerate(pbar):
        # get samples
        data, target = data.to(device), target.to(device)
        # PyTorch accumulates gradients across backward passes, so they must be
        # zeroed at the start of each batch for the parameter update to be
        # correct (see the short sketch after this function)
        optimizer.zero_grad()
        # Predict
        y_pred = model(data)
        # Calculate loss; store the float value rather than the tensor so the
        # computation graph can be freed
        loss = F.nll_loss(y_pred, target)
        train_losses.append(loss.item())
        # Backpropagation
        loss.backward()
        optimizer.step()
        # Update the tqdm progress bar
        pred = y_pred.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)
        pbar.set_description(desc=f'Loss={loss.item():.4f} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
        train_acc.append(100*correct/processed)
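To see the accumulation behavior that optimizer.zero_grad() guards against, here is a standalone illustration (not from the original post):

w = torch.ones(1, requires_grad=True)
(w * 2).sum().backward()
print(w.grad)     # tensor([2.])
(w * 2).sum().backward()
print(w.grad)     # tensor([4.]) -- gradients add up across backward() calls
w.grad.zero_()    # optimizer.zero_grad() clears them for every parameter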
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

    test_acc.append(100. * correct / len(test_loader.dataset))
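The four lists collected above can be visualized once training finishes; a minimal sketch using matplotlib (assumed to be available, not part of the original code):

import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2, figsize=(12, 8))
axs[0, 0].plot(train_losses); axs[0, 0].set_title("Training Loss")
axs[1, 0].plot(train_acc);    axs[1, 0].set_title("Training Accuracy")
axs[0, 1].plot(test_losses);  axs[0, 1].set_title("Test Loss")
axs[1, 1].plot(test_acc);     axs[1, 1].set_title("Test Accuracy")
plt.show()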
Step 3) Optimizer, scheduler, and training run
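This step assumes device, train_loader, and test_loader already exist. A minimal setup sketch for MNIST (the batch size of 128 and the plain ToTensor+Normalize pipeline are assumptions, not from the original):

import torch
from torchvision import datasets, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Standard MNIST normalization (dataset mean/std)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True, transform=transform),
    batch_size=128, shuffle=True)  # batch size is an assumption
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transform),
    batch_size=128, shuffle=False)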
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = StepLR(optimizer, step_size=6, gamma=0.1)

EPOCHS = 15
for epoch in range(EPOCHS):
    print("EPOCH:", epoch)
    train(model, device, train_loader, optimizer, epoch)
    # scheduler.step()
    test(model, device, test_loader)
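Note that the StepLR scheduler is constructed but scheduler.step() is commented out, so the learning rate stays at 0.01 for all 15 epochs. If the decay is wanted, stepping once per epoch after the training pass would look like:

for epoch in range(EPOCHS):
    print("EPOCH:", epoch)
    train(model, device, train_loader, optimizer, epoch)
    scheduler.step()  # multiplies the LR by gamma=0.1 every step_size=6 epochs
    test(model, device, test_loader)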