from __future__ import print_function
import time
import math
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torchvision import datasets, transforms
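# Tableau 20 palette as 0-255 RGB tuples, rescaled below to the 0-1 floats
# matplotlib expects (presumably for the figures saved under --vis_path).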
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                    help='input batch size for training (default: 256)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=30, metavar='N',
                    help='number of epochs to train (default: 30)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.0005, metavar='W',
                    help='SGD weight decay (default: 0.0005)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--vis_path', type=str, default="visualizations/color6", metavar='S',
                    help='path to save your visualization figures')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
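# pin_memory speeds up host-to-GPU copies; num_workers sets how many
# background worker processes load and preprocess batches.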
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
def weights_init(m):
    # He (Kaiming) initialization for conv weights, scaled by fan-out;
    # linear layers keep PyTorch's default weight init and get zero bias.
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.Linear):
        nn.init.constant_(m.bias, 0)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)   # 1x28x28 -> 32x24x24
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)  # 32x12x12 -> 64x8x8
        self.fc1 = nn.Linear(1024, 256)                # 64 * 4 * 4 = 1024 after pooling
        self.fc2 = nn.Linear(256, 2)                   # 2-D features for visualization
        self.fc3 = nn.Linear(2, 10)

    def forward(self, x, y=None):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2, stride=2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2, stride=2)
        x = x.view(-1, 1024)
        x = F.relu(self.fc1(x))
        features = self.fc2(x)
        x = self.fc3(features)
        return F.log_softmax(x, dim=1)
model = Net().to(device)
model.apply(weights_init)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
def train(epoch):
    model.train()
    for batch_idx, (batch_data, batch_labels) in enumerate(train_loader):
        batch_data, batch_labels = batch_data.to(device), batch_labels.to(device)
        optimizer.zero_grad()
        batch_scores = model(batch_data)
        loss = F.nll_loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.2f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(batch_data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch_data, batch_labels in test_loader:
            batch_data, batch_labels = batch_data.to(device), batch_labels.to(device)
            batch_scores = model(batch_data)
            # sum up the batch loss (size_average=False is deprecated)
            test_loss += F.nll_loss(batch_scores, batch_labels, reduction='sum').item()
            pred = batch_scores.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(batch_labels.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Decay the learning rate by 10x every 10 epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
for epoch in range(1, args.epochs + 1):
    start = time.time()
    train(epoch)
    end = time.time()
    print('Total time taken: {:.2f}s\n'.format(end - start))
    test()
    scheduler.step()  # step after the epoch's optimizer updates
The MXNet version is here: https://github.com/luoyetx/mx-lsoftmax. I ran the MXNet version without the lsoftmax layer using this command:

python2 mnist.py --train --no-lsoftmax --gpu 0 --batch_size 256

Can someone tell me what I am doing wrong? Or is PyTorch just that much slower? I am using a GTX 1080 Ti with CUDA 9 and cuDNN 7.
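One thing worth ruling out before blaming PyTorch itself: cuDNN autotuning is disabled by default, and CUDA kernels launch asynchronously, so time.time() around train() can stop the clock before queued GPU work has actually finished. Below is a minimal sketch of a more careful timing setup, reusing the model, train(), and use_cuda defined in the script above; torch.backends.cudnn.benchmark and torch.cuda.synchronize() are standard PyTorch calls, and timed_epoch is just a hypothetical helper name:

# Let cuDNN profile the available convolution algorithms once and cache the
# fastest one; this usually helps when input shapes are fixed, as with MNIST.
torch.backends.cudnn.benchmark = True

def timed_epoch(epoch):
    # Synchronize before and after so the measurement covers all queued
    # GPU kernels, not just the Python-side launch overhead.
    if use_cuda:
        torch.cuda.synchronize()
    start = time.time()
    train(epoch)
    if use_cuda:
        torch.cuda.synchronize()
    return time.time() - start

Note that with cudnn.benchmark enabled, the first epoch is slower while algorithms are being profiled, so it is usually fairer to compare timings from the second epoch onward.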