Dimension problem when training AlexNet on the GTSRB dataset in PyTorch

I am new to PyTorch and deep learning, and I am trying to train AlexNet on the GTSRB dataset in PyTorch.

Some information about the “German Traffic Sign Recognition Benchmark” (GTSRB) dataset:

The GTSRB dataset consists of 43 classes, with 39209 training images and 12630 test images (all RGB, with dimensions ranging from 29x30x3 to 144x48x3). For further information see here.

Model Architecture:

I used the model architecture from here, slightly modified.

Implementation:

For guidance, I followed the implementation found here and modified it to run in a Jupyter notebook (Anaconda distribution).

This is the structure in the file system (~/Desktop/pytorch-alexnet-gtsrb):

[screenshot: file system structure]

File Alexnet.ipynb

from __future__ import print_function

import os
import shutil
import time
import zipfile

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms


IMG_SIZE = 64 # Image size has to be 64x64
NUM_CLASSES = 43 # GTSRB dataset has 43 classes


# Prepare dataset
def prepare_dataset(folder):
    train_zip = folder + '/train_images.zip'
    test_zip = folder + '/test_images.zip'
    if not os.path.exists(train_zip) or not os.path.exists(test_zip):
        raise RuntimeError("Could not find " + train_zip + " and " + test_zip)

    # extract train_data.zip to train_data
    train_folder = folder + '/train_images'

    if not os.path.isdir(train_folder):
        print(train_folder + ' not found, extracting ' + train_zip)
        zip_ref = zipfile.ZipFile(train_zip, 'r')
        zip_ref.extractall(folder)
        zip_ref.close()

    # extract test_data.zip to test_data
    test_folder = folder + '/test_images'
    if not os.path.isdir(test_folder):
        print(test_folder + ' not found, extracting ' + test_zip)
        zip_ref = zipfile.ZipFile(test_zip, 'r')
        zip_ref.extractall(folder)
        zip_ref.close()

    # make validation_data by using images 00000*, 00001* and 00002* in each class
    val_folder = folder + '/val_images'
    if not os.path.isdir(val_folder):
        print(val_folder + ' not found, making a validation set')
        os.mkdir(val_folder)
        for dirs in os.listdir(train_folder):
            if dirs.startswith('000'):
                os.mkdir(val_folder + '/' + dirs)
                for f in os.listdir(train_folder + '/' + dirs):
                    if f.startswith('00000') or f.startswith('00001') or f.startswith('00002'):
                        # move file to validation folder
                        os.rename(train_folder + '/' + dirs + '/' + f, val_folder + '/' + dirs + '/' + f)

def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (data, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        ######
        print("Input has shape: " + str(data.shape))
        ######
        data = data.to(device)
        target = target.to(device)

        output = model(data)
        loss = F.nll_loss(output, target)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], data.size(0))
        top1.update(prec1[0], data.size(0))
        top5.update(prec5[0], data.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))               




def validate(val_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):

        if torch.cuda.is_available():
            target = target.cuda()
        else:
            target = target.cpu()

        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   i, len(val_loader), batch_time=batch_time, loss=losses,
                   top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg




def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth')




def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = 0.1 * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr




def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res




class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

prepare_dataset('data')

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Load training dataset
traindir = 'data/train_images'

train_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(traindir, transforms.Compose(
        [
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )), batch_size=10, shuffle=True, num_workers=1)


# Load validation dataset    
valdir = 'data/val_images'

val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose(
        [
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )), batch_size=10, shuffle=True, num_workers=1)






from model import AlexNet

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AlexNet().to(device)

#######
use_sgd_optimizer = True
#######

# criterion is needed for both optimizer branches, so define it unconditionally
if torch.cuda.is_available():
    criterion = nn.CrossEntropyLoss().cuda()
else:
    criterion = nn.CrossEntropyLoss()

if use_sgd_optimizer:
    optimizer = torch.optim.SGD(model.parameters(),
                                0.1,  # learning rate
                                momentum=0.9,
                                weight_decay=1e-4)
else:
    optimizer = optim.Adam(model.parameters(), lr=0.001)




best_prec1 = 0

for epoch in range(1, 10):
    adjust_learning_rate(optimizer, epoch)

    # train for one epoch
    train(train_loader, model, criterion, optimizer, epoch)

    # evaluate on validation set
    prec1 = validate(val_loader, model, criterion)

    # remember best prec@1 and save checkpoint
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    save_checkpoint({
        'epoch': epoch + 1,
        'arch': "alexnet",
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
    }, is_best)

File model.py

import torch
import torch.nn as nn

from constants import NUM_CLASSES

class AlexNet(nn.Module):

    def __init__(self, num_classes=NUM_CLASSES):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x

File constants.py

NUM_CLASSES = 43
IMG_SIZE = 64

When I run the code, I get the following error:

Input has shape: torch.Size([10, 3, 64, 64])
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-11-a683ff9e57ec> in <module>()
     67 
     68     # train for one epoch
---> 69     train(train_loader, model, criterion, optimizer, epoch)
     70 
     71     # evaluate on validation set

<ipython-input-10-d58a1c9f986c> in train(train_loader, model, criterion, optimizer, epoch)
     20         target = target.to(device)
     21 
---> 22         output = model(data)
     23         loss = F.nll_loss(output, target)
     24 

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/Desktop/pytorch-alexnet-gtsrb/model.py in forward(self, x)
    101     def forward(self, x):
    102         x = self.features(x)
--> 103         x = x.view(x.size(0), 256 * 6 * 6)
    104         x = self.classifier(x)
    105         return x

RuntimeError: shape '[64, 2304]' is invalid for input of size 16384
---------------------------------------------------------------------------
  1. Am I right that torch.Size([10, 3, 64, 64]) means my input data has the following parameters?
  • batch_size = 10
  • channels = 3
  • height = 64
  • width = 64
  2. Why is the input of size 16384 when my input shape is torch.Size([10, 3, 64, 64])?

  3. I guess there is a problem with the parameters of the model architecture (see the code of model.py)? If so, what do I have to change so that input images with a target size of 64x64 can be trained with this AlexNet model?

  4. Did I forget anything else?

Could you print the shape of x after the self.features call and before the view operation?
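For instance, something like this in forward (just a sketch; printing x.shape instead of the full tensor keeps the output readable):

def forward(self, x):
    x = self.features(x)
    print("x after self.features:", x.shape)  # prints a torch.Size(...)
    x = x.view(x.size(0), 256 * 6 * 6)
    x = self.classifier(x)
    return x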

---------------------------------------------------------------------------
Input has shape: torch.Size([10, 3, 64, 64])
x after self.features: tensor([[[[0.0486]],

         [[0.0576]],

         [[0.0202]],

         ...,

         [[0.0301]],

         [[0.0128]],

         [[0.0528]]],


        [[[0.0439]],

         [[0.0621]],

         [[0.0302]],

         ...,

         [[0.0398]],

         [[0.0171]],

         [[0.0494]]],


        [[[0.0156]],

         [[0.0240]],

         [[0.0101]],

         ...,

         [[0.0218]],

         [[0.0055]],

         [[0.0158]]],


        ...,


        [[[0.0359]],

         [[0.0443]],

         [[0.0132]],

         ...,

         [[0.0209]],

         [[0.0095]],

         [[0.0220]]],


        [[[0.0192]],

         [[0.0251]],

         [[0.0114]],

         ...,

         [[0.0180]],

         [[0.0051]],

         [[0.0143]]],


        [[[0.0409]],

         [[0.0476]],

         [[0.0173]],

         ...,

         [[0.0355]],

         [[0.0091]],

         [[0.0355]]]], grad_fn=<MaxPool2DWithIndicesBackward>)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-4-702d467d5d33> in <module>()
     67 
     68     # train for one epoch
---> 69     train(train_loader, model, criterion, optimizer, epoch)
     70 
     71     # evaluate on validation set

<ipython-input-3-d58a1c9f986c> in train(train_loader, model, criterion, optimizer, epoch)
     20         target = target.to(device)
     21 
---> 22         output = model(data)
     23         loss = F.nll_loss(output, target)
     24 

~/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

~/Desktop/pytorch-alexnet-gtsrb/model.py in forward(self, x)
     45         x = self.features(x)
     46         print("x after self.features: " + str(x))
---> 47         x = x.view(x.size(0), 256 * 6 * 6)
     48         x = self.classifier(x)
     49         return x

RuntimeError: shape '[10, 9216]' is invalid for input of size 2560
---------------------------------------------------------------------------

Interestingly, after closing and reopening the Jupyter notebook, the runtime error changed from “RuntimeError: shape ‘[64, 2304]’ is invalid for input of size 16384” to “RuntimeError: shape ‘[10, 9216]’ is invalid for input of size 2560”.

Your size calculation might be a bit wrong, since x will have the shape [10, 256, 1, 1] after features.
Change your first linear layer in classifier to nn.Linear(256, 4096) and the reshaping operation to x = x.view(x.size(0), -1) and your code should work.
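To make the mismatch concrete, here is a minimal standalone sketch (with a made-up input batch) that runs the feature extractor from model.py on a 64x64 input. Per layer, the spatial size follows out = floor((in + 2*padding - kernel) / stride) + 1, so 64 shrinks to 15, 7, 3 and finally 1:

import torch
import torch.nn as nn

# Same feature extractor as in model.py; spatial size per stage:
# 64 -(conv k=11,s=4,p=2)-> 15 -(pool)-> 7 -(conv k=5,p=2)-> 7 -(pool)-> 3
# -(3x conv k=3,p=1)-> 3 -(pool)-> 1
features = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(64, 192, kernel_size=5, padding=2),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(192, 384, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(384, 256, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(256, 256, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2),
)

x = torch.randn(10, 3, 64, 64)  # dummy batch like the one above
print(features(x).shape)        # torch.Size([10, 256, 1, 1])
# 10 * 256 * 1 * 1 = 2560 elements, which is exactly the "input of size 2560"
# from the error message, while the old view() expected 10 * 9216 elements.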


Dear ptrblck,

I changed the code in model.py as you suggested, saved the file, and closed and reopened the Jupyter notebook:

import torch
import torch.nn as nn

from constants import NUM_CLASSES

class AlexNet(nn.Module):

    def __init__(self, num_classes=NUM_CLASSES):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            #nn.Linear(256 * 6 * 6, 4096),
            nn.Linear(256, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        # x = x.view(x.size(0), 256 * 6 * 6)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

I now get the error below, and I’m not sure what’s wrong now. Can you help me?

Input has shape: torch.Size([10, 3, 64, 64])
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-4-702d467d5d33> in <module>()
     67 
     68     # train for one epoch
---> 69     train(train_loader, model, criterion, optimizer, epoch)
     70 
     71     # evaluate on validation set

<ipython-input-3-d58a1c9f986c> in train(train_loader, model, criterion, optimizer, epoch)
     24 
     25         prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
---> 26         losses.update(loss.data[0], data.size(0))
     27         top1.update(prec1[0], data.size(0))
     28         top5.update(prec5[0], data.size(0))

IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
---------------------------------------------------------------------------

Since loss should be a scalar value, try to save it using:

losses.update(loss.item(), data.size(0))
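For background, here is a tiny standalone illustration (not from the thread) of why the old indexing fails: since PyTorch 0.4 a reduced loss is a 0-dim tensor, which cannot be indexed, while .item() returns the Python number:

import torch

loss = torch.tensor(2.5)  # stand-in for a scalar (0-dim) loss tensor
print(loss.item())        # 2.5 -- the supported way to read a 0-dim tensor
# print(loss.data[0])     # IndexError: invalid index of a 0-dim tensor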

Dear ptrblck,

thank you a lot - you made my day!

The training works now. Here is the updated source code:

File Alexnet.ipynb

from __future__ import print_function

import datetime
import os
import shutil
import time
import zipfile

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms


IMG_SIZE = 64
NUM_CLASSES = 43


# Prepare dataset
def prepare_dataset(folder):
    train_zip = folder + '/train_images.zip'
    test_zip = folder + '/test_images.zip'
    if not os.path.exists(train_zip) or not os.path.exists(test_zip):
        raise RuntimeError("Could not find " + train_zip + " and " + test_zip)

    # extract train_data.zip to train_data
    train_folder = folder + '/train_images'

    if not os.path.isdir(train_folder):
        print(train_folder + ' not found, extracting ' + train_zip)
        zip_ref = zipfile.ZipFile(train_zip, 'r')
        zip_ref.extractall(folder)
        zip_ref.close()

    # extract test_data.zip to test_data
    test_folder = folder + '/test_images'
    if not os.path.isdir(test_folder):
        print(test_folder + ' not found, extracting ' + test_zip)
        zip_ref = zipfile.ZipFile(test_zip, 'r')
        zip_ref.extractall(folder)
        zip_ref.close()

    # make validation_data by using images 00000*, 00001* and 00002* in each class
    val_folder = folder + '/val_images'
    if not os.path.isdir(val_folder):
        print(val_folder + ' not found, making a validation set')
        os.mkdir(val_folder)
        for dirs in os.listdir(train_folder):
            if dirs.startswith('000'):
                os.mkdir(val_folder + '/' + dirs)
                for f in os.listdir(train_folder + '/' + dirs):
                    if f.startswith('00000') or f.startswith('00001') or f.startswith('00002'):
                        # move file to validation folder
                        os.rename(train_folder + '/' + dirs + '/' + f, val_folder + '/' + dirs + '/' + f)


def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if torch.cuda.is_available():
            target = target.cuda()
        else:
            target = target.cpu()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:

            out_train = ('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})').format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5)

            with open("######Progress-training.txt", "a") as myfile:
                myfile.write(str(datetime.datetime.now()) + " -- " + out_train + "\n")

            print(out_train)




def validate(val_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):

        if torch.cuda.is_available():
            target = target.cuda()
        else:
            target = target.cpu()

        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            out_val = ('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})').format(
                   i, len(val_loader), batch_time=batch_time, loss=losses,
                   top1=top1, top5=top5)

            with open("######Progress-validation.txt", "a") as myfile:
                myfile.write(str(datetime.datetime.now()) + " -- " + out_val + "\n")

            print(out_val)

    out_val_final = (' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    with open("######Progress-validation-final.txt", "a") as myfile:
        myfile.write(str(datetime.datetime.now()) + " -- " + out_val_final + "\n")

    print(out_val_final)  # was print(out_val), which re-printed the last batch line

    return top1.avg




def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    # note: this ignores the `state` dict passed in and saves the global model's weights
    #torch.save(state, filename)
    torch.save(model.state_dict(), filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth')




def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = 0.1 * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr




def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res




class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

prepare_dataset('data')

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Load training dataset
traindir = 'data/train_images'

train_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(traindir, transforms.Compose(
        [
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )), batch_size=100, shuffle=True, num_workers=1)
    # The model will be trained on 354 batches of 100 images each
    # (as specified by the batch_size of the train_loader).
    # Since the training data is split into training and validation sets,
    # this number (354) is a bit smaller than 39209//100.


# Load validation dataset    
valdir = 'data/val_images'

val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose(
        [
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )), batch_size=100, shuffle=True, num_workers=1)






from model import AlexNet

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AlexNet().to(device)

#####################
use_sgd_optimizer = True
#####################

# criterion is needed for both optimizer branches, so define it unconditionally
if torch.cuda.is_available():
    criterion = nn.CrossEntropyLoss().cuda()
else:
    criterion = nn.CrossEntropyLoss()

if use_sgd_optimizer:
    optimizer = torch.optim.SGD(model.parameters(),
                                0.1,  # learning rate
                                momentum=0.9,
                                weight_decay=1e-4)
else:
    optimizer = optim.Adam(model.parameters(), lr=0.001)



#####################
epochs = 51 # trains for 50 epochs
best_prec1 = 0
#####################
for epoch in range(1, epochs):

    progress_epochs = "\n============================\nStart training epoch: " + str(epoch) + "/" + str(epochs-1) + "\n@ " + str(datetime.datetime.now()) + "\n============================"

    with open("######Progress-epochs.txt", "a") as myfile:
        myfile.write(progress_epochs + "\n")

    print(progress_epochs)    


    adjust_learning_rate(optimizer, epoch)

    # train for one epoch
    train(train_loader, model, criterion, optimizer, epoch)

    # evaluate on validation set
    prec1 = validate(val_loader, model, criterion)

    # remember best prec@1 and save checkpoint
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    save_checkpoint({
        'epoch': epoch + 1,
        'arch': "alexnet",
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
    }, is_best)


    progress_epochs = "\n=============================\nFinished training epoch: " + str(epoch) + "/" + str(epochs-1) + "\n@ " + str(datetime.datetime.now()) + "\n============================="

    with open("######Progress-epochs.txt", "a") as myfile:
        myfile.write(progress_epochs + "\n")

    print(progress_epochs)    

File model.py

import torch
import torch.nn as nn

from constants import NUM_CLASSES

class AlexNet(nn.Module):

    def __init__(self, num_classes=NUM_CLASSES):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            #nn.Linear(256 * 6 * 6, 4096),
            nn.Linear(256, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        # x = x.view(x.size(0), 256 * 6 * 6)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

File constants.py

NUM_CLASSES = 43
IMG_SIZE = 64

It now produces the following output:

Epoch: [1][0/354] Time 2.683 (2.683) Data 0.123 (0.123) Loss 3.7641 (3.7641) Prec@1 0.000 (0.000) Prec@5 7.000 (7.000)
Epoch: [1][10/354] Time 2.030 (2.100) Data 0.003 (0.014) Loss 3.5542 (3.6974) Prec@1 5.000 (5.455) Prec@5 27.000 (24.182)
Epoch: [1][20/354] Time 2.014 (2.049) Data 0.003 (0.009) Loss 3.4839 (3.6009) Prec@1 1.000 (5.857) Prec@5 17.000 (24.333)
Epoch: [1][30/354] Time 2.437 (2.051) Data 0.003 (0.007) Loss 3.3573 (3.5383) Prec@1 5.000 (6.097) Prec@5 33.000 (25.742)
Epoch: [1][40/354] Time 1.994 (2.074) Data 0.003 (0.006) Loss 3.4271 (3.5123) Prec@1 9.000 (5.976) Prec@5 33.000 (26.415)
Epoch: [1][50/354] Time 2.034 (2.065) Data 0.003 (0.005) Loss 3.4206 (3.5004) Prec@1 6.000 (5.843) Prec@5 23.000 (26.059)
Epoch: [1][60/354] Time 1.970 (2.051) Data 0.003 (0.005) Loss 3.3889 (3.4947) Prec@1 4.000 (5.984) Prec@5 32.000 (26.705)
Epoch: [1][70/354] Time 1.965 (2.058) Data 0.003 (0.005) Loss 3.5263 (3.4855) Prec@1 3.000 (5.901) Prec@5 20.000 (26.930)
Epoch: [1][80/354] Time 2.017 (2.057) Data 0.002 (0.004) Loss 3.5213 (3.4840) Prec@1 5.000 (5.827) Prec@5 22.000 (27.012)
Epoch: [1][90/354] Time 2.030 (2.049) Data 0.002 (0.004) Loss 3.4811 (3.4783) Prec@1 7.000 (5.802) Prec@5 24.000 (27.374)
Epoch: [1][100/354] Time 2.056 (2.044) Data 0.003 (0.004) Loss 3.3517 (3.4692) Prec@1 7.000 (6.000) Prec@5 36.000 (28.178)
Epoch: [1][110/354] Time 1.996 (2.037) Data 0.003 (0.004) Loss 3.3756 (3.4605) Prec@1 6.000 (6.054) Prec@5 39.000 (28.730)
Epoch: [1][120/354] Time 1.961 (2.034) Data 0.003 (0.004) Loss 3.2252 (3.4559) Prec@1 7.000 (6.256) Prec@5 33.000 (29.074)
Epoch: [1][130/354] Time 1.949 (2.032) Data 0.003 (0.004) Loss 3.3313 (3.4484) Prec@1 5.000 (6.359) Prec@5 29.000 (29.351)
Epoch: [1][140/354] Time 2.181 (2.031) Data 0.002 (0.004) Loss 3.3257 (3.4427) Prec@1 4.000 (6.348) Prec@5 32.000 (29.567)
Epoch: [1][150/354] Time 2.000 (2.028) Data 0.002 (0.004) Loss 3.3333 (3.4370) Prec@1 3.000 (6.338) Prec@5 32.000 (29.954)
Epoch: [1][160/354] Time 2.047 (2.027) Data 0.001 (0.003) Loss 3.1608 (3.4282) Prec@1 14.000 (6.578) Prec@5 43.000 (30.472)
Epoch: [1][170/354] Time 2.045 (2.026) Data 0.003 (0.003) Loss 3.2424 (3.4190) Prec@1 8.000 (6.854) Prec@5 36.000 (30.930)
Epoch: [1][180/354] Time 1.969 (2.025) Data 0.003 (0.003) Loss 3.3580 (3.4087) Prec@1 7.000 (7.177) Prec@5 41.000 (31.459)
Epoch: [1][190/354] Time 1.977 (2.024) Data 0.003 (0.003) Loss 3.4424 (3.3933) Prec@1 10.000 (7.445) Prec@5 31.000 (32.099)
Epoch: [1][200/354] Time 2.063 (2.023) Data 0.002 (0.003) Loss 2.9468 (3.3781) Prec@1 17.000 (7.736) Prec@5 54.000 (32.796)
Epoch: [1][210/354] Time 2.003 (2.021) Data 0.003 (0.003) Loss 2.8463 (3.3585) Prec@1 14.000 (8.118) Prec@5 51.000 (33.583)
Epoch: [1][220/354] Time 1.963 (2.020) Data 0.004 (0.003) Loss 3.2116 (3.3477) Prec@1 9.000 (8.489) Prec@5 39.000 (33.995)
Epoch: [1][230/354] Time 2.021 (2.020) Data 0.002 (0.003) Loss 3.1663 (3.3409) Prec@1 12.000 (8.632) Prec@5 38.000 (34.468)
Epoch: [1][240/354] Time 1.954 (2.018) Data 0.003 (0.003) Loss 3.0815 (3.3294) Prec@1 14.000 (8.983) Prec@5 41.000 (34.929)
Epoch: [1][250/354] Time 1.992 (2.018) Data 0.003 (0.003) Loss 3.2567 (3.3148) Prec@1 9.000 (9.319) Prec@5 41.000 (35.514)
Epoch: [1][260/354] Time 1.988 (2.017) Data 0.003 (0.003) Loss 3.2490 (3.3170) Prec@1 10.000 (9.303) Prec@5 38.000 (35.475)
Epoch: [1][270/354] Time 2.091 (2.016) Data 0.004 (0.003) Loss 3.1162 (3.3074) Prec@1 22.000 (9.579) Prec@5 45.000 (35.908)
Epoch: [1][280/354] Time 1.925 (2.016) Data 0.003 (0.003) Loss 2.8177 (3.2990) Prec@1 23.000 (9.861) Prec@5 57.000 (36.317)
Epoch: [1][290/354] Time 2.057 (2.015) Data 0.003 (0.003) Loss 2.9302 (3.2892) Prec@1 19.000 (10.103) Prec@5 46.000 (36.814)
Epoch: [1][300/354] Time 2.026 (2.021) Data 0.002 (0.003) Loss 2.6849 (3.2711) Prec@1 22.000 (10.495) Prec@5 53.000 (37.452)
Epoch: [1][310/354] Time 2.024 (2.032) Data 0.003 (0.003) Loss 2.8718 (3.2517) Prec@1 21.000 (10.820) Prec@5 49.000 (38.096)
Epoch: [1][320/354] Time 1.999 (2.033) Data 0.002 (0.003) Loss 3.4280 (3.2563) Prec@1 13.000 (10.913) Prec@5 31.000 (38.150)

I have some last questions:

  1. Does the output above mean that AlexNet is only trained on 354 images, even though the whole training dataset contains about 39209 images?
  2. I guess that ‘Prec’ is the training accuracy?
    If so, why is this value greater than one? Shouldn’t it lie between zero and one, converging to 1.0 (whereby 1.0 means 100 %)?

I’m glad it’s working! :slight_smile:

  1. No, your model will be trained on 354 batches, each containing 100 images (as specified by the batch_size of your train_loader). Since you are splitting your training data into training and validation, this number is a bit smaller than 39209//100.
  2. Prec@k sounds like it should give you the precision for the top k predictions, which is calculated as TP/(TP+FP). However, since you have multiple classes, and looking at the code of accuracy, it actually calculates the accuracy of the top k predictions.
    As you can see, the value is scaled by 100 and divided by the batch size, so it already gives you the percentage.
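To see this concretely, here is a tiny standalone example (made-up logits, 4 samples, 5 classes) run through the same accuracy() helper as in the notebook above:

import torch

def accuracy(output, target, topk=(1,)):
    """Same helper as in the notebook above."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

output = torch.tensor([[0.7, 0.2, 0.1, 0.0, 0.0],   # top-2 predictions: 0, 1
                       [0.1, 0.6, 0.2, 0.1, 0.0],   # top-2 predictions: 1, 2
                       [0.3, 0.4, 0.2, 0.1, 0.0],   # top-2 predictions: 1, 0
                       [0.2, 0.1, 0.3, 0.3, 0.1]])  # top-2 predictions: 2, 3
target = torch.tensor([0, 1, 0, 4])

prec1, prec2 = accuracy(output, target, topk=(1, 2))
print(prec1.item(), prec2.item())  # 50.0 75.0 -- already percentages, not fractions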

OK, I understand. :blush:
Nice to hear that, and thank you again for your help! :pray:


When training AlexNet for at least 5 epochs, there is no improvement after epoch 3 in precision (Prec) and loss (see output below).

What is the reason for that, and how can I fix it?

--------------------------------------------------------------------------------------------------------------
============================
Start training epoch: 1/50
@ 2018-12-20 17:35:18.972483
============================
Epoch: [1][0/354]	Time 2.656 (2.656)	Data 0.117 (0.117)	Loss 3.7634 (3.7634)	Prec@1 2.000 (2.000)	Prec@5 9.000 (9.000)
Epoch: [1][10/354]	Time 2.016 (2.083)	Data 0.003 (0.013)	Loss 3.5828 (3.6961)	Prec@1 6.000 (6.182)	Prec@5 29.000 (28.091)
Epoch: [1][20/354]	Time 2.015 (2.056)	Data 0.002 (0.008)	Loss 3.4318 (3.6030)	Prec@1 8.000 (6.000)	Prec@5 25.000 (27.476)
Epoch: [1][30/354]	Time 2.042 (2.046)	Data 0.003 (0.006)	Loss 3.5251 (3.5495)	Prec@1 4.000 (5.935)	Prec@5 16.000 (27.548)
Epoch: [1][40/354]	Time 1.953 (2.037)	Data 0.003 (0.005)	Loss 3.3811 (3.5167)	Prec@1 5.000 (6.073)	Prec@5 31.000 (27.634)
Epoch: [1][50/354]	Time 1.951 (2.031)	Data 0.003 (0.005)	Loss 3.4218 (3.4937)	Prec@1 5.000 (5.843)	Prec@5 26.000 (27.255)
Epoch: [1][60/354]	Time 1.968 (2.025)	Data 0.003 (0.005)	Loss 3.4051 (3.4816)	Prec@1 11.000 (6.033)	Prec@5 29.000 (27.836)
Epoch: [1][70/354]	Time 2.045 (2.024)	Data 0.003 (0.004)	Loss 3.3996 (3.4749)	Prec@1 7.000 (6.113)	Prec@5 26.000 (27.986)
Epoch: [1][80/354]	Time 1.985 (2.021)	Data 0.010 (0.004)	Loss 3.3590 (3.4666)	Prec@1 7.000 (6.049)	Prec@5 35.000 (28.346)
Epoch: [1][90/354]	Time 1.993 (2.018)	Data 0.002 (0.004)	Loss 3.4126 (3.4599)	Prec@1 10.000 (6.165)	Prec@5 30.000 (28.582)
Epoch: [1][100/354]	Time 1.957 (2.016)	Data 0.002 (0.004)	Loss 3.3853 (3.4548)	Prec@1 6.000 (6.218)	Prec@5 36.000 (29.168)
Epoch: [1][110/354]	Time 2.045 (2.015)	Data 0.003 (0.004)	Loss 3.4590 (3.4529)	Prec@1 6.000 (6.234)	Prec@5 23.000 (29.171)
Epoch: [1][120/354]	Time 2.006 (2.017)	Data 0.002 (0.004)	Loss 3.4032 (3.4459)	Prec@1 9.000 (6.479)	Prec@5 31.000 (29.347)
Epoch: [1][130/354]	Time 1.966 (2.019)	Data 0.002 (0.004)	Loss 3.2392 (3.4405)	Prec@1 12.000 (6.634)	Prec@5 40.000 (29.733)
Epoch: [1][140/354]	Time 2.038 (2.020)	Data 0.002 (0.004)	Loss 3.0439 (3.4248)	Prec@1 22.000 (7.085)	Prec@5 42.000 (30.255)
Epoch: [1][150/354]	Time 1.932 (2.020)	Data 0.003 (0.004)	Loss 3.2785 (3.4144)	Prec@1 11.000 (7.258)	Prec@5 36.000 (30.616)
Epoch: [1][160/354]	Time 1.982 (2.020)	Data 0.002 (0.003)	Loss 3.2526 (3.4095)	Prec@1 14.000 (7.429)	Prec@5 37.000 (30.652)
Epoch: [1][170/354]	Time 1.991 (2.020)	Data 0.003 (0.003)	Loss 3.6547 (3.4247)	Prec@1 8.000 (7.480)	Prec@5 28.000 (30.702)
Epoch: [1][180/354]	Time 2.007 (2.020)	Data 0.003 (0.003)	Loss 3.5979 (3.4333)	Prec@1 4.000 (7.414)	Prec@5 20.000 (30.652)
Epoch: [1][190/354]	Time 2.035 (2.020)	Data 0.003 (0.004)	Loss 3.3323 (3.4335)	Prec@1 5.000 (7.319)	Prec@5 30.000 (30.628)
Epoch: [1][200/354]	Time 1.991 (2.019)	Data 0.003 (0.003)	Loss 3.2653 (3.4328)	Prec@1 7.000 (7.308)	Prec@5 40.000 (30.622)
Epoch: [1][210/354]	Time 1.983 (2.018)	Data 0.003 (0.003)	Loss 3.5182 (3.4308)	Prec@1 6.000 (7.270)	Prec@5 26.000 (30.640)
Epoch: [1][220/354]	Time 1.917 (2.016)	Data 0.002 (0.003)	Loss 3.4520 (3.4308)	Prec@1 5.000 (7.199)	Prec@5 25.000 (30.606)
Epoch: [1][230/354]	Time 1.929 (2.016)	Data 0.002 (0.003)	Loss 3.4194 (3.4295)	Prec@1 8.000 (7.177)	Prec@5 28.000 (30.693)
Epoch: [1][240/354]	Time 2.056 (2.016)	Data 0.007 (0.003)	Loss 3.4955 (3.4287)	Prec@1 5.000 (7.237)	Prec@5 35.000 (30.842)
Epoch: [1][250/354]	Time 2.075 (2.016)	Data 0.003 (0.003)	Loss 3.4167 (3.4313)	Prec@1 7.000 (7.167)	Prec@5 29.000 (30.677)
Epoch: [1][260/354]	Time 2.010 (2.017)	Data 0.003 (0.003)	Loss 3.3218 (3.4298)	Prec@1 4.000 (7.169)	Prec@5 34.000 (30.759)
Epoch: [1][270/354]	Time 2.085 (2.016)	Data 0.003 (0.003)	Loss 3.3360 (3.4284)	Prec@1 8.000 (7.229)	Prec@5 37.000 (30.790)
Epoch: [1][280/354]	Time 1.927 (2.016)	Data 0.003 (0.003)	Loss 3.3796 (3.4276)	Prec@1 9.000 (7.256)	Prec@5 34.000 (30.879)
Epoch: [1][290/354]	Time 2.109 (2.015)	Data 0.003 (0.003)	Loss 3.4033 (3.4256)	Prec@1 9.000 (7.278)	Prec@5 39.000 (31.000)
Epoch: [1][300/354]	Time 2.023 (2.016)	Data 0.003 (0.003)	Loss 3.5021 (3.4245)	Prec@1 3.000 (7.243)	Prec@5 22.000 (31.023)
Epoch: [1][310/354]	Time 1.976 (2.016)	Data 0.002 (0.003)	Loss 3.4964 (3.4228)	Prec@1 5.000 (7.344)	Prec@5 33.000 (31.151)
Epoch: [1][320/354]	Time 1.933 (2.016)	Data 0.003 (0.003)	Loss 3.4231 (3.4205)	Prec@1 5.000 (7.393)	Prec@5 31.000 (31.343)
Epoch: [1][330/354]	Time 1.955 (2.016)	Data 0.003 (0.003)	Loss 3.3093 (3.4203)	Prec@1 11.000 (7.423)	Prec@5 32.000 (31.332)
Epoch: [1][340/354]	Time 2.002 (2.015)	Data 0.003 (0.003)	Loss 3.4843 (3.4185)	Prec@1 6.000 (7.449)	Prec@5 31.000 (31.455)
Epoch: [1][350/354]	Time 2.020 (2.015)	Data 0.003 (0.003)	Loss 3.3400 (3.4170)	Prec@1 3.000 (7.473)	Prec@5 28.000 (31.467)
/var/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:79: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.
/var/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:80: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.
Test: [0/39]	Time 0.836 (0.836)	Loss 4.1514 (4.1514)	Prec@1 1.000 (1.000)	Prec@5 7.000 (7.000)
Test: [10/39]	Time 0.613 (0.599)	Loss 4.1281 (4.0848)	Prec@1 1.000 (2.727)	Prec@5 9.000 (12.273)
Test: [20/39]	Time 0.553 (0.581)	Loss 4.0871 (4.0643)	Prec@1 1.000 (2.952)	Prec@5 9.000 (12.286)
Test: [30/39]	Time 0.580 (0.577)	Loss 4.1038 (4.0520)	Prec@1 2.000 (2.935)	Prec@5 7.000 (12.516)
Test: [30/39]	Time 0.580 (0.577)	Loss 4.1038 (4.0520)	Prec@1 2.000 (2.935)	Prec@5 7.000 (12.516)

=============================
Finished training epoch: 1/50
@ 2018-12-20 17:47:33.444769
=============================

============================
Start training epoch: 2/50
@ 2018-12-20 17:47:33.445010
============================
Epoch: [2][0/354]	Time 2.733 (2.733)	Data 0.116 (0.116)	Loss 3.3509 (3.3509)	Prec@1 11.000 (11.000)	Prec@5 36.000 (36.000)
Epoch: [2][10/354]	Time 2.082 (2.110)	Data 0.004 (0.012)	Loss 3.3165 (3.3353)	Prec@1 7.000 (9.273)	Prec@5 38.000 (35.727)
Epoch: [2][20/354]	Time 2.057 (2.073)	Data 0.003 (0.008)	Loss 3.2800 (3.3334)	Prec@1 9.000 (9.476)	Prec@5 33.000 (35.571)
Epoch: [2][30/354]	Time 1.999 (2.052)	Data 0.002 (0.006)	Loss 3.4280 (3.3352)	Prec@1 7.000 (9.806)	Prec@5 30.000 (35.258)
Epoch: [2][40/354]	Time 2.083 (2.045)	Data 0.003 (0.005)	Loss 3.2260 (3.3332)	Prec@1 7.000 (9.902)	Prec@5 41.000 (35.780)
Epoch: [2][50/354]	Time 2.124 (2.045)	Data 0.002 (0.005)	Loss 3.2208 (3.3331)	Prec@1 9.000 (9.824)	Prec@5 38.000 (35.588)
Epoch: [2][60/354]	Time 2.002 (2.039)	Data 0.003 (0.004)	Loss 3.3373 (3.3206)	Prec@1 10.000 (10.049)	Prec@5 33.000 (36.230)
Epoch: [2][70/354]	Time 1.993 (2.040)	Data 0.003 (0.004)	Loss 3.3383 (3.3254)	Prec@1 7.000 (10.042)	Prec@5 33.000 (36.042)
Epoch: [2][80/354]	Time 1.957 (2.042)	Data 0.003 (0.004)	Loss 3.4124 (3.3293)	Prec@1 7.000 (9.840)	Prec@5 32.000 (35.852)
Epoch: [2][90/354]	Time 2.075 (2.041)	Data 0.003 (0.004)	Loss 3.2135 (3.3235)	Prec@1 15.000 (9.813)	Prec@5 37.000 (35.758)
Epoch: [2][100/354]	Time 2.044 (2.039)	Data 0.003 (0.004)	Loss 3.2491 (3.3171)	Prec@1 13.000 (9.921)	Prec@5 40.000 (36.168)
Epoch: [2][110/354]	Time 2.050 (2.036)	Data 0.003 (0.004)	Loss 3.2031 (3.3128)	Prec@1 14.000 (10.117)	Prec@5 37.000 (36.432)
Epoch: [2][120/354]	Time 2.009 (2.035)	Data 0.003 (0.004)	Loss 3.2368 (3.3022)	Prec@1 12.000 (10.289)	Prec@5 38.000 (36.736)
Epoch: [2][130/354]	Time 1.972 (2.037)	Data 0.003 (0.004)	Loss 3.2186 (3.2908)	Prec@1 13.000 (10.573)	Prec@5 40.000 (37.214)
Epoch: [2][140/354]	Time 2.039 (2.035)	Data 0.003 (0.004)	Loss 3.2031 (3.2808)	Prec@1 16.000 (10.787)	Prec@5 42.000 (37.730)
Epoch: [2][150/354]	Time 2.081 (2.036)	Data 0.003 (0.004)	Loss 3.0879 (3.2682)	Prec@1 20.000 (11.113)	Prec@5 41.000 (38.245)
Epoch: [2][160/354]	Time 2.059 (2.036)	Data 0.003 (0.004)	Loss 3.0897 (3.2672)	Prec@1 12.000 (11.099)	Prec@5 49.000 (38.373)
Epoch: [2][170/354]	Time 1.978 (2.037)	Data 0.002 (0.004)	Loss 3.2178 (3.2681)	Prec@1 13.000 (11.082)	Prec@5 41.000 (38.415)
Epoch: [2][180/354]	Time 2.146 (2.037)	Data 0.002 (0.004)	Loss 3.3955 (3.2656)	Prec@1 6.000 (11.105)	Prec@5 36.000 (38.619)
Epoch: [2][190/354]	Time 1.984 (2.039)	Data 0.002 (0.003)	Loss 3.1231 (3.2643)	Prec@1 14.000 (11.068)	Prec@5 42.000 (38.754)
Epoch: [2][200/354]	Time 1.999 (2.038)	Data 0.003 (0.003)	Loss 3.4245 (3.2615)	Prec@1 7.000 (11.025)	Prec@5 36.000 (38.816)
Epoch: [2][210/354]	Time 1.991 (2.038)	Data 0.002 (0.003)	Loss 3.1295 (3.2606)	Prec@1 15.000 (11.028)	Prec@5 45.000 (38.801)
Epoch: [2][220/354]	Time 1.955 (2.038)	Data 0.004 (0.003)	Loss 3.1918 (3.2598)	Prec@1 9.000 (11.054)	Prec@5 32.000 (38.647)
Epoch: [2][230/354]	Time 2.017 (2.038)	Data 0.003 (0.003)	Loss 2.9778 (3.2553)	Prec@1 17.000 (11.117)	Prec@5 48.000 (38.866)
Epoch: [2][240/354]	Time 2.133 (2.038)	Data 0.003 (0.003)	Loss 2.9836 (3.2488)	Prec@1 16.000 (11.253)	Prec@5 45.000 (39.100)
Epoch: [2][250/354]	Time 2.017 (2.039)	Data 0.002 (0.003)	Loss 2.9704 (3.2396)	Prec@1 12.000 (11.434)	Prec@5 42.000 (39.446)
Epoch: [2][260/354]	Time 1.989 (2.038)	Data 0.003 (0.003)	Loss 3.0747 (3.2383)	Prec@1 12.000 (11.479)	Prec@5 46.000 (39.460)
Epoch: [2][270/354]	Time 2.014 (2.037)	Data 0.003 (0.003)	Loss 2.8776 (3.2337)	Prec@1 17.000 (11.550)	Prec@5 50.000 (39.598)
Epoch: [2][280/354]	Time 2.015 (2.038)	Data 0.003 (0.003)	Loss 3.1961 (3.2312)	Prec@1 13.000 (11.548)	Prec@5 38.000 (39.662)
Epoch: [2][290/354]	Time 2.080 (2.038)	Data 0.007 (0.003)	Loss 3.1483 (3.2254)	Prec@1 12.000 (11.660)	Prec@5 43.000 (39.808)
Epoch: [2][300/354]	Time 2.035 (2.037)	Data 0.003 (0.003)	Loss 2.6297 (3.2115)	Prec@1 26.000 (11.927)	Prec@5 63.000 (40.279)
Epoch: [2][310/354]	Time 2.058 (2.039)	Data 0.003 (0.003)	Loss 2.5613 (3.1940)	Prec@1 22.000 (12.209)	Prec@5 63.000 (40.955)
Epoch: [2][320/354]	Time 2.060 (2.039)	Data 0.003 (0.003)	Loss 2.7152 (3.1785)	Prec@1 14.000 (12.421)	Prec@5 60.000 (41.551)
Epoch: [2][330/354]	Time 2.106 (2.039)	Data 0.003 (0.003)	Loss 2.6099 (3.1671)	Prec@1 20.000 (12.610)	Prec@5 59.000 (41.952)
Epoch: [2][340/354]	Time 2.015 (2.040)	Data 0.003 (0.003)	Loss 2.4312 (3.1535)	Prec@1 30.000 (12.792)	Prec@5 65.000 (42.372)
Epoch: [2][350/354]	Time 2.025 (2.039)	Data 0.002 (0.003)	Loss 2.5318 (3.1342)	Prec@1 28.000 (13.157)	Prec@5 65.000 (43.020)
Test: [0/39]	Time 0.750 (0.750)	Loss 3.6744 (3.6744)	Prec@1 5.000 (5.000)	Prec@5 32.000 (32.000)
Test: [10/39]	Time 0.588 (0.589)	Loss 3.4847 (3.5892)	Prec@1 6.000 (6.727)	Prec@5 36.000 (33.364)
Test: [20/39]	Time 0.579 (0.576)	Loss 3.6747 (3.5878)	Prec@1 6.000 (6.905)	Prec@5 28.000 (33.190)
Test: [30/39]	Time 0.542 (0.571)	Loss 3.5044 (3.6157)	Prec@1 9.000 (6.903)	Prec@5 32.000 (32.065)
Test: [30/39]	Time 0.542 (0.571)	Loss 3.5044 (3.6157)	Prec@1 9.000 (6.903)	Prec@5 32.000 (32.065)

=============================
Finished training epoch: 2/50
@ 2018-12-20 17:59:56.274658
=============================

============================
Start training epoch: 3/50
@ 2018-12-20 17:59:56.275210
============================
Epoch: [3][0/354]	Time 2.978 (2.978)	Data 0.121 (0.121)	Loss 2.7746 (2.7746)	Prec@1 23.000 (23.000)	Prec@5 61.000 (61.000)
Epoch: [3][10/354]	Time 1.951 (2.118)	Data 0.003 (0.013)	Loss 2.5009 (2.4301)	Prec@1 29.000 (26.727)	Prec@5 68.000 (65.727)
Epoch: [3][20/354]	Time 1.995 (2.071)	Data 0.003 (0.008)	Loss 2.3893 (2.4799)	Prec@1 23.000 (25.333)	Prec@5 66.000 (63.524)
Epoch: [3][30/354]	Time 2.040 (2.056)	Data 0.004 (0.007)	Loss 2.4988 (2.4649)	Prec@1 23.000 (24.968)	Prec@5 67.000 (64.032)
Epoch: [3][40/354]	Time 1.990 (2.057)	Data 0.002 (0.006)	Loss 2.6619 (2.4722)	Prec@1 21.000 (24.585)	Prec@5 60.000 (63.951)
Epoch: [3][50/354]	Time 2.004 (2.057)	Data 0.003 (0.005)	Loss 2.7971 (2.4965)	Prec@1 18.000 (23.922)	Prec@5 61.000 (63.392)
Epoch: [3][60/354]	Time 2.009 (2.056)	Data 0.002 (0.005)	Loss 2.7433 (2.5334)	Prec@1 19.000 (23.213)	Prec@5 53.000 (62.541)
Epoch: [3][70/354]	Time 2.009 (2.051)	Data 0.003 (0.004)	Loss 2.7221 (2.5770)	Prec@1 20.000 (22.676)	Prec@5 57.000 (61.549)
Epoch: [3][80/354]	Time 1.989 (2.048)	Data 0.003 (0.004)	Loss 2.8521 (2.6038)	Prec@1 20.000 (22.148)	Prec@5 60.000 (60.852)
Epoch: [3][90/354]	Time 1.987 (2.043)	Data 0.003 (0.004)	Loss 2.2585 (2.6065)	Prec@1 29.000 (22.176)	Prec@5 72.000 (60.681)
Epoch: [3][100/354]	Time 2.052 (2.044)	Data 0.004 (0.004)	Loss 2.9009 (2.6191)	Prec@1 12.000 (22.089)	Prec@5 50.000 (60.317)
Epoch: [3][110/354]	Time 2.120 (2.041)	Data 0.002 (0.004)	Loss 2.4650 (2.6159)	Prec@1 24.000 (22.252)	Prec@5 62.000 (60.234)
Epoch: [3][120/354]	Time 1.989 (2.046)	Data 0.003 (0.004)	Loss 2.5185 (2.6079)	Prec@1 17.000 (22.306)	Prec@5 57.000 (60.281)
Epoch: [3][130/354]	Time 1.997 (2.043)	Data 0.003 (0.004)	Loss 2.3207 (2.5905)	Prec@1 29.000 (22.504)	Prec@5 72.000 (60.687)
Epoch: [3][140/354]	Time 2.006 (2.039)	Data 0.003 (0.004)	Loss 2.3463 (2.5736)	Prec@1 28.000 (22.887)	Prec@5 69.000 (61.227)
Epoch: [3][150/354]	Time 1.980 (2.038)	Data 0.003 (0.004)	Loss 1.9706 (2.5519)	Prec@1 38.000 (23.358)	Prec@5 78.000 (61.801)
Epoch: [3][160/354]	Time 2.034 (2.036)	Data 0.003 (0.004)	Loss 3.4425 (2.6219)	Prec@1 10.000 (22.857)	Prec@5 30.000 (60.571)
Epoch: [3][170/354]	Time 2.037 (2.034)	Data 0.002 (0.004)	Loss 3.3483 (2.6664)	Prec@1 12.000 (22.053)	Prec@5 34.000 (58.901)
Epoch: [3][180/354]	Time 1.989 (2.033)	Data 0.003 (0.004)	Loss 114.0475 (3.3504)	Prec@1 4.000 (21.309)	Prec@5 17.000 (57.343)
Epoch: [3][190/354]	Time 2.004 (2.034)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (20.288)	Prec@5 16.000 (55.220)
Epoch: [3][200/354]	Time 2.011 (2.033)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (19.328)	Prec@5 19.000 (53.239)
Epoch: [3][210/354]	Time 1.935 (2.032)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (18.460)	Prec@5 12.000 (51.384)
Epoch: [3][220/354]	Time 2.052 (2.031)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 2.000 (17.661)	Prec@5 16.000 (49.715)
Epoch: [3][230/354]	Time 1.963 (2.030)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 2.000 (16.957)	Prec@5 25.000 (48.277)
Epoch: [3][240/354]	Time 2.075 (2.030)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (16.282)	Prec@5 10.000 (46.780)
Epoch: [3][250/354]	Time 2.097 (2.030)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (15.677)	Prec@5 13.000 (45.430)
Epoch: [3][260/354]	Time 1.959 (2.029)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (15.111)	Prec@5 13.000 (44.307)
Epoch: [3][270/354]	Time 1.959 (2.028)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (14.587)	Prec@5 16.000 (43.255)
Epoch: [3][280/354]	Time 1.998 (2.027)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (14.089)	Prec@5 14.000 (42.171)
Epoch: [3][290/354]	Time 2.027 (2.027)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (13.663)	Prec@5 13.000 (41.289)
Epoch: [3][300/354]	Time 2.027 (2.028)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (13.229)	Prec@5 12.000 (40.415)
Epoch: [3][310/354]	Time 2.022 (2.027)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 3.000 (12.836)	Prec@5 20.000 (39.553)
Epoch: [3][320/354]	Time 2.032 (2.027)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (12.458)	Prec@5 21.000 (38.816)
Epoch: [3][330/354]	Time 1.961 (2.027)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (12.112)	Prec@5 16.000 (38.088)
Epoch: [3][340/354]	Time 1.983 (2.026)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 0.000 (11.777)	Prec@5 13.000 (37.346)
Epoch: [3][350/354]	Time 2.141 (2.025)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (11.467)	Prec@5 20.000 (36.752)
Test: [0/39]	Time 0.797 (0.797)	Loss nan (nan)	Prec@1 1.000 (1.000)	Prec@5 17.000 (17.000)
Test: [10/39]	Time 0.602 (0.604)	Loss nan (nan)	Prec@1 3.000 (2.182)	Prec@5 13.000 (12.727)
Test: [20/39]	Time 0.532 (0.579)	Loss nan (nan)	Prec@1 2.000 (2.000)	Prec@5 12.000 (11.714)
Test: [30/39]	Time 0.542 (0.579)	Loss nan (nan)	Prec@1 3.000 (2.161)	Prec@5 9.000 (11.452)
Test: [30/39]	Time 0.542 (0.579)	Loss nan (nan)	Prec@1 3.000 (2.161)	Prec@5 9.000 (11.452)

=============================
Finished training epoch: 3/50
@ 2018-12-20 18:12:14.660602
=============================

============================
Start training epoch: 4/50
@ 2018-12-20 18:12:14.660861
============================
Epoch: [4][0/354]	Time 2.878 (2.878)	Data 0.147 (0.147)	Loss nan (nan)	Prec@1 0.000 (0.000)	Prec@5 15.000 (15.000)
Epoch: [4][10/354]	Time 2.057 (2.095)	Data 0.003 (0.016)	Loss nan (nan)	Prec@1 1.000 (0.636)	Prec@5 13.000 (14.636)
Epoch: [4][20/354]	Time 2.161 (2.067)	Data 0.007 (0.010)	Loss nan (nan)	Prec@1 1.000 (0.905)	Prec@5 14.000 (14.238)
Epoch: [4][30/354]	Time 2.028 (2.066)	Data 0.003 (0.008)	Loss nan (nan)	Prec@1 2.000 (1.032)	Prec@5 18.000 (14.548)
Epoch: [4][40/354]	Time 1.966 (2.052)	Data 0.002 (0.007)	Loss nan (nan)	Prec@1 1.000 (0.976)	Prec@5 14.000 (14.488)
Epoch: [4][50/354]	Time 1.936 (2.050)	Data 0.002 (0.006)	Loss nan (nan)	Prec@1 0.000 (0.961)	Prec@5 13.000 (14.275)
Epoch: [4][60/354]	Time 2.012 (2.043)	Data 0.003 (0.006)	Loss nan (nan)	Prec@1 1.000 (0.902)	Prec@5 15.000 (14.328)
Epoch: [4][70/354]	Time 2.042 (2.037)	Data 0.002 (0.005)	Loss nan (nan)	Prec@1 0.000 (0.887)	Prec@5 15.000 (14.648)
Epoch: [4][80/354]	Time 1.925 (2.034)	Data 0.003 (0.005)	Loss nan (nan)	Prec@1 2.000 (0.901)	Prec@5 11.000 (14.679)
Epoch: [4][90/354]	Time 1.960 (2.035)	Data 0.003 (0.005)	Loss nan (nan)	Prec@1 2.000 (0.923)	Prec@5 15.000 (14.593)
Epoch: [4][100/354]	Time 2.047 (2.031)	Data 0.003 (0.005)	Loss nan (nan)	Prec@1 0.000 (0.931)	Prec@5 14.000 (14.713)
Epoch: [4][110/354]	Time 1.962 (2.029)	Data 0.003 (0.005)	Loss nan (nan)	Prec@1 0.000 (0.928)	Prec@5 14.000 (14.829)
Epoch: [4][120/354]	Time 2.016 (2.028)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.950)	Prec@5 14.000 (14.901)
Epoch: [4][130/354]	Time 2.045 (2.025)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.947)	Prec@5 21.000 (14.908)
Epoch: [4][140/354]	Time 2.018 (2.026)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 3.000 (0.950)	Prec@5 20.000 (14.894)
Epoch: [4][150/354]	Time 1.992 (2.024)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.927)	Prec@5 13.000 (14.874)
Epoch: [4][160/354]	Time 1.991 (2.023)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.944)	Prec@5 15.000 (14.870)
Epoch: [4][170/354]	Time 2.092 (2.024)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.936)	Prec@5 20.000 (14.895)
Epoch: [4][180/354]	Time 2.054 (2.024)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.923)	Prec@5 18.000 (14.873)
Epoch: [4][190/354]	Time 1.997 (2.026)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.916)	Prec@5 7.000 (14.791)
Epoch: [4][200/354]	Time 2.082 (2.027)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.910)	Prec@5 19.000 (14.856)
Epoch: [4][210/354]	Time 1.929 (2.028)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.915)	Prec@5 17.000 (14.754)
Epoch: [4][220/354]	Time 1.940 (2.028)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 2.000 (0.896)	Prec@5 16.000 (14.792)
Epoch: [4][230/354]	Time 2.049 (2.032)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.870)	Prec@5 15.000 (14.879)
Epoch: [4][240/354]	Time 2.064 (2.033)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.871)	Prec@5 12.000 (14.880)
Epoch: [4][250/354]	Time 1.998 (2.035)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.880)	Prec@5 12.000 (14.865)
Epoch: [4][260/354]	Time 2.056 (2.036)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 2.000 (0.885)	Prec@5 17.000 (14.839)
Epoch: [4][270/354]	Time 2.106 (2.036)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.889)	Prec@5 26.000 (14.867)
Epoch: [4][280/354]	Time 2.130 (2.036)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.904)	Prec@5 10.000 (14.886)
Epoch: [4][290/354]	Time 1.984 (2.037)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.893)	Prec@5 11.000 (14.797)
Epoch: [4][300/354]	Time 2.053 (2.040)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.890)	Prec@5 15.000 (14.834)
Epoch: [4][310/354]	Time 2.077 (2.039)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.904)	Prec@5 16.000 (14.871)
Epoch: [4][320/354]	Time 2.017 (2.037)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.897)	Prec@5 13.000 (14.872)
Epoch: [4][330/354]	Time 1.972 (2.037)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.897)	Prec@5 12.000 (14.846)
Epoch: [4][340/354]	Time 2.011 (2.036)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 4.000 (0.927)	Prec@5 15.000 (14.839)
Epoch: [4][350/354]	Time 2.037 (2.037)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.932)	Prec@5 16.000 (14.832)
Test: [0/39]	Time 0.877 (0.877)	Loss nan (nan)	Prec@1 2.000 (2.000)	Prec@5 6.000 (6.000)
Test: [10/39]	Time 0.523 (0.598)	Loss nan (nan)	Prec@1 1.000 (2.455)	Prec@5 14.000 (12.545)
Test: [20/39]	Time 0.542 (0.583)	Loss nan (nan)	Prec@1 1.000 (2.381)	Prec@5 12.000 (12.571)
Test: [30/39]	Time 0.593 (0.574)	Loss nan (nan)	Prec@1 3.000 (2.452)	Prec@5 12.000 (12.290)
Test: [30/39]	Time 0.593 (0.574)	Loss nan (nan)	Prec@1 3.000 (2.452)	Prec@5 12.000 (12.290)

=============================
Finished training epoch: 4/50
@ 2018-12-20 18:24:36.562591
=============================

============================
Start training epoch: 5/50
@ 2018-12-20 18:24:36.563099
============================
Epoch: [5][0/354]	Time 2.586 (2.586)	Data 0.134 (0.134)	Loss nan (nan)	Prec@1 1.000 (1.000)	Prec@5 9.000 (9.000)
Epoch: [5][10/354]	Time 1.979 (2.061)	Data 0.004 (0.014)	Loss nan (nan)	Prec@1 0.000 (1.364)	Prec@5 16.000 (14.636)
Epoch: [5][20/354]	Time 2.033 (2.033)	Data 0.002 (0.009)	Loss nan (nan)	Prec@1 0.000 (1.048)	Prec@5 15.000 (14.476)
Epoch: [5][30/354]	Time 2.003 (2.022)	Data 0.002 (0.007)	Loss nan (nan)	Prec@1 0.000 (0.871)	Prec@5 14.000 (15.065)
Epoch: [5][40/354]	Time 1.954 (2.018)	Data 0.003 (0.005)	Loss nan (nan)	Prec@1 0.000 (0.878)	Prec@5 10.000 (14.390)
Epoch: [5][50/354]	Time 1.995 (2.014)	Data 0.002 (0.005)	Loss nan (nan)	Prec@1 1.000 (0.784)	Prec@5 19.000 (14.922)
Epoch: [5][60/354]	Time 1.967 (2.010)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.770)	Prec@5 12.000 (14.721)
Epoch: [5][70/354]	Time 1.979 (2.014)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.746)	Prec@5 11.000 (14.352)
Epoch: [5][80/354]	Time 2.015 (2.014)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.728)	Prec@5 10.000 (14.333)
Epoch: [5][90/354]	Time 2.010 (2.013)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.780)	Prec@5 9.000 (14.319)
Epoch: [5][100/354]	Time 2.007 (2.014)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.802)	Prec@5 17.000 (14.535)
Epoch: [5][110/354]	Time 1.980 (2.012)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 3.000 (0.856)	Prec@5 14.000 (14.468)
Epoch: [5][120/354]	Time 2.040 (2.012)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.860)	Prec@5 19.000 (14.463)
Epoch: [5][130/354]	Time 1.964 (2.012)	Data 0.003 (0.004)	Loss nan (nan)	Prec@1 0.000 (0.855)	Prec@5 9.000 (14.504)
Epoch: [5][140/354]	Time 1.991 (2.010)	Data 0.002 (0.004)	Loss nan (nan)	Prec@1 1.000 (0.823)	Prec@5 16.000 (14.482)
Epoch: [5][150/354]	Time 2.014 (2.009)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.834)	Prec@5 14.000 (14.510)
Epoch: [5][160/354]	Time 1.952 (2.010)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 2.000 (0.870)	Prec@5 15.000 (14.534)
Epoch: [5][170/354]	Time 2.094 (2.009)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 3.000 (0.936)	Prec@5 15.000 (14.579)
Epoch: [5][180/354]	Time 1.996 (2.009)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.939)	Prec@5 15.000 (14.652)
Epoch: [5][190/354]	Time 1.924 (2.008)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.937)	Prec@5 13.000 (14.602)
Epoch: [5][200/354]	Time 1.995 (2.009)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 3.000 (0.960)	Prec@5 19.000 (14.667)
Epoch: [5][210/354]	Time 2.065 (2.009)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.943)	Prec@5 16.000 (14.673)
Epoch: [5][220/354]	Time 2.039 (2.010)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.928)	Prec@5 14.000 (14.701)
Epoch: [5][230/354]	Time 2.028 (2.009)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.935)	Prec@5 19.000 (14.792)
Epoch: [5][240/354]	Time 2.028 (2.008)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.938)	Prec@5 15.000 (14.801)
Epoch: [5][250/354]	Time 1.993 (2.008)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.948)	Prec@5 16.000 (14.809)
Epoch: [5][260/354]	Time 1.960 (2.008)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 3.000 (0.969)	Prec@5 14.000 (14.774)
Epoch: [5][270/354]	Time 2.016 (2.008)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 2.000 (0.963)	Prec@5 17.000 (14.812)
Epoch: [5][280/354]	Time 2.069 (2.008)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.979)	Prec@5 16.000 (14.808)
Epoch: [5][290/354]	Time 2.042 (2.007)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.973)	Prec@5 16.000 (14.845)
Epoch: [5][300/354]	Time 2.130 (2.007)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.970)	Prec@5 13.000 (14.857)
Epoch: [5][310/354]	Time 2.028 (2.007)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.961)	Prec@5 13.000 (14.894)
Epoch: [5][320/354]	Time 2.006 (2.007)	Data 0.003 (0.003)	Loss nan (nan)	Prec@1 0.000 (0.960)	Prec@5 12.000 (14.919)
Epoch: [5][330/354]	Time 2.034 (2.008)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.940)	Prec@5 21.000 (14.909)
Epoch: [5][340/354]	Time 2.006 (2.008)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.947)	Prec@5 18.000 (14.947)
Epoch: [5][350/354]	Time 1.985 (2.007)	Data 0.002 (0.003)	Loss nan (nan)	Prec@1 1.000 (0.940)	Prec@5 15.000 (14.869)
Test: [0/39]	Time 0.824 (0.824)	Loss nan (nan)	Prec@1 2.000 (2.000)	Prec@5 6.000 (6.000)
Test: [10/39]	Time 0.546 (0.584)	Loss nan (nan)	Prec@1 2.000 (1.909)	Prec@5 9.000 (11.000)
Test: [20/39]	Time 0.546 (0.570)	Loss nan (nan)	Prec@1 1.000 (1.952)	Prec@5 10.000 (11.190)
Test: [30/39]	Time 0.609 (0.572)	Loss nan (nan)	Prec@1 0.000 (2.258)	Prec@5 10.000 (11.677)
Test: [30/39]	Time 0.609 (0.572)	Loss nan (nan)	Prec@1 0.000 (2.258)	Prec@5 10.000 (11.677)

=============================
Finished training epoch: 5/50
@ 2018-12-20 18:36:48.077169
=============================
--------------------------------------------------------------------------------------------------------------
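For what it is worth, a guess that is not confirmed in the thread: the training log above shows the loss jumping to 114.0475 and then to nan in the middle of epoch 3, which looks like divergence at the fixed learning rate of 0.1 rather than another shape problem; once the loss is nan the weights become nan as well, and the precision stays near chance level from then on. A standalone sketch of two common mitigations, using a stand-in model (a lower learning rate, and gradient clipping before each step):

import torch
import torch.nn as nn

model = nn.Linear(10, 43)                 # stand-in for AlexNet, illustration only
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),
                            lr=0.01,      # lower than the 0.1 used above
                            momentum=0.9,
                            weight_decay=1e-4)

data = torch.randn(8, 10)                 # dummy batch
target = torch.randint(0, 43, (8,))

optimizer.zero_grad()
loss = criterion(model(data), target)
loss.backward()
# cap the gradient norm so a single bad batch cannot blow up the weights
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()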