Model is very slow and doesn't seem to learn

Starting from VGG-16 source code, I added an extra layer after every convolution, which is supposed to be an LSTM layer (a row LSTM). However, when I run the new model with my LSTM layer, not only does the model not learn, but training is also extremely slow (and I am running this on a GPU!). I suspect the problem lies in my custom classes (RLSTM and RowLSTMCell), but I will show the code for the whole program to give a better sense of what I am doing; right after the model code I also include a small check I ran on the registered parameters. Why are my added classes slow and failing to learn, and does anyone have suggestions on how to fix them?

Here is where I define my models:

import math

import torch
import torch.nn as nn

__all__ = [
    'VGG', 'vgg16'
]
class VGG(nn.Module):
    '''
    VGG model 
    '''
    def __init__(self, features): # features represents the layers array
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512,512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
         # Initialize weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()


    def forward(self, x): # x is the image, we run x through the layers
        x = self.features(x) # runs through all features, where each feature is a function
        x = x.view(x.size(0), -1) 
        # after running through features, does sequential steps to finally classify
        x = self.classifier(x)
        return x


def make_layers(cfg, batch_norm=False):
    print("Making layers!")
    layers = []
    # clearing the layers for next vgg model
    in_channels = 3
    count=0
    for v in cfg:
        count+=1
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
                rlstm = RLSTM(v)
                rlstm = rlstm.cuda()
                layers += [rlstm]
            in_channels = v

    return nn.Sequential(*layers)
global total
total = 0
class RLSTM(nn.Module):
    def __init__(self,ch):
        super(RLSTM,self).__init__()
        self.ch=ch
        self.input_to_state = torch.nn.Conv2d(self.ch, 4*self.ch, kernel_size=(1,3), padding=(0,1)).cuda()
        self.state_to_state = torch.nn.Conv2d(self.ch, 4*self.ch, kernel_size=(1,3), padding=(0,1)).cuda()
      

    def forward(self, image):
        size = image.size()
        b = size[0]
        indvs = list(image.split(1,0))
        tensor_array = []
        for i in range(b):
            tensor_array.append(self.RowLSTM(indvs[i]))
        seq=tuple(tensor_array)
        trans = torch.cat(seq,0)
        global total
        total+=1
        return trans.cuda() 
    def RowLSTM(self, image): 
        # input-to-state (K_is * x_i): a 1x3 convolution that generates an h x n x n tensor holding all input-to-state information
        cell_list=[]
        igates = []
        n = image.size()[2]
        ch=image.size()[1]
        for i in range(n):
            if i==0:
                
                isgates = self.splitIS(self.input_to_state(image)) # convolve, then split into gates (4 per row)

                cell=RowLSTMCell(0,torch.randn(ch,n,1).cuda(),torch.randn(ch,n,1).cuda(),torch.randn(ch,n,1).cuda(),torch.randn(ch,n,1).cuda(),torch.randn(ch,n,1).cuda(),torch.randn(ch,n,1).cuda())
                # now have dummy variables for first row
                cell_list.append(cell)
            else:   
                cell_prev = cell_list[i-1]
                hid_prev = cell_prev.getHiddenState()
                ssgates = self.splitSS(self.state_to_state(hid_prev.unsqueeze(0)))
                gates = self.addGates(isgates, ssgates,i)
                ig, og, fg, gg = gates[0], gates[1], gates[2], gates[3]
                cell = RowLSTMCell(cell_prev, ig, og, fg, gg, 0 ,0)
                cell.compute()
                cell_list.append(cell)

        # now have a list of all cell data, concatenate hidden state into 1 x h x n x n

        hidden_layers = []
        for i in range(n):
            hid = cell_list[i].h
            hidden_layers.append(torch.unsqueeze(hid,0))

        seq = tuple(hidden_layers)
        tensor = torch.cat(seq,3)
        return tensor 
    
    def splitIS(self, tensor): #always going to be splitting into 4 pieces, so no need to add extra parameters
        inputStateGates={}
        size=tensor.size() # 1 x 4h x n x n
        out_ft=size[1] # get 4h for the nxnx4h tensor
        num=size[2] # get n for the nxn image
        hh = out_ft // 4 # integer division: split the 4h channels into 4 gate groups of h
        tensor = torch.squeeze(tensor).cuda() # 4h x n x n

        # First, split by row: Creates n tensors of 4h x n x 1
        rows = list(tensor.split(1,2))

        for i in range(num):
            # Each row is a tensor of 4h x n x 1, split it into 4 of h x n x 1
            row=rows[i]
          #  print("Each row using cuda: "+str(row.is_cuda))
            inputStateGates[i]=list(row.split(hh,0))
            
        return inputStateGates 


    def splitSS(self, tensor): # 1 x 4h x n x 1, create 4 of 1 x h x n x 1 
        size=tensor.size() 
        out_ft=size[1] # get 4h for the 1x4hxn tensor
        num=size[2] # get n for the 1xhxn row
        hh = out_ft // 4 # integer division: split the 4h channels into 4 gate groups of h
        tensor = tensor.squeeze(0).cuda() # 4h x n x 1
        splitted=list(tensor.split(hh,0))
        return splitted 


    def addGates(self, i2s, s2s, key):
        """ these dictionaries are of form {key : [[i], [o], [f], [g]]}
            we want to add pairwise elements """

        # i2s is of form key: [[i], [o], [f], [g]] where each gate is hxn
        # s2s is of form [[h,n],[h,n],[h,n], [h,n]]
        gateSum = []
        for i in range(4): # always of length 4, representing the gates
            gateSum.append(torch.sigmoid(i2s[key][i] + s2s[i]))
        return gateSum
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 
          512, 512, 512, 512, 'M'],
}

class RowLSTMCell(): # inherit torch.nn.LSTM?
    def __init__(self, prev_row, i, o, f, g, c, h):
        self.c = c
        self.h = h
        self.i = i.cuda()
        self.o = o.cuda()
        self.f = f.cuda()
        self.g = g.cuda()
        self.prev_row = prev_row
    def getStateSize(self):
        return self._state_size

    def getOutputSize(self):
        return self._output_size

    def compute(self):
        c_prev = self.prev_row.getCellState()
        h_prev = self.prev_row.getHiddenState()
      
        self.c = self.f * c_prev + self.i * self.g
        self.h = torch.tanh(self.c) * self.o
    def getHiddenState(self):
        return self.h

    def getCellState(self):
        return self.c

   

def vgg16():
    """VGG 16-layer model (configuration "D")"""
    return VGG(make_layers(cfg['D']))
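
To make sure the convolutions inside my RLSTM layers are at least registered as trainable parameters, I ran a quick check (only a rough sketch; it assumes this model file is saved as vgg.py, which is how the training script below imports it):

import torch
import vgg  # the model definitions above

model = vgg.vgg16().cuda()

# Print every parameter the optimizer would see, with its shape.
for name, p in model.named_parameters():
    print(name, p.size(), p.requires_grad)

# Count the parameters that belong to my custom RLSTM convolutions.
rlstm_names = [name for name, _ in model.named_parameters()
               if 'input_to_state' in name or 'state_to_state' in name]
print("RLSTM parameters registered: {}".format(len(rlstm_names)))

As far as I can tell, the input_to_state and state_to_state weights should appear in this list, so I don't think parameter registration is the issue, but I may be missing something.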

Next, here is the main training script.

import argparse
import os
import shutil
import time

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import vgg



model_names = sorted(name for name in vgg.__dict__  # create all the models
    if name.islower() and not name.startswith("__")
                     and name.startswith("vgg")
                     and callable(vgg.__dict__[name]))

print("Using the following {}".format(model_names))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg16',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: vgg16)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                    metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.05, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=20, type=int,
                    metavar='N', help='print frequency (default: 20)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
                    help='The directory used to save the trained models',
                    default='save_temp', type=str)


best_prec1 = 0
 
GPU_INDEX = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_INDEX)


def main():
    global args, best_prec1
    args = parser.parse_args()


    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = vgg.__dict__[args.arch]()

    model.features = torch.nn.DataParallel(model.features) # features = layers array
    for param in model.parameters():
        print(param)
    model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print "=> loading checkpoint '{}'".format(args.resume)
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.evaluate, checkpoint['epoch']))
        else:
            print "=> no checkpoint found at '{}'".format(args.resume)

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]), download=True),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    if args.half:
        model.half()
        criterion.half()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, filename=os.path.join(args.save_dir, 'checkpoint_{}.tar'.format(epoch)))


def train(train_loader, model, criterion, optimizer, epoch):
    """
        Run one train epoch
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input).cuda()
        target_var = torch.autograd.Variable(target)
        if args.half:
            input_var = input_var.half()

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1))


def validate(val_loader, model, criterion):
    """
    Run evaluation
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input, volatile=True).cuda()
        target_var = torch.autograd.Variable(target, volatile=True)

        if args.half:
            input_var = input_var.half()

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        output = output.float()
        loss = loss.float()

        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1))
            
    print(' * Prec@1 {top1.avg:.3f}'
          .format(top1=top1))

    return top1.avg

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """
    Save the training model
    """
    torch.save(state, filename)
 
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 2 every 30 epochs"""
    lr = args.lr * (0.5 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


if __name__ == '__main__':
    main()

Am I forgetting to train some part of my model, such as the LSTM layer? Are the Python for loops inside RowLSTM what is making it so slow? To get a feel for the second question, I put together the rough timing sketch below. Any help is much appreciated.
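
This benchmark is only a sketch (the batch and feature-map sizes are made up, and it again assumes the model file is saved as vgg.py); it just compares one forward pass of my RLSTM against an ordinary 3x3 convolution on the same input:

import time
import torch
import torch.nn as nn
from torch.autograd import Variable
from vgg import RLSTM  # my custom layer from the model file above

# A small, made-up batch: 8 images, 64 channels, 32x32 feature maps.
x = Variable(torch.randn(8, 64, 32, 32).cuda())

conv = nn.Conv2d(64, 64, kernel_size=3, padding=1).cuda()
rlstm = RLSTM(64).cuda()

def bench(layer, name, reps=5):
    # Synchronize so we measure the GPU work, not just kernel launches.
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(reps):
        layer(x)
    torch.cuda.synchronize()
    print("{}: {:.3f}s for {} forward passes".format(name, time.time() - start, reps))

bench(conv, "plain 3x3 conv")
bench(rlstm, "RLSTM")

I expect the RLSTM side to be much slower because of the per-image and per-row Python loops, but I don't know whether that alone explains the overall slowdown, or whether it is also related to the model not learning.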