Error running LSTM: "input must have 3 dimensions, got 4"

I get this error when I run my code:

```
C:\Users\Charvee\AppData\Local\Programs\Python\Python37\python.exe E:/Jigna/Image-Forgery-Detection-CNN-master/Image-Forgery-Detection-CNN-master/src/cnn_lstm/lstm.py
Traceback (most recent call last):
  File "E:/Jigna/Image-Forgery-Detection-CNN-master/Image-Forgery-Detection-CNN-master/src/cnn_lstm/lstm.py", line 164, in <module>
    outputs = model(images)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "E:/Jigna/Image-Forgery-Detection-CNN-master/Image-Forgery-Detection-CNN-master/src/cnn_lstm/lstm.py", line 94, in forward
    out, (hn, cn) = self.lstm(x, (h0, c0))
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\rnn.py", line 564, in forward
    return self.forward_tensor(input, hx)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\rnn.py", line 543, in forward_tensor
    output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\rnn.py", line 523, in forward_impl
    self.check_forward_args(input, hx, batch_sizes)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\rnn.py", line 496, in check_forward_args
    self.check_input(input, batch_sizes)
  File "C:\Users\Charvee\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\rnn.py", line 145, in check_input
    expected_input_dim, input.dim()))
RuntimeError: input must have 3 dimensions, got 4
```
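
For context on why I think this is a shape problem: as far as I can tell, `nn.LSTM` with `batch_first=True` expects a 3-D input of shape `(batch, seq_len, input_size)`, while a `DataLoader` over an `ImageFolder` dataset yields 4-D batches of shape `(batch, channels, height, width)`. A minimal sketch (toy shapes, not my actual data) that reproduces the same error:

```python
import torch
import torch.nn as nn

# Toy shapes, assumed for illustration only (my real patches differ):
lstm = nn.LSTM(input_size=28, hidden_size=100, num_layers=3, batch_first=True)

ok = torch.zeros(100, 28, 28)        # (batch, seq_len, input_size): 3-D, accepted
bad = torch.zeros(100, 3, 128, 128)  # (batch, C, H, W), as a DataLoader over
                                     # ImageFolder yields: 4-D, rejected

out, (hn, cn) = lstm(ok)  # works; out.shape == (100, 28, 100)
# lstm(bad)               # RuntimeError: input must have 3 dimensions, got 4
```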

My code is below:

```python
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
import pandas as pd
from torchvision import datasets

'''
STEP 1: LOADING DATASET
'''

train_dataset = dsets.MNIST(root='./data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=False)

test_dataset = dsets.MNIST(root='./data',
                           train=False,
                           transform=transforms.ToTensor())
# NOTE: the two MNIST datasets above are never used below; the loaders
# are built from the ImageFolder dataset instead.

DATA_DIR = "…/generated_patches_ComoFo/"  # "my_patches"  # put the directory of the patches here
transform = transforms.Compose([transforms.ToTensor()])

data = datasets.ImageFolder(root=DATA_DIR, transform=transform)  # Fetch data

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

train_loader = torch.utils.data.DataLoader(data, batch_size=64, shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(data, batch_size=64, shuffle=True, pin_memory=True)

'''
STEP 2: MAKING DATASET ITERABLE
'''

batch_size = 100  # 64
n_iters = 3000
num_epochs = n_iters / (len(data) / batch_size)
num_epochs = int(num_epochs)
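# (Worked example of the epoch arithmetic above, with hypothetical numbers:
# if len(data) were 60000 with batch_size = 100, that is 600 iterations per
# epoch, so num_epochs = int(3000 / 600) = 5.)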

# NOTE: these loaders replace the ones above, and again draw both train
# and test batches from the same ImageFolder dataset.
train_loader = torch.utils.data.DataLoader(dataset=data,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=data,
                                          batch_size=batch_size,
                                          shuffle=False)

'''
STEP 3: CREATE MODEL CLASS
'''

class LSTMModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(LSTMModel, self).__init__()
        # Hidden dimensions
        self.hidden_dim = hidden_dim

        # Number of hidden layers
        self.layer_dim = layer_dim

        # Building your LSTM
        # batch_first=True causes input/output tensors to be of shape
        # (batch_dim, seq_dim, feature_dim)
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)

        # Readout layer
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Initialize hidden state with zeros
        #######################
        #  USE GPU FOR MODEL  #
        #######################
        if torch.cuda.is_available():
            h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).cuda())
        else:
            h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))

        # Initialize cell state
        if torch.cuda.is_available():
            c0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).cuda())
        else:
            c0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))

        # One time step
        out, (hn, cn) = self.lstm(x, (h0, c0))

        # Index hidden state of last time step
        # out.size() --> 100, 28, 100
        # out[:, -1, :] --> 100, 100 --> just want last time step hidden states!
        out = self.fc(out[:, -1, :])
        # out.size() --> 100, 10
        return out
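
# (Shape walk-through of forward(), assuming the MNIST-style 28x28 input the
# comments above describe: x is (100, 28, 28); h0 and c0 are each
# (layer_dim, batch, hidden_dim) = (3, 100, 100); out from the LSTM is
# (100, 28, 100); out[:, -1, :] is (100, 100); fc maps that to (100, 10).)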

'''
STEP 4: INSTANTIATE MODEL CLASS
'''
input_dim = 28   # 16 #4
hidden_dim = 100
layer_dim = 3    # changed from the tutorial's single layer
output_dim = 10

model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)

#######################
#  USE GPU FOR MODEL  #
#######################

if torch.cuda.is_available():
    model.cuda()

'''
STEP 5: INSTANTIATE LOSS CLASS
'''
criterion = nn.CrossEntropyLoss()

'''
STEP 6: INSTANTIATE OPTIMIZER CLASS
'''
learning_rate = 0.1

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

'''
STEP 7: TRAIN THE MODEL
'''

# Number of steps to unroll
seq_dim = 28

iter = 0
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Load images as Variable
        #######################
        #  USE GPU FOR MODEL  #
        #######################
        if torch.cuda.is_available():
            images = Variable(images.view(-1, seq_dim, input_dim).cuda())
            labels = Variable(labels.cuda())
        else:
            # images = Variable(images.view(-1, 196608, 16))
            # labels = Variable(labels)
            images = Variable(images)  # NOTE: no reshape here -- images stay 4-D (batch, C, H, W)
            labels = Variable(labels)

        # Clear gradients w.r.t. parameters
        optimizer.zero_grad()

        # for loop and changed sampled_ids = torch.cat(sampled_ids, 1) to sampled_ids = torch.cat(sampled_ids, 0)
        # Forward pass to get output/logits
        # outputs.size() --> 100, 10
        outputs = model(images)

        print(0)
        # Calculate Loss: softmax --> cross entropy loss
        loss = criterion(outputs, labels)
        print("1")
        # Getting gradients w.r.t. parameters
        loss.backward()
        print("2")
        # Updating parameters
        optimizer.step()
        print("3")
        iter += 1
        print("4")
        if iter % 500 == 0:
            # Calculate Accuracy
            correct = 0
            total = 0
            print("6")
            # Iterate through test dataset
            for images, labels in test_loader:
                #######################
                #  USE GPU FOR MODEL  #
                #######################
                if torch.cuda.is_available():
                    images = Variable(images.view(-1, seq_dim, input_dim).cuda())
                else:
                    images = Variable(images.view(-1, seq_dim, input_dim))

                # Forward pass only to get logits/output
                outputs = model(images)

                # Get predictions from the maximum value
                _, predicted = torch.max(outputs.data, 1)

                # Total number of labels
                total += labels.size(0)

                # Total correct predictions
                #######################
                #  USE GPU FOR MODEL  #
                #######################
                if torch.cuda.is_available():
                    correct += (predicted.cpu() == labels.cpu()).sum()
                else:
                    correct += (predicted == labels).sum()

            accuracy = 100 * correct / total

            # Print Loss
            # (loss.data[0] is pre-0.4 PyTorch indexing; newer versions use loss.item())
            print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.data[0], accuracy))
```
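
If I'm reading the traceback right, the failure is on the CPU path of the training loop: the CUDA branch reshapes each batch with `images.view(-1, seq_dim, input_dim)`, but the `else` branch passes `images` through unchanged, so `self.lstm` receives the 4-D `(batch, channels, height, width)` batch that `ImageFolder` produces. Below is the reshape I think is missing; it's only a sketch, since my patches are RGB rather than MNIST-style 28x28 grayscale, so the channel handling (here, folding channels into the feature dimension) and the resulting `input_dim` are my assumptions, not something from the original repo:

```python
# Hypothetical CPU-branch fix (untested): turn (batch, C, H, W) into the
# (batch, seq_len, input_size) layout that nn.LSTM(batch_first=True) expects,
# treating each pixel row as one time step and folding channels into features.
b, c, h, w = images.shape
images = images.permute(0, 2, 1, 3).reshape(b, h, c * w)  # (batch, h, c*w)
images = Variable(images)
labels = Variable(labels)
```

With this layout the model would need `input_dim = c * w` (and `seq_dim = h`) instead of the hard-coded 28s. Is that the right way to feed image patches into this LSTM, or should I convert the patches to grayscale first?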