Mismatch in batch size

When I try to train the model, I get the mismatch error shown at the end of this question. As I am new to PyTorch, I am having difficulty understanding how to resolve it. Please guide me on what changes I should make in my code.

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class CharCNN(nn.Module):
    def __init__(self):
        super(CharCNN, self).__init__()
        # num_channels, depth_1, depth_2, kernel_size_1, kernel_size_2,
        # stride_size and num_hidden are hyperparameters defined earlier
        # in my script.

        self.conv1 = nn.Sequential(
            nn.Conv1d(num_channels, depth_1, kernel_size=kernel_size_1, stride=stride_size),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=kernel_size_1, stride=stride_size),
            nn.Dropout(0.1),
        )

        self.conv2 = nn.Sequential(
            nn.Conv1d(depth_1, depth_2, kernel_size=kernel_size_2, stride=stride_size),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=kernel_size_2, stride=stride_size),
            nn.Dropout(0.25)
        )

        self.fc1 = nn.Sequential(
            nn.Linear(128 * 64, num_hidden),
            nn.ReLU(),
            nn.Dropout(0.5)
        )

        self.fc2 = nn.Sequential(
            nn.Linear(num_hidden, 11),
            nn.ReLU(),
            nn.Dropout(0.5)
        )

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)

        # collapse into (?, 128*64) before the linear layers
        out = out.reshape(-1, 128 * 64)
        #out = out.view(out.size(0), -1)
        # linear layer
        out = self.fc1(out)
        # output layer
        out = self.fc2(out)
        #out = F.log_softmax(out, dim=1)

        return out
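
This is my model. To show what the collapse step does to the batch dimension, here is a quick shape check that can be run in isolation. The hyperparameter values below are made up for illustration (my real num_channels, depth_1, kernel sizes and strides are set elsewhere in the script), so only the pattern matters, not the exact numbers:

import torch
import torch.nn as nn

# Hypothetical values standing in for my real hyperparameters:
num_channels, depth_1, depth_2 = 128, 64, 128
kernel_size_1, kernel_size_2, stride_size = 7, 3, 1

conv1 = nn.Sequential(
    nn.Conv1d(num_channels, depth_1, kernel_size=kernel_size_1, stride=stride_size),
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=kernel_size_1, stride=stride_size),
)
conv2 = nn.Sequential(
    nn.Conv1d(depth_1, depth_2, kernel_size=kernel_size_2, stride=stride_size),
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=kernel_size_2, stride=stride_size),
)

x = torch.randn(64, num_channels, 100)   # (batch_size, channels, length)
out = conv2(conv1(x))
print(out.shape)                          # torch.Size([64, 128, 84]) with these values
print(out.reshape(-1, 128 * 64).shape)    # torch.Size([84, 8192])  -- batch dim changed
print(out.view(out.size(0), -1).shape)    # torch.Size([64, 10752]) -- batch dim kept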

When I try to train the network like this:

criterion = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=learning_rate)

if train_on_gpu:
    model.cuda()

for e in range(training_epochs):
    train_losses = []

    for batch in iterate_minibatches(train_x, train_y, batch_size):
        x, y = batch
        inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
        if train_on_gpu:
            inputs, targets = inputs.cuda(), targets.cuda()
            #inputs = inputs.view(batch_size, 128*64)
            #targets = targets.view(batch_size)
        opt.zero_grad()
        output = model(inputs)

        loss = criterion(output, targets.long())
        train_losses.append(loss.item())
        loss.backward()
        opt.step()

    val_losses = []
    accuracy = 0
    f1score = 0
    print("Epoch: {}/{}...".format(e + 1, training_epochs),
          "Train Loss: {:.4f}...".format(np.mean(train_losses)))

I get the following error:

 ValueError: Expected input batch_size (1) to match target batch_size (64).
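
As far as I can tell, nn.CrossEntropyLoss raises this whenever the logits and the targets disagree on the batch dimension. A minimal standalone snippet (unrelated to my data) that triggers the same message:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(1, 11)        # batch of 1 prediction, 11 classes
targets = torch.zeros(64).long()   # batch of 64 labels
criterion(logits, targets)         # ValueError: Expected input batch_size (1) to match target batch_size (64).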

Please help me to resolve this issue.
