Mini batching in a basic neural network model using pytorch

hello, I am working on a neural network model and I have tried using mini batching to optimize my code but I get this error

ValueError: Expected input batch_size (466) to match target batch_size (1869).

my code is

from __future__ import print_function
from builtins import range
from import DataLoader
import torchvision.transforms as transforms

import pandas as pd
import numpy as np

# Data sets
IRIS_TRAINING = r'C:\Users\User\PycharmProjects\AI LAB\prof NN code\train.txt'
IRIS_TEST = r'C:\Users\User\PycharmProjects\AI LAB\prof NN code\test.txt'
train_data = np.genfromtxt(IRIS_TRAINING, skip_header=1, 
    dtype=float, delimiter=';') 
test_data = np.genfromtxt(IRIS_TEST, skip_header=1, 
    dtype=float, delimiter=';') 

#initialize batch size
dataloadertrain = DataLoader(dataset= train_data, batch_size=BATCH_SIZE, shuffle=True)
dataloadertest = DataLoader(dataset= test_data, batch_size=BATCH_SIZE, shuffle=True)

print("Array Dimension = ",len(train_data.shape))

# In[352]:

#split x and y (feature and target)
xtrain = train_data[:,:4001]
ytrain = train_data[:,4001]

# In[353]:

import torch
import torch.nn as nn
import torch.nn.functional as f

hl = 8
lr = 0.01
num_epoch = 2000

# In[354]:

#build model
class Net(nn.Module):

    def __init__(self, n_feature, hl, n_output):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(n_feature, hl)
        self.fc2 = nn.Linear(hl, n_output)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# In[355]:

#choose optimizer and loss function
criterion = nn.CrossEntropyLoss()
#optimizer = torch.optim.SGD(net.parameters(), lr=lr)
#optimizer = torch.optim.Adagrad(net.parameters(), lr=lr)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
#optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
#optimizer = torch.optim.RMSprop(net.parameters(), lr=lr)

# In[356]:

net_batch = Net(n_feature=4001, hl=10, n_output=3)  
loss_list = []
avg_loss_list = []
batch_size = 16
n_batches = int(len(train_data) / batch_size) 

for epoch in range(len(train_data)):
    X = torch.Tensor(xtrain).float()
    Y = torch.Tensor(ytrain).long()
    for batch in range(n_batches):
       batch_X, batch_Y = X[batch*batch_size:(batch+1)*batch_size,], Y[batch*batch_size:(batch+1)*batch_size,]
       prediction = net_batch(batch_X)
       loss = criterion(out, Y)

       print('\repoch: {}\tbatch: {}\tLoss =  {:.3f}'.format(epoch, batch, loss), end="")



# In[ ]:

#feedforward - backprop
out = net(X)
acc = 100 * torch.sum(Y==torch.max(, 1)[1]).double() / len(Y)
if (epoch % 50 == 1):
	    print ('Epoch [%d/%d] Loss: %.4f   Acc: %.4f' 
                  %(epoch+1, num_epoch, loss.item(), acc.item()))

# In[320]:

#split x and y (feature and target)
xtest = test_data[:,:4001]
ytest = test_data[:,4001]

#get prediction
X = torch.Tensor(xtest).float()
Y = torch.Tensor(ytest).long()
out = net(X)
_, predicted = torch.max(, 1)

#get accuration
print('Accuracy of testing %.2f %%' % (100 * torch.sum(Y==predicted).double() / len(Y)))

these are the specifics of my dataset:

  • 1747 training example stored in train.txt file,
  • 600 testing example saved in test.txt
  • Each example has 4001 features.
  • Two Classes only labeled as 1 and 0.

I would also take any advice regarding my code since I am a beginner

Your code seems to be a mix of different code snippets, as you are hitting these issues:

  • torch.nn.functional is imported as f but used as F
  • net is not initialized before the optimizer is created
  • DataLoaders are created, but never used
  • Y is used in the loss calculation while it seems batch_Y should be used (which creates the shape mismatch)
  • optmizer_batch is used while optimizer is initialized

Make sure to post an executable code snippet for faster debugging, as cleaning up the code often takes longer than spotting the actual error :wink: