Mat1 and mat2 shapes cannot be multiplied (PyTorch, audio classification)

I’ve tried to pad the dataset using a custom collate_fn, as described in the following link.
But every time I run train.py it fails, at a different point each time, with the error below. I’ve also tried another padding approach and still get the same error.
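
For reference, the padding collate_fn I’m using looks roughly like this (a rough sketch, not my exact code; it assumes the Dataset returns (waveform, label) pairs):

import torch
import torch.nn.functional as F

def pad_collate(batch):
    # each item is assumed to be a (waveform, label) pair
    waveforms, labels = zip(*batch)
    # zero-pad every waveform on the right up to the longest one in the batch
    max_len = max(w.shape[-1] for w in waveforms)
    padded = [F.pad(w, (0, max_len - w.shape[-1])) for w in waveforms]
    return torch.stack(padded, 0), torch.tensor(labels)

# passed to the DataLoader like this:
# train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, collate_fn=pad_collate)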
The Error:

Traceback (most recent call last):
  File "C:\Users\omari\Documents\GitHub\Classifying_Maqams\model3\train.py", line 38, in <module>
    for i, data in enumerate(train_loader, 0):
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 521, in __next__
    data = self._next_data()
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 561, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 84, in default_collate
    return [default_collate(samples) for samples in transposed]
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 84, in <listcomp>
    return [default_collate(samples) for samples in transposed]
  File "C:\Users\omari\anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 56, in default_collate
    return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [1439994] at entry 0 and [1440000] at entry 1

Can you help me with this?
Here is the code of model.py, in case it helps:

import torch.nn as nn

class MaqamCNN(nn.Module):
    def __init__(self):
        super(MaqamCNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=(5, 1), stride=(1, 1), padding=(2, 0))
        self.bn1 = nn.BatchNorm2d(16)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=(4, 1), stride=(4, 1))
        self.conv2 = nn.Conv2d(16, 32, kernel_size=(5, 1), stride=(1, 1), padding=(2, 0))
        self.bn2 = nn.BatchNorm2d(32)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=(4, 1), stride=(4, 1))
        # 32 channels * 90000 time steps (1,440,000 samples reduced by two 4x pools)
        self.fc1 = nn.Linear(32*90000, 128)
        self.bn3 = nn.BatchNorm1d(128)
        self.relu3 = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(128, 8)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        print(x.shape)
        batch_size = x.size(0)
        x = x.view(batch_size, -1)
        x = self.fc1(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.dropout(x)
        x = self.fc2(x)
        print("x shape = ", x.shape)
        return x
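
Just to sanity-check the shapes, here is a dummy forward pass that should go through when every clip is exactly 1,440,000 samples long (this assumes the waveform is fed in as a (batch, 1, num_samples, 1) tensor, which is roughly what my train.py does):

import torch
from model import MaqamCNN

model = MaqamCNN()
model.eval()
# 1,440,000 samples; each (4, 1) max-pool divides the time axis by 4,
# so the flattened size is 32 * 1440000 / 16 = 32 * 90000, matching fc1
dummy = torch.randn(2, 1, 1440000, 1)
with torch.no_grad():
    out = model(dummy)  # expected output shape: (2, 8)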