Mat1 and mat2 shapes cannot be multiplied (8x10 and 8x8)


I feed 8 floats per input to an 8-input layer,

yet it fails me with that error message

mat1 and mat2 shapes cannot be multiplied (8x10 and 8x8)

I just don’t see where the error is

thanks for your help

import time
import gensim.downloader as api
import torch
import torch.nn as nn
import torch.optim as optim
from import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import re
import random

class MyDataset(Dataset):
    def __init__(self): = []
        self.input_size = 0
        for i in range(0,10):
            label = random.randint(0, 1)
            encoded_text = [random.uniform(0.0, 1.0) for _ in range(8)]
  , label))
            self.input_size = len(encoded_text) if self.input_size < len(encoded_text) else self.input_size
        # Pad each sequence in with zeros to match the maximum sequence length
        for i in range(len(
            delta = self.input_size - len([i][0])
            for j in range(delta):
    def __len__(self):
        return len(
    def __getitem__(self, idx):
        text, label =[idx]
        return text, label

class MyModel(nn.Module):
    """Two-layer MLP for binary classification.

    forward() returns raw logits of shape (batch, output_size). No sigmoid
    is applied here because training uses nn.BCEWithLogitsLoss, which
    applies the sigmoid internally — applying it in the model as well
    would squash the activations twice and flatten the gradients.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MyModel, self).__init__()
        self.input = nn.Linear(input_size, hidden_size)  # input -> hidden
        # Previously unused: without self.fc the model emitted hidden_size
        # values instead of output_size, mismatching the (batch, 1) labels.
        self.fc = nn.Linear(hidden_size, output_size)    # hidden -> logit
        self.sigmoid = nn.Sigmoid()  # kept for callers that want probabilities

    def forward(self, x):
        # Accept either a ready-made (batch, input_size) tensor from the
        # DataLoader, or a list of 1-D tensors that still needs padding.
        if isinstance(x, torch.Tensor):
            x_padded = x.float()
        else:
            x_padded = pad_sequence(x, batch_first=True, padding_value=0).float()
        hidden = torch.relu(self.input(x_padded))
        return self.fc(hidden)  # logits, shape (batch, output_size)

def train_model(model, train_loader, criterion, optimizer, num_epochs):
    """Standard supervised training loop.

    Args:
        model: callable nn.Module mapping a batch of inputs to logits.
        train_loader: iterable yielding (inputs, labels) batches.
        criterion: loss function (expects float (batch, 1) targets here).
        optimizer: torch optimizer over model.parameters().
        num_epochs: number of full passes over train_loader.
    """
    for epoch in range(num_epochs):
        for inputs, labels in train_loader:
            # Forward pass
            outputs = model(inputs)
            batch_size = outputs.size(0)
            # BCEWithLogitsLoss wants float targets shaped (batch, 1);
            # slice defensively in case of a short final batch.
            loss = criterion(outputs, labels.float().unsqueeze(1)[:batch_size])
            # Backward pass and optimization — this was missing entirely,
            # so the model never learned.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

if __name__ == "__main__":
    # Define hyperparameters
    hidden_size = 8      # width of the hidden layer
    output_size = 1      # binary classification -> single logit
    learning_rate = 0.001
    num_epochs = 10
    # Instantiate the dataset to determine the maximum input size
    dataset = MyDataset()
    train_loader = DataLoader(dataset, batch_size=64, shuffle=True)
    input_size = dataset.input_size
    # Initialize model, loss function, and optimizer
    model = MyModel(input_size, hidden_size, output_size)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    train_model(model, train_loader, criterion, optimizer, num_epochs)
    # Keep the process alive until the user interrupts with Ctrl+C.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass

anyone? This is simple PyTorch stuff — it should be obvious to experienced users.
I know about matrix multiplication,

but why do the generated tensors have only 6 values instead of 8???

why is pytorch not able to do the simplest thing ?

ok found the answer, thanks for the awesome help