F.linear(input, self.weight, self.bias) RuntimeError: mat1 and mat2 shapes cannot be multiplied (15x1 and 15x256)

I’m getting this error return F.linear(input, self.weight, self.bias) RuntimeError: mat1 and mat2 shapes cannot be multiplied (15x1 and 15x256)

Below is what my Model look like and I also flattened the input before creating the layer

class Model(nn.Module):
    def __init__(self, params):
        super(Model, self).__init__()

        self.indim = params['indim']
        self.num_layers_rnn = params['num_layers']
        self.neurons_rnn = params['neurons']
        self.layer_type = params['layer_type']
        self.inter_activation = params['inter_activation']
        self.outdim = params['outdim']

        self.rnn_layers = nn.ModuleList()


        self.rnn_layers.append(nn.Linear(self.indim , self.neurons_rnn))

        # Define RNN Layers
        for i in range(self.num_layers_rnn - 1):
            self.rnn_layers.append(self.get_layer(self.layer_type, self.neurons_rnn, True))

        self.rnn_layers.append(self.get_layer(self.layer_type, self.neurons_rnn, False))

        # Linear layer to flatten the output
        self.linear = nn.Linear(self.neurons_rnn, self.outdim)

        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        for i, layer in enumerate(self.rnn_layers):
            if isinstance(layer, nn.Linear):
                x = x.view(x.size(0), -1)
            x = layer(x)
            x = self.get_activation(self.inter_activation)(x)
        x = self.linear(x)
        x = self.softmax(x)
        return x

The shape of my input is x_batch shape: torch.Size([15])

Below is how I’m creating and handling the data

def main(rank, params ,world_size, resume_latest ):
    x, y = getOneHot("{}/{}".format(root, filename),
                     restricted=restricted, **params)
    val_x, val_y = None, None
    params["gbatch_size"] = params['batch_size'] * device_count
    from torch.utils.data import TensorDataset
    x = torch.from_numpy(x).double()
    y = torch.from_numpy(y).long()
    data = TensorDataset(x, y)

    v = params['validation']
    if val_x is not None:
        vrecord = val_x.shape[0]
        val = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
        validation = val  # data.take(vrecord)
    else:
        vrecord = int(x.shape[0]*v)
        print("vrecord: ", vrecord)
        validation = data[:vrecord]
        print("validation: ", validation)

    if val_x is not None:
        vrecord = val_x.shape[0]
        val = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
        validation = val  # data.take(vrecord)
    else:
        vrecord = int(x.shape[0]*v)
        print("vrecord: ", vrecord)
        validation = data[:vrecord]
        print("validation: ", validation)

    validation = DataLoader(validation, batch_size=params['gbatch_size'], shuffle=False)
    validation = validation.dataset
    validation = torch.utils.data.ConcatDataset([validation] * params['epochs'])
    vsteps = vrecord // params['gbatch_size']
    if vrecord % params['gbatch_size'] != 0:
       vsteps += 1

    data = DataLoader(data, batch_size=params['gbatch_size'], shuffle=False)
    data = data.dataset
    data = torch.utils.data.ConcatDataset([data] * params['epochs'])
    records = x.shape[0]
    steps = records // params['gbatch_size']
    if records % params['gbatch_size']:
        steps += 1

    callbacks = create_callbacks(params, callbacks, rank, resume_training)

    model, optimizer, criterion = create_model(**params)
    model = model.to(rank)
    model = DDP(model, device_ids=[rank])

    for epoch in range(initial_epoch, params['epochs']):
        model.train()
        for batch_idx, (x_batch, y_batch) in enumerate(data):
            optimizer.zero_grad()
            output = model(x_batch)
            loss = nn.CrossEntropyLoss(output, y_batch)
            loss.backward()
            optimizer.step()

        model.eval()
        with torch.no_grad():
            val_loss = 0.0
            val_batches = 0
            for val_batch_idx, (val_x_batch, val_y_batch)in enumerate(validation_data):
                val_output = model(val_x_batch.float())
                val_loss += criterion(val_output, val_y_batch)
                val_batches += 1

Help on this will be greatly appreciated as I’m new to AI and I’ve been stuck on this for a while