Getting "mat1 dim 1 must match mat2 dim 0" when trying to do sentiment analysis

I'm making a simple model:

class Linear(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Linear, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size, bias=True)
        self.fc2 = nn.Linear(hidden_size, num_classes, bias=True)

    def forward(self, text, text_lengths):
        text = text.float()  # dense layer deals with float datatype
        x = self.fc1(text)
        preds = self.fc2(x)
        return preds

and training it with the following code:
def train(model, iterator, optimizer, criterion):
    epoch_loss = 0
    epoch_acc = 0
    for batch in iterator:
        optimizer.zero_grad()
        text, text_lengths = batch.text
        print(np.shape(text))
        print(np.shape(text_lengths))
        predictions = model(text, text_lengths)
        loss = criterion(predictions, batch.labels.squeeze())
        acc = accuracy(predictions, batch.labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
When I run it, I am getting the following error:

RuntimeError Traceback (most recent call last)
in
----> 1 run_train(num_epochs, linear_model, train_iterator, valid_iterator, optimizer, loss_func, 'linear')

in run_train(epochs, model, train_iterator, valid_iterator, optimizer, criterion, model_type)
5
6 # train the model
----> 7 train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
8
9 # evaluate the model

in train(model, iterator, optimizer, criterion)
8 print(np.shape(text))
9 print(np.shape(text_lengths))
---> 10 predictions = model(text, text_lengths)
11 loss = criterion(predictions, batch.labels.squeeze())
12 acc = accuracy(predictions, batch.labels)

c:\users\mynam\appdata\local\programs\python\python38\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

in forward(self, text, text_lengths)
8 def forward(self, text, text_lengths):
9 text = text.float() # dense layer deals with float datatype
---> 10 x = self.fc1(text)
11 preds = self.fc2(x)
12 return preds

c:\users\mynam\appdata\local\programs\python\python38\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

c:\users\mynam\appdata\local\programs\python\python38\lib\site-packages\torch\nn\modules\linear.py in forward(self, input)
89
90 def forward(self, input: Tensor) -> Tensor:
---> 91 return F.linear(input, self.weight, self.bias)
92
93 def extra_repr(self) -> str:

c:\users\mynam\appdata\local\programs\python\python38\lib\site-packages\torch\nn\functional.py in linear(input, weight, bias)
1672 if input.dim() == 2 and bias is not None:
1673 # fused op is marginally faster
-> 1674 ret = torch.addmm(bias, input, weight.t())
1675 else:
1676 output = input.matmul(weight.t())

RuntimeError: mat1 dim 1 must match mat2 dim 0
I printed out the dimensions of my tensors and they are torch.Size([250, 10]) and torch.Size([250]) if that helps.

The error is raised in:

 x = self.fc1(text)

Print the shape of `text` right before passing it to `self.fc1` and make sure the `in_features` of the linear layer matches the feature size (the last dimension) of `text`.
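
For example, here is a minimal sketch assuming the printed torch.Size([250, 10]) is what actually reaches fc1, so the feature size is 10 (the hidden size of 64 is just a placeholder):

```python
import torch
import torch.nn as nn

# dummy input with the shape from your printout: [250, 10]
text = torch.randn(250, 10)

# nn.Linear is applied to the last dimension,
# so in_features must equal text.size(-1)
fc1 = nn.Linear(in_features=text.size(-1), out_features=64, bias=True)

out = fc1(text.float())
print(out.shape)  # torch.Size([250, 64])
```

If the feature dimension is not the last one (e.g. the tensor is [seq_len, batch]), you would need to permute or reshape `text` before the linear layer instead of only changing `in_features`.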

PS: you can post code snippets by wrapping them in three backticks (```), which makes debugging easier. :wink: