RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3 and 20x10)

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from typing import List
from collections import Counter
def build_vocab(words: List[str]):
    vocab = Counter()
    for w in words:
        vocab[w] += 1
    return vocab
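# Quick sanity check of build_vocab (my own, hypothetical example):
# build_vocab(["a", "b", "a"]) returns Counter({'a': 2, 'b': 1}),
# so len(vocab) below gives the number of distinct words.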
CONTEXT_SIZE = 3  # Context window size: the context includes 3 words to the left
# and 3 words to the right of the target

# In Python, enclosing a string within triple quotes (either ''' or """)
# allows you to create a multiline string literal

raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()

#print (raw_text) - try it out
vocab = build_vocab(raw_text)
#print (vocab) - try it out
vocab_size = len(vocab)

print('vocab_size', vocab_size)

word_to_idx = {word: i for i, word in enumerate(vocab)}  # index for each word in vocab
idx_to_word = list(vocab)  # list of words in the vocab

# print('word_to_idx', word_to_idx) - try it out
# print('idx_to_word', idx_to_word) - try it out
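# e.g. (assuming the insertion-ordered dicts of Python 3.7+): the first word "We"
# gets index 0, so word_to_idx['We'] == 0 and idx_to_word[0] == 'We'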

word_indices = [word_to_idx[w] for w in raw_text]  # every word in the raw text mapped to its index

# print('word_indices', word_indices)
# print('len(word_indices)', len(word_indices))

# The function prepare_data creates data samples, each pairing 2 * CONTEXT_SIZE = 6
# context words with the target word between them
def prepare_data(word_indices):
    data = []
    for i in range(CONTEXT_SIZE, len(word_indices) - CONTEXT_SIZE):

        #### START YOUR CODE ####
        # Hint: You can initialize context to an empty list
        # and then use a for loop to append elements to context properly.
        context = []

        # Collect the context words around the target word
        for j in range(i - CONTEXT_SIZE, i + CONTEXT_SIZE + 1):
            if j != i:  # Avoid adding the target word itself
                context.append(word_indices[j])

        target = word_indices[i]
        #### END YOUR CODE ####

        data.append((context, target))

    return data

print(type(vocab))
print(len(vocab))
print(type(word_indices))
print(len(word_indices))

Test Task 1. Do not change the code below.

data = prepare_data(word_indices)
print('length of data', len(data))
print('data[0]:', data[0])
ctx, tgt = data[0]
print('context words:', [idx_to_word[c] for c in ctx])
print('target word:', idx_to_word[tgt])
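
As an extra sanity check (my own addition, not part of the tasks), every sample should contain exactly 2 * CONTEXT_SIZE = 6 context indices:

# Hypothetical check, relying only on the data built above
print('all contexts have 6 words:', all(len(c) == 2 * CONTEXT_SIZE for c, _ in data))
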
class CBOW(nn.Module):
    def __init__(self, vocab_size, embedding_dim, no_of_samples, embed_weights):
        super(CBOW, self).__init__()

        #### START YOUR CODE ####
        # Hint: use nn.Embedding() if no initial pre-trained embeddings are provided in embed_weights.
        # Use nn.Embedding.from_pretrained() if embed_weights are provided.
        if embed_weights is None:
            self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        else:
            self.embeddings = nn.Embedding.from_pretrained(embed_weights, freeze=False)

        self.linear = nn.Linear(embedding_dim, vocab_size)
        self.act = nn.LogSoftmax(dim=-1)
        #### END YOUR CODE ####

    def forward(self, inputs):

        #### START YOUR CODE ####
        embeds = self.embeddings(inputs)

        # Sum embeddings along the context-words dimension
        # (assuming inputs have shape [batch_size, context_size])
        embeds = torch.sum(embeds, dim=1)
        #### END YOUR CODE ####

        out = self.linear(embeds)
        out = self.act(out)

        return out

Test Task 2. Do not change the code below.

torch.manual_seed(0)

m = CBOW(10, 20, 3, embed_weights=None)
test_input = torch.tensor([1,2,3], dtype=torch.long)

test_output = m(test_input)

print('test_output.shape', test_output.shape)
print('test_output', test_output.data)

OUTPUT:

RuntimeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_22152/2979772149.py in <module>
5 test_input = torch.tensor([1,2,3], dtype=torch.long)
6
----> 7 test_output = m(test_input)
8
9 print('test_output.shape', test_output.shape)

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
-> 1532 return self._call_impl(*args, **kwargs)
1533
1534 def _call_impl(self, *args, **kwargs):

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *args, **kwargs)
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1541 return forward_call(*args, **kwargs)
1542
1543 try:

~\AppData\Local\Temp/ipykernel_22152/3066800851.py in forward(self, inputs)
26 #### END YOUR CODE ####
27
---> 28 out = self.linear(embeds)
29 out = self.act(out)
30

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
-> 1532 return self._call_impl(*args, **kwargs)
1533
1534 def _call_impl(self, *args, **kwargs):

~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *args, **kwargs)
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1541 return forward_call(*args, **kwargs)
1542
1543 try:

~\anaconda3\lib\site-packages\torch\nn\modules\linear.py in forward(self, input)
114
115 def forward(self, input: Tensor) -> Tensor:
--> 116 return F.linear(input, self.weight, self.bias)
117
118 def extra_repr(self) -> str:

RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3 and 20x10)
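
For what it's worth, here is a minimal shape trace I put together while debugging (a sketch, assuming the same sizes as Test Task 2: vocab_size=10, embedding_dim=20, and the 1-D test input of 3 indices):

import torch
import torch.nn as nn

emb = nn.Embedding(10, 20)
x = torch.tensor([1, 2, 3], dtype=torch.long)  # 1-D input: shape [3]
e = emb(x)                                     # shape [3, 20]
s = torch.sum(e, dim=1)                        # shape [3] -- the embedding dim (20) was summed away
print(e.shape, s.shape)                        # torch.Size([3, 20]) torch.Size([3])
# nn.Linear(20, 10) then sees mat1 = [1, 3] and mat2 = [20, 10],
# which matches the error above.

So the sum in forward appears to collapse the embedding dimension (dim=1 of the 2-D [context, embedding] tensor) because the test input is unbatched; summing over dim=0, or reshaping the input to [1, 3], makes the shapes line up, but I am not sure which is intended given that the test code must not be changed.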