I am training a simple feed-forward classifier on TF-IDF features and get the following error when computing the loss: Dimension out of range (expected to be in range of [-1, 0], but got 1)

Dataset

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset

class dataset(Dataset):
    def __init__(self):
        # TF-IDF features for the whole corpus, labels taken from the dataframe
        self.tf=TfidfVectorizer(max_df=0.99, min_df=0.005)
        self.x=self.tf.fit_transform(corpus).toarray()
        self.y=list(df.review)
        self.x_train,self.x_test,self.y_train,self.y_test=train_test_split(self.x,self.y,test_size=0.2)
        self.token2idx=self.tf.vocabulary_
        self.idx2token = {idx: token for token, idx in self.token2idx.items()}
        print(self.idx2token)

    def __getitem__(self,i):
        return self.x_train[i, :], self.y_train[i]

    def __len__(self):
        return self.x_train.shape[0]

Classifier

import torch.nn as nn
import torch.nn.functional as F

class classifier(nn.Module):
    def __init__(self,vocab_size,hidden1,hidden2):
        super(classifier,self).__init__()
        self.fc1=nn.Linear(vocab_size,hidden1)
        self.fc2=nn.Linear(hidden1,hidden2)
        self.fc3=nn.Linear(hidden2,1)   # single output unit

    def forward(self,inputs):
        x=F.relu(self.fc1(inputs.squeeze(1).float()))
        x=F.relu(self.fc2(x))
        return self.fc3(x)              # shape: [batch_size, 1]

Training Loop

epochs=10
total=0
train_losses=[]
model.train()
for epoch in tqdm(range(epochs)):
    progress_bar=tqdm_notebook(train_loader,leave=False)
    losses=[]
    correct=0
    for inputs,target in progress_bar:
        model.zero_grad()
        output=model(inputs)
        print(output.squeeze().shape)
        print(target.shape)
        loss=criterion(output.squeeze(),target.float())
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), 3)
        optim.step()
        correct += (output == target).float().sum()
        progress_bar.set_description(f'Loss: {loss.item():.3f}')
        losses.append(loss.item())
        total += 1
    epoch_loss = sum(losses) / total
    train_losses.append(epoch_loss)   
    tqdm.write(f'Epoch #{epoch + 1}\tTrain Loss: {epoch_loss:.3f}\tAccuracy: {correct/output.shape[0]}')

Error

IndexError                                Traceback (most recent call last)
<ipython-input-78-6b86c97bcabf> in <module>
     14         print(output.squeeze().shape)
     15         print(target.shape)
---> 16         loss=criterion(output.squeeze(),target.float())
     17         loss.backward()
     18         nn.utils.clip_grad_norm_(model.parameters(), 3)

~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    548             result = self._slow_forward(*input, **kwargs)
    549         else:
--> 550             result = self.forward(*input, **kwargs)
    551         for hook in self._forward_hooks.values():
    552             hook_result = hook(self, input, result)

~\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
    930     def forward(self, input, target):
    931         return F.cross_entropy(input, target, weight=self.weight,
--> 932                                ignore_index=self.ignore_index, reduction=self.reduction)
    933 
    934 

~\Anaconda3\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
   2315     if size_average is not None or reduce is not None:
   2316         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2317     return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
   2318 
   2319 

~\Anaconda3\lib\site-packages\torch\nn\functional.py in log_softmax(input, dim, _stacklevel, dtype)
   1533         dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel)
   1534     if dtype is None:
-> 1535         ret = input.log_softmax(dim)
   1536     else:
   1537         ret = input.log_softmax(dim, dtype=dtype)

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

Based on the error message, it seems the criterion is computing the log_softmax internally in dim1 (which is the standard and correct approach for a multi-class classification), but your tensor has only a single dimension.
Could you remove the squeeze() operation in loss = criterion(output.squeeze(), ...) and check if it's working? This op removes all dimensions with a size == 1, so the model output of shape [batch_size, 1] collapses to [batch_size], leaving no class dimension for the softmax.
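
Here is a minimal sketch of the shape issue, assuming a batch of 4 samples and the single-logit head from your classifier (the dummy target values are just placeholders):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
output = torch.randn(4, 1)                  # model output: [batch_size, 1]
target = torch.zeros(4, dtype=torch.long)   # class indices: [batch_size]

# output.squeeze() drops the size-1 class dimension -> shape [4],
# so the internal log_softmax(input, dim=1) raises the IndexError you see.
# loss = criterion(output.squeeze(), target)

# Keeping the 2-D output avoids the error:
loss = criterion(output, target)

Also note that nn.CrossEntropyLoss expects the targets as class indices (torch.long), not floats, and a single output unit gives a degenerate softmax over one class. For a binary setup you would usually either use two output units with nn.CrossEntropyLoss, or keep the single logit and switch to nn.BCEWithLogitsLoss with float targets.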