ValueError: Target size (torch.Size([32])) must be the same as input size (torch.Size([32, 1]))

ValueError                                Traceback (most recent call last)
<ipython-input-29-f01cf5c6afa7> in <module>
----> 1 learner.lr_find()

/opt/conda/lib/python3.6/site-packages/fastai/ in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
     39     cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
     40     epochs = int(np.ceil(num_it/len(
---> 41, start_lr, callbacks=[cb], wd=wd)
     43 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

/opt/conda/lib/python3.6/site-packages/fastai/ in fit(self, epochs, lr, wd, callbacks)
    198         else:,self.opt.wd = lr,wd
    199         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
--> 200         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    202     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

/opt/conda/lib/python3.6/site-packages/fastai/ in fit(epochs, learn, callbacks, metrics)
     99             for xb,yb in progress_bar(, parent=pbar):
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
--> 101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
    102                 if cb_handler.on_batch_end(loss): break

/opt/conda/lib/python3.6/site-packages/fastai/ in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     29     if not loss_func: return to_detach(out), to_detach(yb[0])
---> 30     loss = loss_func(out, *yb)
     32     if opt is not None:

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/ in __call__(self, *input, **kwargs)
    530             result = self._slow_forward(*input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():
    534             hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/ in forward(self, input, target)
    599                                                   self.weight,
    600                                                   pos_weight=self.pos_weight,
--> 601                                                   reduction=self.reduction)

/opt/conda/lib/python3.6/site-packages/torch/nn/ in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
   2123     if not (target.size() == input.size()):
-> 2124         raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
   2126     return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)

ValueError: Target size (torch.Size([32])) must be the same as input size (torch.Size([32, 1]))

from fastai.callbacks import *

learner = Learner(
    databunch, bert_model,
if config.use_fp16: learner = learner.to_fp16()

Running learner.lr_find() gives the above error. I am using BERT for sentiment analysis with fastai.

A tensor of shape torch.Size([32]) is 1-dimensional, while torch.Size([32, 1]) is a 2-dimensional column vector. You can easily convert between them:

a = torch.ones(32)       # 1-D tensor of shape [32]
b = a.unsqueeze(-1)      # add a trailing axis: shape becomes [32, 1]

print(b.squeeze().shape)  # squeeze drops the size-1 axis, back to [32]

Where do I implement this conversion?
Actually I have

databunch = BertDataBunch.from_df(".", train, val, test,
                  collate_fn=partial(pad_collate, pad_first=False, pad_idx=0),
class FastAiBertTokenizer(BaseTokenizer):
    """Adapter that lets a HuggingFace BertTokenizer plug into fastai's tokenizer API."""

    def __init__(self, tokenizer: BertTokenizer, max_seq_len: int = 128, **kwargs):
        # Keep a handle on the pretrained tokenizer and the truncation limit.
        self._pretrained_tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def __call__(self, *args, **kwargs):
        # fastai calls the tokenizer factory; returning self reuses this instance.
        return self

    def tokenizer(self, t: str) -> List[str]:
        """Tokenize *t*, truncated so the [CLS]/[SEP]-wrapped result fits max_seq_len."""
        body = self._pretrained_tokenizer.tokenize(t)[: self.max_seq_len - 2]
        return ["[CLS]"] + body + ["[SEP]"]

Do I need to implement this code in tokenizer?
Then how?

Sorry, I cannot help further here since I have never worked with fastai, and I cannot spot anything wrong in the code you posted. I only focused on the thrown error.