I am getting an error while adding a transformer encoder layer with CNN layers above it. I am passing an input of size (2, 50, 768) to the transformer layer.
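
Here is a minimal sketch of just the transformer part (the CNN front-end is omitted, and nhead, num_layers, and batch_first are placeholders I chose for illustration; only d_model=768 and the (2, 50, 768) input match my actual setup). A plain forward pass works; it is the torchsummary call that produces the traceback below:

```python
import torch
import torch.nn as nn
from torchsummary import summary

# placeholder encoder: d_model=768 matches my input, nhead/num_layers are arbitrary
encoder_layer = nn.TransformerEncoderLayer(d_model=768, nhead=8, batch_first=True)
model = nn.TransformerEncoder(encoder_layer, num_layers=2)

x = torch.randn(2, 50, 768)   # (batch, seq_len, embed_dim)
print(model(x).shape)         # plain forward pass works: torch.Size([2, 50, 768])

# torchsummary registers a forward hook on every sub-module; inside
# TransformerEncoderLayer, nn.MultiheadAttention is called with
# need_weights=False, so it returns (attn_output, None), and the hook
# fails on the None entry with the AttributeError shown below
summary(model, (50, 768), device="cpu")
```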

model.apply(register_hook)
70 # make a forward pass
71 # print(x.shape)
---> 72 model(*x)
74 # remove these hooks
75 for h in hooks:

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\transformer.py:280, in TransformerEncoder.forward(self, src, mask, src_key_padding_mask)
277 src_key_padding_mask_for_layers = None
279 for mod in self.layers:
-> 280 output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask_for_layers)
282 if convert_to_nested:
283 output = output.to_padded_tensor(0.)

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1208, in Module._call_impl(self, *input, **kwargs)
1205 bw_hook = hooks.BackwardHook(self, full_backward_hooks)
1206 input = bw_hook.setup_input_hook(input)
-> 1208 result = forward_call(*input, **kwargs)
1209 if _global_forward_hooks or self._forward_hooks:
1210 for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\transformer.py:538, in TransformerEncoderLayer.forward(self, src, src_mask, src_key_padding_mask)
536 x = x + self._ff_block(self.norm2(x))
537 else:
-> 538 x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
539 x = self.norm2(x + self._ff_block(x))
541 return x

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\transformer.py:546, in TransformerEncoderLayer._sa_block(self, x, attn_mask, key_padding_mask)
544 def _sa_block(self, x: Tensor,
545 attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
-> 546 x = self.self_attn(x, x, x,
547 attn_mask=attn_mask,
548 key_padding_mask=key_padding_mask,
549 need_weights=False)[0]
550 return self.dropout1(x)

File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1211, in Module._call_impl(self, *input, **kwargs)
1209 if _global_forward_hooks or self._forward_hooks:
1210 for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):
-> 1211 hook_result = hook(self, input, result)
1212 if hook_result is not None:
1213 result = hook_result

File ~\AppData\Roaming\Python\Python39\site-packages\torchsummary\torchsummary.py:22, in summary.<locals>.register_hook.<locals>.hook(module, input, output)
20 summary[m_key]["input_shape"][0] = batch_size
21 if isinstance(output, (list, tuple)):
---> 22 summary[m_key]["output_shape"] = [
23 [-1] + list(o.size())[1:] for o in output
24 ]
25 else:
26 summary[m_key]["output_shape"] = list(output.size())

File ~\AppData\Roaming\Python\Python39\site-packages\torchsummary\torchsummary.py:23, in <listcomp>(.0)
20 summary[m_key]["input_shape"][0] = batch_size
21 if isinstance(output, (list, tuple)):
22 summary[m_key]["output_shape"] = [
---> 23 [-1] + list(o.size())[1:] for o in output
24 ]
25 else:
26 summary[m_key]["output_shape"] = list(output.size())

AttributeError: 'NoneType' object has no attribute 'size'

Hi,
Can you send the code that caused the error?