Issue when printing model summary: AttributeError: 'tuple' object has no attribute 'size'

I have the following model code:
import torch.nn as nn
import torch.nn.functional as F

class RNN(nn.Module):
    """RNN module (cell type lstm or gru)"""
    def __init__(
        self,
        input_size,
        hid_size,
        num_rnn_layers=1,
        dropout_p=0.2,
        bidirectional=False,
        rnn_type='lstm',
    ):
        super().__init__()

        if rnn_type == 'lstm':
            self.rnn_layer = nn.LSTM(
                input_size=input_size,
                hidden_size=hid_size,
                num_layers=num_rnn_layers,
                dropout=dropout_p if num_rnn_layers > 1 else 0,
                bidirectional=bidirectional,
                batch_first=True,
            )
        else:
            self.rnn_layer = nn.GRU(
                input_size=input_size,
                hidden_size=hid_size,
                num_layers=num_rnn_layers,
                dropout=dropout_p if num_rnn_layers > 1 else 0,
                bidirectional=bidirectional,
                batch_first=True,
            )

    def forward(self, input):
        outputs, hidden_states = self.rnn_layer(input)
        return outputs, hidden_states

class RNNModel(nn.Module):
    def __init__(
        self,
        input_size,
        hid_size,
        rnn_type,
        bidirectional,
        n_classes=5,
        kernel_size=5,
    ):
        super().__init__()

        self.rnn_layer = RNN(
            input_size=46,  # hid_size * 2 if bidirectional else hid_size,
            hid_size=hid_size,
            rnn_type=rnn_type,
            bidirectional=bidirectional,
        )
        self.conv1 = ConvNormPool(
            input_size=input_size,
            hidden_size=hid_size,
            kernel_size=kernel_size,
        )
        self.conv2 = ConvNormPool(
            input_size=hid_size,
            hidden_size=hid_size,
            kernel_size=kernel_size,
        )
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(in_features=hid_size, out_features=n_classes)

    def forward(self, input):
        print("shape")
        print(input.shape)  # prints torch.Size([32, 1, 187])
        x = self.conv1(input)
        x = self.conv2(x)
        x, _ = self.rnn_layer(x)
        x = self.avgpool(x)
        x = x.view(-1, x.size(1) * x.size(2))
        x = F.softmax(self.fc(x), dim=1)  # .squeeze(1)
        return x

When I create

m1 = RNNModel(1, 64, 'lstm', True).to(device)

and run

#from torchsummary import summary
summary(m1,(1,187),32)

the following error is raised:


AttributeError                            Traceback (most recent call last)
<ipython-input-...> in <module>
      1 #from torchsummary import summary
----> 2 summary(m1,(1,187),32)

/opt/conda/lib/python3.7/site-packages/torchsummary/torchsummary.py in summary(model, input_size, batch_size, device)
     70     # make a forward pass
     71     # print(x.shape)
---> 72     model(*x)
     73
     74     # remove these hooks

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-...> in forward(self, input)
     35         x = self.conv1(input)
     36         x = self.conv2(x)
---> 37         x, _ = self.rnn_layer(x)
     38         x = self.avgpool(x)
     39         x = x.view(-1, x.size(1) * x.size(2))

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-...> in forward(self, input)
     32         )
     33     def forward(self, input):
---> 34         outputs, hidden_states = self.rnn_layer(input)
     35         return outputs, hidden_states

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    729                 _global_forward_hooks.values(),
    730                 self._forward_hooks.values()):
--> 731             hook_result = hook(self, input, result)
    732         if hook_result is not None:
    733             result = hook_result

/opt/conda/lib/python3.7/site-packages/torchsummary/torchsummary.py in hook(module, input, output)
     21             if isinstance(output, (list, tuple)):
     22                 summary[m_key]["output_shape"] = [
---> 23                     [-1] + list(o.size())[1:] for o in output
     24                 ]
     25             else:

/opt/conda/lib/python3.7/site-packages/torchsummary/torchsummary.py in <listcomp>(.0)
     21             if isinstance(output, (list, tuple)):
     22                 summary[m_key]["output_shape"] = [
---> 23                     [-1] + list(o.size())[1:] for o in output
     24                 ]
     25             else:

AttributeError: 'tuple' object has no attribute 'size'

In the forward method I have printed the input shape, and it is torch.Size([32, 1, 187]), as noted in the comment above.
The batch size is 32.
Why is this happening? Please help.

This seems to be a known issue in pytorch-summary, which was fixed in a fork.
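
For reference, here is a minimal sketch (with made-up shapes) of what I believe trips the hook: nn.LSTM returns (output, (h_n, c_n)), so one of the elements torchsummary's output hook iterates over is itself a tuple and has no .size():

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=46, hidden_size=64, batch_first=True)
output, hidden = lstm(torch.randn(32, 10, 46))  # hypothetical (batch, seq, feature) input

print(type(output))  # <class 'torch.Tensor'>
print(type(hidden))  # <class 'tuple'>  -> (h_n, c_n)
# torchsummary's hook evaluates [-1] + list(o.size())[1:] for each o in (output, hidden),
# and hidden.size() raises AttributeError: 'tuple' object has no attribute 'size'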

@ptrblck can you please tell me the actual fix? I am unable to find the exact solution in the fork.

I think the "fix" is to use the package from the forked repository:

pip install torchinfo
from torchinfo import summary

model = ConvNet()
batch_size = 16
summary(model, input_size=(batch_size, 1, 28, 28))
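
Adapted to the model in this thread, the call would look something like this (assuming m1 is re-created first, so that no hooks left behind by the earlier, failed torchsummary run are still attached):

from torchinfo import summary

m1 = RNNModel(1, 64, 'lstm', True).to(device)  # fresh instance, no leftover hooks (assumption)
summary(m1, input_size=(32, 1, 187))           # (batch, channels, sequence length)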

@ptrblck I ran into another issue!


from torchinfo import summary
batch_size = 32
summary(m1, input_size=(32, 1, 187))

I am getting:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/torchinfo/torchinfo.py in summary(model, input_size, input_data, batch_dim, col_names, col_width, depth, device, dtypes, verbose, **kwargs)
    159                 if isinstance(x, (list, tuple)):
--> 160                     _ = model.to(device)(*x, **kwargs)
    161                 elif isinstance(x, dict):

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(

<ipython-input-49-26860265b2db> in forward(self, input)
     36         x = self.conv2(x)
---> 37         x, _ = self.rnn_layer(x)
     38         x = self.avgpool(x)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(

<ipython-input-48-c383aa5a9c6e> in forward(self, input)
     33     def forward(self, input):
---> 34         outputs, hidden_states = self.rnn_layer(input)
     35         return outputs, hidden_states

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    730                 self._forward_hooks.values()):
--> 731             hook_result = hook(self, input, result)
    732             if hook_result is not None:

/opt/conda/lib/python3.7/site-packages/torchsummary/torchsummary.py in hook(module, input, output)
     22                 summary[m_key]["output_shape"] = [
---> 23                     [-1] + list(o.size())[1:] for o in output
     24                 ]

/opt/conda/lib/python3.7/site-packages/torchsummary/torchsummary.py in <listcomp>(.0)
     22                 summary[m_key]["output_shape"] = [
---> 23                     [-1] + list(o.size())[1:] for o in output
     24                 ]

AttributeError: 'tuple' object has no attribute 'size'

The above exception was the direct cause of the following exception:

RuntimeError                              Traceback (most recent call last)
<ipython-input-101-d16bdb924f0b> in <module>
      1 from torchinfo import summary
      2 batch_size = 32
----> 3 summary(m1, input_size=(32, 1, 187))

/opt/conda/lib/python3.7/site-packages/torchinfo/torchinfo.py in summary(model, input_size, input_data, batch_dim, col_names, col_width, depth, device, dtypes, verbose, **kwargs)
    170                 "Failed to run torchinfo. See above stack traces for more details. "
    171                 f"Executed layers up to: {executed_layers}"
--> 172             ) from e
    173         finally:
    174             if hooks is not None:

RuntimeError: Failed to run torchinfo. See above stack traces for more details. Executed layers up to: [ConvNormPool: 1-1, Conv1d: 2-1, BatchNorm1d: 2-2, Swish: 2-3, Conv1d: 2-4, BatchNorm1d: 2-5, Swish: 2-6, Conv1d: 2-7, BatchNorm1d: 2-8, Swish: 2-9, MaxPool1d: 2-10, ConvNormPool: 1-2, Conv1d: 2-11, BatchNorm1d: 2-12, Swish: 2-13, Conv1d: 2-14, BatchNorm1d: 2-15, Swish: 2-16, Conv1d: 2-17, BatchNorm1d: 2-18, Swish: 2-19, MaxPool1d: 2-20]

Please help me resolve this issue! It's important!
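
One hedged observation about the stack trace above: although torchinfo is the package being called, the failing hook still comes from torchsummary/torchsummary.py, which suggests the hooks registered by the earlier, failed torchsummary call were never removed from m1. Re-creating m1 should avoid that; alternatively, a rough way to inspect and clear any leftover forward hooks (via the private _forward_hooks attribute) before retrying torchinfo:

# list and drop any forward hooks still attached from the earlier torchsummary run
for name, module in m1.named_modules():
    if module._forward_hooks:               # private attribute, used here only for debugging
        print(name, dict(module._forward_hooks))
        module._forward_hooks.clear()       # remove the stale hooks

from torchinfo import summary
summary(m1, input_size=(32, 1, 187))        # retry; assumes the stale hooks were the culprit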