Error while permuting axes in a CRNN network

I am new to PyTorch (migrating from Keras) and am trying to build a CNN-LSTM network. The layer before my LSTM outputs a tensor of shape (batch_size, features, time_steps). Since an LSTM with batch_first=True expects inputs of shape (batch_size, time_steps, features), I permute the axes before the LSTM. However, I get an error while running summary from torchsummary. When I print the type of the input going into the LSTM it shows Tensor, so I am not sure where the tuple in the error message comes from. Details are given below.
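To illustrate the permutation I am doing (the shapes here are just an example, not the actual ones coming out of my pooling layers):

import torch

x = torch.randn(4, 256, 58)   # (batch_size, features, time_steps), example shapes only
x = x.permute(0, 2, 1)        # -> (batch_size, time_steps, features) for a batch_first=True LSTM
print(x.shape)                # torch.Size([4, 58, 256])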

Network

import torch
import torch.nn as nn

class audioNet(nn.Module):
	def __init__(self):
		super(audioNet,self).__init__()
		self.conv1 = nn.Conv1d(in_channels=1,out_channels=64,kernel_size=6,stride=1,padding=0) # input: (bs,1,240000)
		self.relu1 = nn.ReLU()
		self.bnorm1 = nn.BatchNorm1d(num_features=64) # input: (bs,64,239995)
		self.mpool1 = nn.MaxPool1d(kernel_size=8,stride=8)
		self.conv2 = nn.Conv1d(in_channels=64,out_channels=128,kernel_size=4,stride=1,padding=0) 
		self.relu2 = nn.ReLU()
		self.bnorm2 = nn.BatchNorm1d(num_features=128) 
		self.mpool2 = nn.MaxPool1d(kernel_size=6,stride=6)
		self.conv3 = nn.Conv1d(in_channels=128,out_channels=256,kernel_size=4,stride=1,padding=0) 
		self.relu3 = nn.ReLU()
		self.bnorm3 = nn.BatchNorm1d(num_features=256) 
		self.mpool3 = nn.MaxPool1d(kernel_size=6,stride=6)
		self.apool1 = nn.AvgPool1d(kernel_size=4)
		self.lstm1 = nn.LSTM(input_size=256,hidden_size=128,batch_first=True) # batch_first=True expects input shape (bs,time_steps,feature_dim) while actual input is (bs,feature_dim,time_steps). So, swap 2 and 3 axes.
		self.bnorm4 = nn.BatchNorm1d(num_features=128) 
		self.lstm2 = nn.LSTM(input_size=128,hidden_size=64,batch_first=True)
		self.bnorm5 = nn.BatchNorm1d(num_features=64)
		self.lin1 = nn.Linear(64,7)
		self.smax1 = nn.Softmax(dim=-1) # softmax over the 7 class scores
	def forward(self,x):
		out = self.conv1(x)
		out = self.relu1(out)
		out = self.bnorm1(out)
		out = self.mpool1(out)
		out = self.conv2(out)
		out = self.relu2(out)
		out = self.bnorm2(out)
		out = self.mpool2(out)
		out = self.conv3(out)
		out = self.relu3(out)
		out = self.bnorm3(out)
		out = self.mpool3(out)
		out = self.apool1(out)
		out = out.permute(0,2,1)
		print(type(out))
		out = self.lstm1(out)
		out = self.bnorm4(out)
		out = self.lstm2(out)
		out = self.bnorm5(out)
		out = self.lin1(out)
		out = self.smax1(out)
		return out
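The summary call is something like the following (the exact input_size and device arguments here are assumptions based on the comment on conv1):

from torchsummary import summary

model = audioNet()
summary(model, input_size=(1, 240000), device="cpu")  # input_size excludes the batch dimension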

Error:

Traceback (most recent call last):
  File "", line 1, in <module>
  File "/data/home/eex608/conda3_envs/PyT3/lib/python3.7/site-packages/torchsummary/torchsummary.py", line 72, in summary
    model(*x)
  File "/data/home/eex608/conda3_envs/PyT3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "", line 39, in forward
  File "/data/home/eex608/conda3_envs/PyT3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 543, in __call__
    hook_result = hook(self, input, result)
  File "/data/home/eex608/conda3_envs/PyT3/lib/python3.7/site-packages/torchsummary/torchsummary.py", line 23, in hook
    [-1] + list(o.size())[1:] for o in output
  File "/data/home/eex608/conda3_envs/PyT3/lib/python3.7/site-packages/torchsummary/torchsummary.py", line 23, in <listcomp>
    [-1] + list(o.size())[1:] for o in output
AttributeError: 'tuple' object has no attribute 'size'

I figured this out; it was a silly mistake. I had forgotten that nn.LSTM does not return a single tensor: it returns the output sequence together with the final hidden and cell states (h_n, c_n), so I was passing that whole tuple on to the next layer.
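In case it helps anyone else, here is a minimal, self-contained sketch of the unpacking (shapes are only an example); in the model above the same idea applies to both self.lstm1 and self.lstm2, e.g. out, _ = self.lstm1(out):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=256, hidden_size=128, batch_first=True)
x = torch.randn(4, 58, 256)   # (batch_size, time_steps, features), example shapes only

out, (h_n, c_n) = lstm(x)     # nn.LSTM returns (output, (h_n, c_n)), not a single tensor
print(out.shape)              # torch.Size([4, 58, 128]) -> full output sequence
print(h_n.shape, c_n.shape)   # torch.Size([1, 4, 128]) each -> final hidden/cell state

Note that the following BatchNorm1d layers then see (batch_size, time_steps, hidden_size), while BatchNorm1d expects the channel dimension second, so a permute back is needed before them as well.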