TypeError: 'bool' object is not callable

Hi, I am trying to combine a CNN and an LSTM for image data: the CNN is trained on the images first, and its output is then fed to the LSTM, which treats the predictions for each image as one step of a time series.
Below is the code that builds the structure of my network. The CNN on its own works well; the error appears when I pass the CNN module to the LSTM. How can I solve it? I face two problems. The first one occurs at this line of my script:

images = images.view(-1, sequence_dim, INPUT_DIM).requires_grad()
TypeError: 'bool' object is not callable

The second raises "TypeError: 'CNNLetNet' object is not iterable" when I remove the view(-1, sequence_dim, INPUT_DIM).requires_grad() call. Need help, thanks.

class CNNLetNet(nn.Module):
    def __init__(self):
        super(CNNLetNet, self).__init__()
        """
         @ defining four cnn network to perform our data 
         in a set of sequential layers
        """
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, 5, stride=1, padding=1)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5, stride=1, padding=1)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(16, 32, 2, stride=1, padding=1)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(32 * 3 * 3, 120)
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 60)
        )
        self.fc3 = nn.Sequential(
            nn.Linear(60, 10)
        )
        self.pool = nn.Sequential(
            nn.MaxPool2d(2, 2)
        )
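
The forward method of CNNLetNet is not shown in the post; a minimal sketch of what it could look like, assuming 28x28 single-channel inputs and ReLU activations (both assumptions, chosen so the flattened size matches the 32 * 3 * 3 expected by fc1):

    # plausible forward pass, not from the original post
    # assumes: import torch and import torch.nn.functional as F
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))   # 1x28x28 -> 6x13x13
        x = self.pool(F.relu(self.conv2(x)))   # -> 16x5x5
        x = self.pool(F.relu(self.conv3(x)))   # -> 32x3x3
        x = torch.flatten(x, 1)                # -> 32 * 3 * 3 = 288
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

The LSTM part of the code: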
import torch
import torch.nn as nn
from cnn_net import CNNLetNet, trainset, testset
from torch.utils.data import DataLoader


class LSTMNet(nn.Module):
    """
     @ the primary goal is to design a sequential lstm network
     with two layers to sequentially train the data
    """
    def __init__(self, input_dim, hidden_dim1, hidden_dim2, layers_dim1, layers_dim2, output_dim):
        super(LSTMNet, self).__init__()
        # Hidden dimensions
        self.hidden1 = hidden_dim1
        self.hidden2 = hidden_dim2
        # Number of hidden layers
        self.layers_dim1 = layers_dim1
        self.layers_dim2 = layers_dim2
        self.cnns = CNNLetNet().to(device='cuda' if torch.cuda.is_available() else 'cpu')
        # print(self.cnns)

        self.lstm1 = nn.Sequential(
            nn.LSTM(input_dim, hidden_dim1, layers_dim1, batch_first=True)
        )
        self.lstm2 = nn.Sequential(
            nn.LSTM(hidden_dim1, hidden_dim2, layers_dim2, batch_first=True)
        )

        self.fc1 = nn.Sequential(
            nn.Linear(hidden_dim2, output_dim)
        )

    def forward(self, x):
        # Initialize hidden state with zeros
        hidden_state1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()
        # Initialize cell state
        hidden_cell_1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()

        hidden_state2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()
        hidden_cell_2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()

        cnn_outs = []   # empty list to collect the outputs of the trained CNN
        for conv in self.cnns:
            cnn_outs.append(conv)
        outs = torch.cat(cnn_outs)
        outs = torch.flatten(outs, 1)
        _, outputs1 = self.lstm1(outs, (hidden_state1, hidden_cell_1))
        _, outputs2 = self.lstm2(outputs1, (hidden_state2, hidden_cell_2))

        outputs = self.fc1(outputs2[:, -1, :])
        return outputs

tensor.requires_grad is an attribute returning a bool so you can’t call it.
Maybe you want to use tensor.requires_grad_() instead which will change the internal attribute?
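
For illustration, a minimal standalone example of the difference (the tensor name is made up):

import torch

x = torch.zeros(3)
print(x.requires_grad)   # attribute access: prints False, a plain bool
x.requires_grad_()       # in-place method: flips the attribute to True
print(x.requires_grad)   # now True
# x.requires_grad()      # TypeError: 'bool' object is not callable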

I am using exactly this tensor.requires_grad_() and still get an error. Or is my problem in the concatenation part (where all the CNN layers are concatenated)? That is, here:

        cnn_outs = []   # empty list to collect the outputs of the trained CNN
        for conv in self.cnns:
            cnn_outs.append(conv)
        outs = torch.cat(cnn_outs)
        outs = torch.flatten(outs, 1)
        _, outputs1 = self.lstm1(outs, (hidden_state1, hidden_cell_1))
        _, outputs2 = self.lstm2(outputs1, (hidden_state2, hidden_cell_2))

        outputs = self.fc1(outputs2[:, -1, :])

Because it shows this message:

In your failed code snippet you showed:

images = images.view(-1, sequence_dim, INPUT_DIM).requires_grad()

which is not the same, as the trailing underscore is missing and will thus fail as previously described.
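
With the trailing underscore added, the line uses the in-place method instead of calling the bool attribute:

images = images.view(-1, sequence_dim, INPUT_DIM).requires_grad_()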

After changing the code, it shows this message:

You can check out my full code:

import torch
import torch.nn as nn
from cnn_net import CNNLetNet, trainset, testset
from torch.utils.data import DataLoader


class LSTMNet(nn.Module):
    """
     @ 
    """
    def __init__(self, input_dim, hidden_dim1, hidden_dim2, layers_dim1, layers_dim2, output_dim):
        super(LSTMNet, self).__init__()
        # Hidden dimensions
        self.hidden1 = hidden_dim1
        self.hidden2 = hidden_dim2
        # Number of hidden layers
        self.layers_dim1 = layers_dim1
        self.layers_dim2 = layers_dim2
        self.cnns = CNNLetNet().to(device='cuda' if torch.cuda.is_available() else 'cpu')
        # print(self.cnns)

        self.lstm1 = nn.Sequential(
            nn.LSTM(input_dim, hidden_dim1, layers_dim1, batch_first=True)
        )
        self.lstm2 = nn.Sequential(
            nn.LSTM(hidden_dim1, hidden_dim2, layers_dim2, batch_first=True)
        )

        self.fc1 = nn.Sequential(
            nn.Linear(hidden_dim2, output_dim)
        )

    def forward(self, x):
        # Initialize hidden state with zeros
        hidden_state1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()
        # Initialize cell state
        hidden_cell_1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()

        hidden_state2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()
        hidden_cell_2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()

        cnn_outs = []   # empty list to collect the outputs of the trained CNN
        for conv in self.cnns:
            cnn_outs.append(conv)
        outs = torch.cat(cnn_outs)
        outs = torch.flatten(outs, 1)
        _, outputs1 = self.lstm1(outs, (hidden_state1, hidden_cell_1))
        _, outputs2 = self.lstm2(outputs1, (hidden_state2, hidden_cell_2))

        outputs = self.fc1(outputs2[:, -1, :])
        return outputs


if __name__ == '__main__':
    INPUT_DIM = 10
    HIDDEN_DIM1 = 8
    HIDDEN_DIM2 = 5
    LAYERS_DIM1 = 1
    LAYERS_DIM2 = 2
    OUTPUT_DIM = 2

    model_lstm = LSTMNet(INPUT_DIM, HIDDEN_DIM1, HIDDEN_DIM2, LAYERS_DIM1, LAYERS_DIM2, OUTPUT_DIM)
    print(model_lstm)

    epochs = 20
    learning_rate = 0.001
    batch_size = 100

    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model_lstm.parameters(), lr=learning_rate)
    # Number of steps to unroll
    sequence_dim = 28
    loss_sequence = 0.0
    iteration_over = 0
    for epoch in range(epochs):
        model_lstm.train()
        for i, (images, labels) in enumerate(trainloader):
            model_lstm.train()
            images = images.view(-1, sequence_dim, INPUT_DIM).requires_grad_()

            outputs = model_lstm(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()

            # Getting gradients
            loss.backward()

            # Updating parameters
            optimizer.step()

            iteration_over += 1
            loss_sequence += loss.item()

            if iteration_over % 100 == 0:

                # print(f"nIteration : {iteration_over}")
                # with torch.no_grad():
                model_lstm.eval()
                correct = 0
                total = 0
                for images, labels in testloader:
                    model_lstm.eval()
                    images = images.view(-1, sequence_dim, INPUT_DIM)

                    outputs = model_lstm(images)
                    _, predictions = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predictions == labels).sum()

                total_accuracy = 100 * correct / total
                print(f"nIteration : {iteration_over} | nLoss : {loss_sequence / 100} | "
                      f"Total accuracy over the data : {total_accuracy}")

As the error explains: CNNLetNet is not iterable as it’s a custom nn.Module.
If you want to iterate modules, use e.g. nn.ModuleList.
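
For illustration, a minimal sketch of that suggestion (the layer shapes here are made up):

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # nn.ModuleList registers the submodules and supports iteration
        self.convs = nn.ModuleList([
            nn.Conv2d(1, 6, 5),
            nn.Conv2d(6, 16, 5),
        ])

    def forward(self, x):
        for conv in self.convs:   # iterating over the container works
            x = conv(x)
        return x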

It seems to work well with nn.ModuleList. But now another issue arises while running the entire code. I'll paste all the code in sequence:

Model:

import torch.nn.functional as F  # missing import, needed for F.relu below

class LSTMNet(nn.Module):
    """
     @ 
    """
    def __init__(self, model, input_dim, hidden_dim1, hidden_dim2, layers_dim1, layers_dim2, output_dim):
        super(LSTMNet, self).__init__()
        # Hidden dimensions
        self.hidden1 = hidden_dim1
        self.hidden2 = hidden_dim2
        # Number of hidden layers
        self.layers_dim1 = layers_dim1
        self.layers_dim2 = layers_dim2
        self.cnns = model  # the CNN model (CNNLetNet) passed in from outside
        # print(self.cnns)

        self.lstm1 = nn.Sequential(
            nn.LSTM(input_dim, hidden_dim1, layers_dim1, batch_first=True)
        )
        self.lstm2 = nn.Sequential(
            nn.LSTM(hidden_dim1, hidden_dim2, layers_dim2, batch_first=True)
        )

        self.fc1 = nn.Sequential(
            nn.Linear(hidden_dim2, output_dim)
        )

    def forward(self, x):
        # Initialize hidden state with zeros
        hidden_state1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()
        # Initialize cell state
        hidden_cell_1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()

        hidden_state2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()
        hidden_cell_2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()

        _, outputs1 = self.lstm1(x, (hidden_state1.detach(), hidden_cell_1.detach()))
        _, outputs2 = self.lstm2(outputs1, (hidden_state2.detach(), hidden_cell_2.detach()))

        outputs = F.relu(self.fc1(outputs2[:, -1, :]))
        return outputs

Training loop part:

if __name__ == '__main__':
    INPUT_DIM = 10
    HIDDEN_DIM1 = 8
    HIDDEN_DIM2 = 5
    LAYERS_DIM1 = 1
    LAYERS_DIM2 = 2
    OUTPUT_DIM = 2

    model = CNNLetNet()

    model_lstm = LSTMNet(model, INPUT_DIM, HIDDEN_DIM1, HIDDEN_DIM2, LAYERS_DIM1, LAYERS_DIM2, OUTPUT_DIM)
    print(model_lstm)

    epochs = 20
    learning_rate = 0.001
    batch_size = 100

    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model_lstm.parameters(), lr=learning_rate)
    # Number of steps to unroll
    sequence_dim = 28
    loss_sequence = 0.0
    iteration_over = 0
    for epoch in range(epochs):
        model_lstm.train()
        for i, (images, labels) in enumerate(trainloader):
            model_lstm.train()
            images = images.view(-1, sequence_dim, INPUT_DIM).requires_grad_()

            outputs = model_lstm(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()

            # Getting gradients
            loss.backward()

            # Updating parameters
            optimizer.step()

            iteration_over += 1
            loss_sequence += loss.item()

            if iteration_over % 100 == 0:

                # print(f"nIteration : {iteration_over}")
                # with torch.no_grad():
                model_lstm.eval()
                correct = 0
                total = 0
                for images, labels in testloader:
                    model_lstm.eval()
                    images = images.view(-1, sequence_dim, INPUT_DIM)

                    outputs = model_lstm(images)
                    _, predictions = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predictions == labels).sum()

                total_accuracy = 100 * correct / total
                print(f"nIteration : {iteration_over} | nLoss : {loss_sequence / 100} | "
                      f"Total accuracy over the data : {total_accuracy}")

Error:

You are trying to pass multiple input arguments to an nn.Sequential module:

_, outputs1 = self.lstm1(x, (hidden_state1.detach(), hidden_cell_1.detach()))

which won’t work.
Remove the nn.Sequential container and assign self.lstm1 directly to the nn.LSTM module.
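
For illustration, a sketch of that change using the parameter names from the post:

# assign the LSTMs directly instead of wrapping them in nn.Sequential ...
self.lstm1 = nn.LSTM(input_dim, hidden_dim1, layers_dim1, batch_first=True)
self.lstm2 = nn.LSTM(hidden_dim1, hidden_dim2, layers_dim2, batch_first=True)

# ... so the extra (h0, c0) tuple can be passed in the forward pass;
# note that nn.LSTM returns (output, (h_n, c_n))
output1, (h1, c1) = self.lstm1(x, (hidden_state1.detach(), hidden_cell_1.detach()))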

I have corrected the issue, thanks. But after the correction, this error is raised:

    def forward(self, x):
        # Initialize hidden state with zeros
        hidden_state1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()
        # Initialize cell state
        hidden_cell_1 = torch.zeros(self.layers_dim1, x.size(0), self.hidden1).requires_grad_()

        hidden_state2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()
        hidden_cell_2 = torch.zeros(self.layers_dim2, x.size(0), self.hidden2).requires_grad_()

        _, outputs1 = self.lstm1(x, (hidden_state1.detach(), hidden_cell_1.detach()))

        _, outputs2 = self.lstm2(outputs1, (hidden_state2.detach(), hidden_cell_2.detach()))

        outputs = self.fc1(outputs2[:, -1, :])
        return outputs