Multiple Sequential in one module

Hello all,
I am trying to convert f-AnoGAN to C++ (LibTorch). In the Discriminator there are two Sequential blocks:

self.model = nn.Sequential(…
self.adv_layer = nn.Sequential(…

Two forward functions call one or both of them:

features = self.forward_features(img)
validity = self.adv_layer(features)

or

features = self.model(img)

So one forward pass runs both sequences, the other one runs only model.
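In C++ the forwarding I am aiming for would look roughly like this (sketch only, using the same names as in the code below):

torch::Tensor GAN_DiscriminatorImpl::forward_features(torch::Tensor img) {
    // only the convolutional feature extractor
    return model->forward(img);
}

torch::Tensor GAN_DiscriminatorImpl::forward(torch::Tensor img) {
    // feature extractor followed by the adv_layer head
    torch::Tensor features = model->forward(img);
    return adv_layer->forward(features);
}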

For the module itself I tried:

struct GAN_DiscriminatorImpl : nn::Module{
private:
    nn::Sequential model;
    nn::Sequential adv_layer;
    nn::Sequential all;

The forward functions would then be the same as in Python (see the sketch above), but register_module doesn't work:

all= model + adv_layer;
register_module("Discriminator", this->all);

or

register_module("Discriminator", this->model, this->adv_layer);

Neither of them works. How can I append the two Sequentials?
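One thing I wondered about, but have not verified, is whether the entries of both Sequentials can simply be pushed into the third one inside the constructor (just a guess from looking at the SequentialImpl interface):

    // untested guess: copy the type-erased entries of both blocks into "all";
    // the underlying layer modules would then be shared between model/adv_layer and all
    for (const auto& item : *model)
        all->push_back(item);
    for (const auto& item : *adv_layer)
        all->push_back(item);
    register_module("Discriminator", all);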

I ended up with:

torch::Tensor GAN_DiscriminatorImpl::forward_features(torch::Tensor z) {

    // walk all submodules of "model" and forward z through every layer type
    // that is handled explicitly below, dispatching on the runtime type name
    std::vector<std::shared_ptr<torch::nn::Module>> mods = model->modules();
    for (auto mod = std::begin(mods); mod != std::end(mods); ++mod)
    {
        std::shared_ptr<torch::nn::Module> m = *mod;
        torch::nn::Module* m_ = m.get();

        if (m_->name().compare("torch::nn::Conv2dImpl") == 0)
        {
            torch::nn::Conv2dImpl* c = dynamic_cast<torch::nn::Conv2dImpl*>(m_);
            z = c->forward(z);
        }

        if (m_->name().compare("torch::nn::ReLUImpl") == 0)
        {
            torch::nn::ReLUImpl* c = dynamic_cast<torch::nn::ReLUImpl*>(m_);
            z = c->forward(z);
        }

        if (m_->name().compare("torch::nn::BatchNorm2dImpl") == 0)
        {
            torch::nn::BatchNorm2dImpl* c = dynamic_cast<torch::nn::BatchNorm2dImpl*>(m_);
            z = c->forward(z);
        }

        if (m_->name().compare("torch::nn::Dropout2dImpl") == 0)
        {
            torch::nn::Dropout2dImpl* c = dynamic_cast<torch::nn::Dropout2dImpl*>(m_);
            z = c->forward(z);
        }

        if (m_->name().compare("ViewImpl") == 0)
        {
            ViewImpl* c = dynamic_cast<ViewImpl*>(m_);
            z = c->forward(z);
        }
    }

    return z;

}

So the last step, the fully connected layer, is simply not forwarded. That works, but it is ugly.
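A somewhat less ugly variant I have been considering (untested sketch; it assumes model is a flat Sequential and that the fully connected layer is its last entry) would be to iterate over the Sequential's type-erased AnyModule entries instead of dynamic_cast'ing every layer type:

#include <iterator>   // std::prev

torch::Tensor GAN_DiscriminatorImpl::forward_features(torch::Tensor z) {
    // SequentialImpl iterates over its AnyModule entries, so no per-type casts are needed.
    // Stop before the last entry, which is assumed to be the fully connected layer.
    for (auto it = model->begin(); it != std::prev(model->end()); ++it)
        z = it->forward(z);   // AnyModule::forward returns a Tensor by default
    return z;
}

Is there a cleaner, intended way to do this?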