Simultaneous training. From Keras to PyTorch

Hi,

How can simultaneous training of NNs that generate features for a final NN be implemented in PyTorch? It would look something like this in Keras:

import tensorflow as tf
import numpy as np

class Regressor(tf.keras.layers.Layer):

    def __init__(self, dims=[32, 8]):
        super(Regressor, self).__init__()

        self.dims = dims
        # Hidden Dense layers, followed by a single-unit output layer.
        for i, d in enumerate(self.dims):
            setattr(self, f'dense_{i}', tf.keras.layers.Dense(d))
        setattr(self, f'dense_{i+1}', tf.keras.layers.Dense(1))

    def call(self, inputs):

        x = inputs
        for i, _ in enumerate(self.dims):
            x = getattr(self, f'dense_{i}')(x)
            x = tf.nn.relu(x)
        x = getattr(self, f'dense_{i+1}')(x)
        x = tf.nn.sigmoid(x)

        return x


class FeatureRegressor(Regressor):

    def __init__(self, dims=[32, 8], latent_idx=1):
        super(FeatureRegressor, self).__init__(dims)
        self.latent_idx = latent_idx

    def call(self, inputs):

        x = inputs
        for i, _ in enumerate(self.dims):
            x = getattr(self, f'dense_{i}')(x)
            if i == self.latent_idx:
                # Keep the pre-activation output of this layer as the latent feature.
                latent = x
            x = tf.nn.relu(x)

        return latent, getattr(self, f'dense_{i+1}')(x)


class Model(tf.keras.Model):

    def __init__(self,
        input_dims=10,
        feature_regressor_dims=[32, 8],
        feature_latent_idx=1,
        target_regressor_dims=[32, 8]):
        super(Model, self).__init__()

        self.input_dims = input_dims
        self.feature_regressor_dims = feature_regressor_dims
        self.target_regressor_dims = target_regressor_dims

        # One feature regressor per input feature, each predicting that
        # feature from the remaining ones.
        for i in range(input_dims):
            setattr(self, f'feature_regressor_{i}', FeatureRegressor(feature_regressor_dims, feature_latent_idx))

        self.target_regressor = Regressor(target_regressor_dims)

    def call(self, inputs):

        # Perform feature regressor inference
        features_latens = []
        features_preds = []
        for f in range(self.input_dims):
            # Prepare input without target feature
            mask = np.array([d != f for d in range(self.input_dims)])
            input_feature = tf.boolean_mask(inputs, mask, axis=1)
            # Regress target feature
            feature_latent, feature_pred = getattr(self, f'feature_regressor_{f}')(input_feature)
            features_latens.append(feature_latent)
            features_preds.append(feature_pred)

        # Perform target regressor inference
        features_latens = tf.concat(features_latens, axis=-1)
        input_target = tf.concat([inputs, features_latens], axis=-1)
        target_pred = self.target_regressor(input_target)

        # Concat predictions
        output = tf.concat(features_preds + [target_pred], axis=-1)

        return output

Thanks!

I’m not sure what simultaneous training means here, as it seems like the overall model is still feedforward.

You can likely port most of this implementation over by simply pattern matching (e.g., tf.keras.layers.Layer → torch.nn.Module, def call(self, inputs) → def forward(self, x)).
torchvision has many reference model implementations that are good examples of this (e.g., ResNet: torchvision.models.resnet — Torchvision master documentation).
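
For concreteness, here's a rough sketch of what that pattern-matched port could look like. One caveat: unlike Keras' Dense, nn.Linear needs explicit input sizes, so the in_features arguments and the latent-size bookkeeping below are my own additions rather than a one-to-one translation.

import torch
import torch.nn as nn


class Regressor(nn.Module):
    def __init__(self, in_features, dims=(32, 8)):
        super().__init__()
        # Hidden layers, followed by a single-unit output layer.
        layers = []
        for d in dims:
            layers.append(nn.Linear(in_features, d))
            in_features = d
        self.hidden = nn.ModuleList(layers)
        self.out = nn.Linear(in_features, 1)

    def forward(self, x):
        for layer in self.hidden:
            x = torch.relu(layer(x))
        return torch.sigmoid(self.out(x))


class FeatureRegressor(Regressor):
    def __init__(self, in_features, dims=(32, 8), latent_idx=1):
        super().__init__(in_features, dims)
        self.latent_idx = latent_idx

    def forward(self, x):
        latent = None
        for i, layer in enumerate(self.hidden):
            x = layer(x)
            if i == self.latent_idx:
                latent = x  # pre-activation latent, as in the Keras version
            x = torch.relu(x)
        return latent, self.out(x)  # no sigmoid on the feature prediction, as above


class Model(nn.Module):
    def __init__(self, input_dims=10, feature_regressor_dims=(32, 8),
                 feature_latent_idx=1, target_regressor_dims=(32, 8)):
        super().__init__()
        self.input_dims = input_dims
        latent_dim = feature_regressor_dims[feature_latent_idx]
        # One regressor per input feature, each predicting that feature
        # from the remaining input_dims - 1 features.
        self.feature_regressors = nn.ModuleList([
            FeatureRegressor(input_dims - 1, feature_regressor_dims, feature_latent_idx)
            for _ in range(input_dims)
        ])
        # The final regressor sees the raw inputs plus all latent features.
        self.target_regressor = Regressor(
            input_dims + input_dims * latent_dim, target_regressor_dims
        )

    def forward(self, x):
        latents, preds = [], []
        for f, regressor in enumerate(self.feature_regressors):
            # Drop column f so this regressor predicts it from the other features.
            keep = [d for d in range(self.input_dims) if d != f]
            latent, pred = regressor(x[:, keep])
            latents.append(latent)
            preds.append(pred)
        target_in = torch.cat([x] + latents, dim=-1)
        target_pred = self.target_regressor(target_in)
        return torch.cat(preds + [target_pred], dim=-1)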

Thanks! @eqy. Didn’t know I could create and initialize other NNs in the forward method.

Regarding “simultaneous”, I didn’t use the best word here. The idea is to create a model for each feature and predict it using the other features, then use the last intermediate layer of each NN as input features to the final model that predicts the real target. So the final model gets the initial features plus features engineered by the other NNs.

The idea is from here: https://towardsdatascience.com/automated-feature-engineering-using-neural-networks-5310d6d4280a
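
To make the joint-training part concrete: since the whole graph is feedforward, the feature regressors and the target regressor can be trained together with a single combined loss and one backward pass. Below is a minimal, hypothetical training loop for the PyTorch sketch above; the MSE losses, their equal weighting, and the dummy data are my assumptions, not something prescribed by the article.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Dummy data just to keep the sketch self-contained; replace with a real dataset.
x_all = torch.rand(256, 10)
y_all = torch.rand(256, 1)
loader = DataLoader(TensorDataset(x_all, y_all), batch_size=32)

model = Model(input_dims=10)  # the PyTorch Model sketched above
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
mse = nn.MSELoss()

for x, y in loader:
    optimizer.zero_grad()
    output = model(x)  # (batch, 11): 10 feature predictions + the target prediction
    feature_preds = output[:, :-1]
    target_pred = output[:, -1:]
    # Each feature regressor is supervised by the feature it was asked to predict,
    # the target regressor by the real target; the equal weighting is arbitrary.
    loss = mse(feature_preds, x) + mse(target_pred, y)
    loss.backward()
    optimizer.step()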