Sequential model from config file with deconvolution

Hello!
I have a project with many different models, so I would like to store each model's structure in a config file. I wrote this code to generate a model, and it works quite well:

import torch
import torch.nn as nn

layers = [
    {"type": "Conv2d", "in_channels": 1, "out_channels": 6, "kernel_size": 3},
    {"type": "ReLU"},
    {"type": "Conv2d", "in_channels": 6, "out_channels": 16, "kernel_size": 3},
    {"type": "ReLU"},
    {"type": "Flatten"},
    {"type": "Linear", "in_features": 16 * 6 * 6, "out_features": 120},
    {"type": "ReLU"},
    {"type": "Linear", "in_features": 120, "out_features": 84},
    {"type": "ReLU"},
    {"type": "Linear", "in_features": 84, "out_features": 10},
    ]

def build_model(config):
    layer_list = []
    for layer_config in config:
        # Pop the class name and instantiate that class from torch.nn,
        # passing the remaining keys as keyword arguments.
        # Note: pop() mutates the config dicts, so copy them if you need
        # to build from the same config more than once.
        layer_type = layer_config.pop('type')
        layer_list.append(getattr(torch.nn, layer_type)(**layer_config))
    return nn.Sequential(*layer_list)

model = build_model(layers)
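
For reference, here is a quick sanity check of the generated model. The 1×10×10 input size is an assumption chosen so that two unpadded 3×3 convolutions shrink the spatial size from 10×10 to 6×6, matching the 16 * 6 * 6 expected by the first Linear layer:

# Sanity check (assumed input size: 1 channel, 10x10 -> 8x8 -> 6x6 after the convs)
dummy = torch.randn(4, 1, 10, 10)
out = model(dummy)
print(out.shape)  # torch.Size([4, 10])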

However, I also have models that need a “deconvolution network”, and for those I need a view/reshape layer between the fully connected and the convolutional parts. I am not sure how to handle that without too much trouble. Does anyone have an idea?

Examples of layers I would like to use:

layers = [
    {"type": "Linear", "in_features": 10, "out_features": 1024},
    {"type": "ReLU"},
    #  ? x = x.view(-1, 16, 64)
    {"type": "Conv1d", "in_channels": 16, "out_channels": 32, "kernel_size": 3},
    {"type": "ReLU"},
    {"type": "Conv1d", "in_channels": 32, "out_channels": 64, "kernel_size": 3},
    {"type": "ReLU"}
    # ...
    ]

Thanks!

I ended up using this code:

import torch
import torch.nn as nn
layers = [
    {"type": "Linear", "in_features": 10, "out_features": 1024},
    {"type": "ReLU"},
    {"type": "View", "shape": (-1, 16, 64)},
    {"type": "Conv1d", "in_channels": 16, "out_channels": 32, "kernel_size": 3},
    {"type": "ReLU"},
    {"type": "Conv1d", "in_channels": 32, "out_channels": 64, "kernel_size": 3},
    {"type": "ReLU"}
    # ...
    ]

class View(nn.Module):
    """Wraps Tensor.view so a reshape can be used as a layer inside nn.Sequential."""
    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(*self.shape)

def build_model(config):
    layer_list = []
    for layer_config in config:
        layer_type = layer_config.pop('type')
        if layer_type == "View":
            # Custom reshape module defined above; everything else is
            # resolved by name from torch.nn as before.
            layer = View(**layer_config)
        else:
            layer = getattr(torch.nn, layer_type)(**layer_config)
        layer_list.append(layer)
    return nn.Sequential(*layer_list)

model = build_model(layers)
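
As a quick shape check for this model (assuming the config stops at the second Conv1d, as in the snippet): the Linear layer maps (N, 10) to (N, 1024), View reshapes that to (N, 16, 64), and each unpadded kernel_size=3 Conv1d shortens the sequence length by 2:

dummy = torch.randn(4, 10)
out = model(dummy)
print(out.shape)  # torch.Size([4, 64, 60])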

Unfortunately it is really not as clean as before :confused:
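
If you are on a PyTorch version that ships torch.nn.Unflatten (1.7 or later), the reshape can be expressed as a regular torch.nn layer, so the original getattr-based build_model works without the special case. A minimal sketch of the same config written that way:

# Sketch assuming PyTorch >= 1.7: nn.Unflatten(dim, unflattened_size) splits
# one dimension into several, here (N, 1024) -> (N, 16, 64), so no custom
# View module is needed.
layers = [
    {"type": "Linear", "in_features": 10, "out_features": 1024},
    {"type": "ReLU"},
    {"type": "Unflatten", "dim": 1, "unflattened_size": (16, 64)},
    {"type": "Conv1d", "in_channels": 16, "out_channels": 32, "kernel_size": 3},
    {"type": "ReLU"},
    {"type": "Conv1d", "in_channels": 32, "out_channels": 64, "kernel_size": 3},
    {"type": "ReLU"},
    ]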