Trying to convert Keras code to PyTorch

############################################################################################
# SFTT Network
############################################################################################

# Imports assumed by the snippet below
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization, Activation,
                                     MaxPooling2D, Average, Flatten, TimeDistributed,
                                     Lambda, LSTM, Dense, Reshape)
from tensorflow.keras.models import Model

############################################################################################
# ResNet Identity Block
############################################################################################

def residual_block(inputs, filters, block_id):

    f1, f2, f3 = filters

    # Downsample once, then run the main path: 3x3 -> 1x1 -> 3x3 convolutions
    pool = MaxPooling2D(pool_size=(2,2), name='block_' + str(block_id) + '_max_pooling')(inputs)

    x = Conv2D(f1, kernel_size=(3,3), padding='same', use_bias=False, kernel_initializer='he_normal', name='block_' + str(block_id) + '_conv_conv2d_1')(pool)
    x = BatchNormalization(name='block_' + str(block_id) + '_conv_batch_1')(x)
    x = Activation('relu', name='block_' + str(block_id) + '_expand_relu')(x)

    x = Conv2D(f2, kernel_size=(1,1), padding='same', use_bias=False, kernel_initializer='he_normal', name='block_' + str(block_id) + '_conv_conv2d_2')(x)
    x = BatchNormalization(name='block_' + str(block_id) + '_conv_batch_2')(x)
    x = Activation('relu', name='block_' + str(block_id) + '_depthwise_relu')(x)

    x = Conv2D(f3, kernel_size=(3,3), padding='same', use_bias=False, kernel_initializer='he_normal', name='block_' + str(block_id) + '_project_conv2d')(x)
    x = BatchNormalization(name='block_' + str(block_id) + '_project_batch')(x)

    # Projection shortcut from the pooled input, merged by averaging instead of adding
    shortcut = Conv2D(f3, kernel_size=(3,3), padding='same', strides=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + str(block_id) + '_shortcut_conv2d')(pool)
    shortcut = BatchNormalization(name='block_' + str(block_id) + '_shortcut_batch')(shortcut)

    average = Average(name='block_' + str(block_id) + '_average')([shortcut, x])

    output = Activation('relu', name='block_' + str(block_id) + '_average_relu')(average)

    return output
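# Shape note (added): each block halves H and W via the 2x2 max pooling and
# projects the channels to f3, e.g. a (32, 32, 32) input with filters
# [32, 32, 128] comes out as (16, 16, 128).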

def SFTT_model(input_shape=(32, 32, 30)):

    map_height, map_width, feature_maps = input_shape

    ######################################################
    # Convolutional
    ######################################################

    Image = Input(shape=(map_height, map_width, feature_maps), name='input')

    x = Conv2D(32, kernel_size=(1,1), strides=(1,1), padding='same', kernel_initializer='he_normal', name='conv1')(Image)
    x = BatchNormalization(name='batch_1')(x)
    x = Activation('relu', name='relu_1')(x)

    x = residual_block(x, filters=[32, 32, 128], block_id=0)
    x = residual_block(x, filters=[64, 64, 256], block_id=1)
    x = residual_block(x, filters=[128, 128, 512], block_id=2)

    sf_output = MaxPooling2D(pool_size=(2,2), name='final_max_pooling')(x)
    sf_output = Flatten()(sf_output)
    conv_model = Model(inputs=Image, outputs=sf_output)

    ######################################################
    # LSTM
    ######################################################
    print("start LSTM ...")
    input_sequences = Input(shape=(None, map_height, map_width, feature_maps))
    time_distribute = TimeDistributed(Lambda(lambda x: conv_model(x)))(input_sequences)
    print(time_distribute.shape)
    lstm = LSTM(512, return_sequences=True)(time_distribute)
    lstm = LSTM(512, return_sequences=True)(lstm)
    print("lstm  ", lstm.shape)
    fc1 = Dense(map_height*map_width, activation='relu')(lstm)
    print("fc   ", fc1.shape)
    final_output = Reshape((map_height, map_width), name='squeeze_output')(fc1)
    print(final_output.shape)

    sftt_model = Model(inputs=[input_sequences], outputs=[final_output])

    return sftt_model

Can you please elaborate on your question in the post?

I'm trying to convert this Keras code to PyTorch, but I'm stuck on the TimeDistributed layer. Can you help me?

I haven't used TensorFlow a lot. However, you can follow something like the sketch below (a minimal version of what Keras's TimeDistributed does; the function name and shapes are my own illustration):
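import torch
import torch.nn as nn

def time_distributed(module: nn.Module, x: torch.Tensor) -> torch.Tensor:
    # x: (batch, time, channels, height, width)
    b, t = x.shape[:2]
    y = module(x.reshape(b * t, *x.shape[2:]))  # fold time into the batch dim
    return y.reshape(b, t, *y.shape[1:])        # unfold back into a sequence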

Or, if you post your PyTorch code along with the TensorFlow code, someone might be able to help you.


Here is my PyTorch code:

import torch
import torch.nn as nn
import torchvision
class TimeDistributed(nn.Module):
    def __init__(self, module, batch_first=False):
        super(TimeDistributed, self).__init__()
        self.module = module
        self.batch_first = batch_first

    def forward(self, x):

        if len(x.size()) <= 2:
            return self.module(x)

        # Squash samples and timesteps into a single axis, keeping any remaining
        # dims (e.g. C, H, W for image frames) so the wrapped module sees one
        # frame per "batch" element. The original view(-1, x.size(-1)) only
        # worked for vector inputs and broke on 5D image sequences.
        x_reshape = x.contiguous().view(-1, *x.size()[2:])  # (samples * timesteps, ...)

        y = self.module(x_reshape)

        # Reshape y back into a sequence
        if self.batch_first:
            y = y.contiguous().view(x.size(0), -1, *y.size()[1:])  # (samples, timesteps, ...)
        else:
            y = y.view(-1, x.size(1), *y.size()[1:])  # (timesteps, samples, ...)

        return y
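# Example (added; shapes only): flattening each frame of a 5-step sequence
#   td = TimeDistributed(nn.Flatten(), batch_first=True)
#   td(torch.randn(2, 5, 3, 8, 8)).shape  # -> torch.Size([2, 5, 192])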
class Residual(nn.Module):
  def __init__(self, in_channels, filters):
    super(Residual, self).__init__()

    self.in_channels = in_channels
    self.f1 = filters[0]
    self.f2 = filters[1]
    self.f3 = filters[2]

    self.maxpool = nn.MaxPool2d(kernel_size=(2,2))

    self.residual = nn.Sequential(
      # bias=False matches use_bias=False in the Keras convs (BatchNorm follows)
      nn.Conv2d(self.in_channels, self.f1, kernel_size=(3, 3), padding='same', bias=False),
      nn.BatchNorm2d(self.f1),
      nn.ReLU(),

      nn.Conv2d(self.f1, self.f2, kernel_size=(1, 1), padding='same', bias=False),
      nn.BatchNorm2d(self.f2),
      nn.ReLU(),

      nn.Conv2d(self.f2, self.f3, kernel_size=(3, 3), padding='same', bias=False),
      nn.BatchNorm2d(self.f3),
    )

    self.shortcut = nn.Sequential(
      nn.Conv2d(self.in_channels, self.f3, kernel_size=(3, 3), padding='same', stride=(1, 1), bias=False),
      nn.BatchNorm2d(self.f3),
    )


    self.activation = nn.ReLU(inplace=True)
  def forward(self, x):
    pool = self.maxpool(x)
    x = self.residual(pool)
    y = self.shortcut(pool)
    x = (x + y ) / 2.0
    x = self.activation(x)

    return x
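# Sanity check (added; shapes only): the pooling halves H and W, channels -> f3
#   block = Residual(in_channels=32, filters=[32, 32, 128])
#   block(torch.randn(1, 32, 32, 32)).shape  # -> torch.Size([1, 128, 16, 16])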
# SFTT model
class SFTT(nn.Module):
  def __init__(self):
    super(SFTT, self).__init__()

    
    self.conv2d1= nn.Conv2d(30,32,kernel_size=(1,1), stride=(1,1),padding='same')
    self.btch1 = nn.BatchNorm2d(32) 
    self.act1=nn.ReLU()
    self.res1 = Residual(in_channels=32, filters=[32, 32, 128])
    self.res2 = Residual(in_channels=128, filters=[64, 64, 256])
    self.res3 = Residual(in_channels=256,filters=[128, 128, 512])
    self.sf_output = nn.MaxPool2d(kernel_size=2)
    self.sf_outputfl = nn.Flatten()

    self.lstms = nn.LSTM(512,  512, num_layers=2, batch_first=True)

    self.flatten = nn.Flatten()

    self.fc1 = nn.Linear(4 *512, 32*32)

  def forward(self, x):
    # PyTorch tensors are channels-first: (batch, channels, height, width)
    bs, feature_maps, map_height, map_width = x.shape

    x = self.conv2d1(x)
    x = self.btch1(x)
    x = self.act1(x)
    x = self.res1(x)
    x = self.res2(x)
    x = self.res3(x)
    sf_output = self.sf_output(x)
    sf_output = self.sf_outputfl(sf_output)  # was self.sf_outputfl(x): flatten the pooled tensor
    print("=================================")
    print(x.shape)
    print("=================================")
    #input_sequences = torch.tensor((bs,feature_maps,map_height,map_width))
    print("after")    
    #time_distribute = TimeDistributed(torchvision.transforms.Lambda(lambda x: self.conv_model(x)))(input_sequences)

    #x, _ = self.lstms(x)
    #x = self.flatten(x)
    #x = self.fc1(x)
    return x.view(-1, 32, 32)

Something like this might work for your case:
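(A rough sketch, reusing your Residual module and the fixed TimeDistributed wrapper from above. I'm assuming input sequences shaped (batch, time, 30, 32, 32) and the layer sizes of the Keras model; treat it as a starting point rather than a drop-in.)

import torch
import torch.nn as nn

class SFTT(nn.Module):
  def __init__(self):
    super().__init__()
    # Per-frame CNN: conv1, the three residual blocks, final pooling, flatten
    self.cnn = nn.Sequential(
      nn.Conv2d(30, 32, kernel_size=1, stride=1),
      nn.BatchNorm2d(32),
      nn.ReLU(),
      Residual(in_channels=32, filters=[32, 32, 128]),
      Residual(in_channels=128, filters=[64, 64, 256]),
      Residual(in_channels=256, filters=[128, 128, 512]),
      nn.MaxPool2d(kernel_size=2),
      nn.Flatten(),                    # -> (N, 512 * 2 * 2) for 32x32 inputs
    )
    self.time_distributed = TimeDistributed(self.cnn, batch_first=True)
    # Two stacked LSTM(512) layers, as in the Keras model; the input size is
    # the flattened 2048-dim frame feature, not 512
    self.lstm = nn.LSTM(input_size=512 * 2 * 2, hidden_size=512,
                        num_layers=2, batch_first=True)
    self.fc = nn.Linear(512, 32 * 32)  # Dense(map_height * map_width)

  def forward(self, x):
    # x: (batch, time, channels=30, height=32, width=32)
    b, t = x.shape[:2]
    x = self.time_distributed(x)       # (batch, time, 2048)
    x, _ = self.lstm(x)                # (batch, time, 512)
    x = torch.relu(self.fc(x))         # (batch, time, 1024)
    return x.view(b, t, 32, 32)        # Reshape((32, 32)) applied per timestep

model = SFTT()
out = model(torch.randn(2, 5, 30, 32, 32))
print(out.shape)  # torch.Size([2, 5, 32, 32])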