# Convert a net from TF to Torch

Hi, I’m new to deep learning. I’m currently stuck converting a TensorFlow net to a PyTorch one, since I can’t find any “template” that explains how to go from TF to PyTorch.
The TensorFlow net is as follows:

```python
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.layers import Bidirectional, Multiply
from tensorflow.keras.layers import Concatenate, LSTM, GRU
from tensorflow.keras.models import Model

def dnn_model():
    # train is a (samples, timesteps, features) array defined elsewhere
    x_input = Input(shape=train.shape[-2:])

    x1 = Bidirectional(LSTM(units=768, return_sequences=True))(x_input)
    x2 = Bidirectional(LSTM(units=512, return_sequences=True))(x1)
    x3 = Bidirectional(LSTM(units=384, return_sequences=True))(x2)
    x4 = Bidirectional(LSTM(units=256, return_sequences=True))(x3)
    x5 = Bidirectional(LSTM(units=128, return_sequences=True))(x4)

    z2 = Bidirectional(GRU(units=384, return_sequences=True))(x2)

    z31 = Multiply()([x3, z2])
    z31 = BatchNormalization()(z31)
    z3 = Bidirectional(GRU(units=256, return_sequences=True))(z31)

    z41 = Multiply()([x4, z3])
    z41 = BatchNormalization()(z41)
    z4 = Bidirectional(GRU(units=128, return_sequences=True))(z41)

    z51 = Multiply()([x5, z4])
    z51 = BatchNormalization()(z51)
    z5 = Bidirectional(GRU(units=64, return_sequences=True))(z51)

    x = Concatenate(axis=2)([x5, z2, z3, z4, z5])

    x = Dense(units=128, activation='selu')(x)

    x_output = Dense(units=1)(x)

    model = Model(inputs=x_input, outputs=x_output,
                  name='DNN_Model')
    return model
```
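
Here’s a minimal way I exercise it, in case it matters (the array shape is just a placeholder for my real data):

```python
import numpy as np

# placeholder for the real training data: (samples, timesteps, features)
train = np.random.rand(32, 60, 80).astype('float32')

model = dnn_model()
model.summary()  # every layer returns sequences, so the output is (None, 60, 1)
```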

I’ve referred to both the TF docs and the PyTorch ones, but the parameters are quite different.

For example, `tensorflow.keras.layers.LSTM` has the parameter `units`, defined as the dimensionality of the output space. Which `torch.nn.LSTM` parameter does this correspond to?
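
From the docs it looks like `hidden_size` is the counterpart of `units` (both set the per-direction output width); the `64` input width below is just a made-up example:

```python
import tensorflow as tf
import torch

# Keras: `units` is the per-direction output dimensionality;
# the input width is inferred from the previous layer.
keras_lstm = tf.keras.layers.LSTM(units=768, return_sequences=True)

# PyTorch: `hidden_size` plays the same role, but `input_size`
# (the incoming feature width, 64 here as a placeholder) must be
# passed explicitly.
torch_lstm = torch.nn.LSTM(input_size=64, hidden_size=768)
```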

Could someone give me some guidance on this?

This doesn’t solve my issue, which is transferring a hand-written Keras network to a PyTorch structure. Thanks for sharing the repo, though.

I transferred it manually by referring to both the Keras and PyTorch docs step by step.

```python
import torch
from torch import nn
import torch.nn.functional as F


class DNNModel(nn.Module):
    def __init__(self, input_size):
        super().__init__()
        # Keras `units` maps to PyTorch `hidden_size`. A bidirectional
        # layer outputs 2 * hidden_size features, so every following
        # layer's input_size has to be doubled. batch_first=True matches
        # the Keras (batch, timesteps, features) layout.
        self.x1 = nn.LSTM(input_size=input_size, hidden_size=768, bidirectional=True, batch_first=True)
        self.x2 = nn.LSTM(input_size=768 * 2, hidden_size=512, bidirectional=True, batch_first=True)
        self.x3 = nn.LSTM(input_size=512 * 2, hidden_size=384, bidirectional=True, batch_first=True)
        self.x4 = nn.LSTM(input_size=384 * 2, hidden_size=256, bidirectional=True, batch_first=True)
        self.x5 = nn.LSTM(input_size=256 * 2, hidden_size=128, bidirectional=True, batch_first=True)

        self.z2 = nn.GRU(input_size=512 * 2, hidden_size=384, bidirectional=True, batch_first=True)

        # Keras BatchNormalization normalizes the last (feature) axis.
        # BatchNorm1d expects (batch, features, seq), hence the permutes
        # in forward(); num_features is 2 * hidden_size of the inputs.
        self.z31b = nn.BatchNorm1d(num_features=384 * 2)
        self.z3 = nn.GRU(input_size=384 * 2, hidden_size=256, bidirectional=True, batch_first=True)

        self.z41b = nn.BatchNorm1d(num_features=256 * 2)
        self.z4 = nn.GRU(input_size=256 * 2, hidden_size=128, bidirectional=True, batch_first=True)

        self.z51b = nn.BatchNorm1d(num_features=128 * 2)
        self.z5 = nn.GRU(input_size=128 * 2, hidden_size=64, bidirectional=True, batch_first=True)

        # 2 * (128 + 384 + 256 + 128 + 64) = 1920 concatenated features
        self.dense = nn.Linear(1920, 128)
        self.dense2 = nn.Linear(128, 1)

    def forward(self, x):
        # nn.LSTM/nn.GRU return (output, hidden); we only need output
        x1, _ = self.x1(x)
        x2, _ = self.x2(x1)
        x3, _ = self.x3(x2)
        x4, _ = self.x4(x3)
        x5, _ = self.x5(x4)

        z2, _ = self.z2(x2)

        z31 = torch.multiply(x3, z2)
        z31 = self.z31b(z31.permute(0, 2, 1)).permute(0, 2, 1)
        z3, _ = self.z3(z31)

        z41 = torch.multiply(x4, z3)
        z41 = self.z41b(z41.permute(0, 2, 1)).permute(0, 2, 1)
        z4, _ = self.z4(z41)

        z51 = torch.multiply(x5, z4)
        z51 = self.z51b(z51.permute(0, 2, 1)).permute(0, 2, 1)
        z5, _ = self.z5(z51)

        # concatenate along the feature dimension, like Concatenate(axis=2)
        c = torch.cat((x5, z2, z3, z4, z5), dim=2)
        d = F.selu(self.dense(c))

        output = self.dense2(d)
        return output
```
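
To sanity-check the dimensions (the part I was least sure about), a quick smoke test with random data; the 80-feature input is just a placeholder:

```python
model = DNNModel(input_size=80)
x = torch.rand(32, 60, 80)  # (batch, timesteps, features), same layout as Keras
out = model(x)
print(out.shape)            # torch.Size([32, 60, 1])
```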