I have implemented a hybdrid model with CNN & LSTM in both Keras and PyTorch, the network is composed by 4 layers of convolution with an output size of 64 and a kernel size of 5, followed by 2 LSTM layer with 128 hidden states, and then a Dense layer of 6 outputs for the classification.
In fact, i have juste implemented the DeepConvLSTM proposed here https://www.researchgate.net/publication/291172413_Deep_Convolutional_and_LSTM_Recurrent_Neural_Networks_for_Multimodal_Wearable_Activity_Recognition.
My problem is with PyTorch version, i’m getting around 18~19 % of accuracy, while Keras is giving 86~87%. I don’t understand why, i’m using the same parameters for both networks and the same optimizer RMSROP.
I also tried to use GRU instead of LSTM, but getting the same problem, seems like there is a probleme with the hybridation its selfs, but i can not figure it out.
here is my scripts
Keras version :
type or paste cdef ConvLSTM_Keras(input_shape): from keras.models import Sequential from keras.layers import Dense,Conv1D,LSTM model = Sequential() model.add(Conv1D(64, 5, activation='relu', input_shape=input_shape)) model.add(Conv1D(64, 5, activation='relu')) model.add(Conv1D(64, 5, activation='relu')) model.add(Conv1D(64, 5, activation='relu')) model.add(LSTM(128,return_sequences=True)) model.add(LSTM(128,return_sequences=False)) model.add(Dense(6, activation='softmax')) return model model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.rmsprop( learning_rate=0.001 ), metrics=['accuracy']) model.fit(x_train, y_train, epochs=20, batch_size=100, verbose=1, validation_data=(x_val, y_val))
def __init__(self): super(SimpleCNN, self).__init__() self.conv1 = torch.nn.Conv1d(in_channels=1, out_channels=64, kernel_size=5) self.conv2 = torch.nn.Conv1d(in_channels=64,out_channels=64, kernel_size=5) self.conv3 = torch.nn.Conv1d(in_channels=64, out_channels=64, kernel_size=5) self.conv4 = torch.nn.Conv1d(in_channels=64, out_channels=64, kernel_size=5) self.lstm1 = torch.nn.LSTM( input_size= 545, hidden_size=128, num_layers=2, ) self.fc2 = torch.nn.Linear(128, 6) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x,_ = self.lstm1(x) x = x[:, -1, :] x = self.fc2(x) return (x) import torch.optim as optim def createLossAndOptimizer(net, learning_rate=0.001): # Loss function loss = torch.nn.CrossEntropyLoss() # Optimizer optimizer = optim.rmsprop(net.parameters(), lr=learning_rate) return (loss, optimizer)
Hope you can help me, thanks.