Text Generation with LSTM-RNN

Hi everyone, can anybody help me convert this code from TensorFlow/Keras to PyTorch?

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense
from tensorflow.keras.callbacks import ModelCheckpoint

# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')


# define the checkpoint
filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]

model.fit(X, y, epochs=20, batch_size=128, callbacks=callbacks_list)

Hi. Something like this should work (note it doesn't handle mini-batching; the whole input is treated as a single batch):

import torch
from torch import nn
from torch import optim

# Dummy data: 32 sequences of 20 token indices (vocabulary of 100),
# with one binary label per sequence.
X = torch.randint(1, 100, (32, 20))
y = torch.randint(0, 2, (32, 1))

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.emb = nn.Embedding(100, 64)
        # nn.LSTM defaults to batch_first=False, i.e. (seq_len, batch, features)
        self.lstm = nn.LSTM(64, 256)
        self.dropout = nn.Dropout(p=0.2)
        self.linear = nn.Linear(256, 1)

    def forward(self, x):
        x = x.transpose(0, 1)    # (batch, seq_len) -> (seq_len, batch)
        out = self.emb(x)        # (seq_len, batch, 64)
        out, _ = self.lstm(out)  # (seq_len, batch, 256)
        out = self.dropout(out)
        out = self.linear(out)   # (seq_len, batch, 1)
        return torch.sigmoid(out)

model = Model()
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters())

for epoch in range(20):
    out = model(X)
    # max-pool over the time dimension to get one prediction per sequence
    out, _ = torch.max(out, dim=0)
    loss = criterion(out, y.float())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f"epoch {epoch}: loss {loss.item():.4f}")
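
If you want something closer to the original Keras model (a softmax over y.shape[1] classes trained with categorical_crossentropy, plus ModelCheckpoint-style saving of the best weights), a minimal sketch could look like the one below. Note that nn.CrossEntropyLoss expects raw logits and applies log-softmax internally, and that the vocabulary size, class count, and checkpoint filename here are made-up placeholders, not values from the original code:

import torch
from torch import nn
from torch import optim

# Placeholder sizes: vocabulary of 100 tokens, 50 output classes.
vocab_size, n_classes = 100, 50
X = torch.randint(0, vocab_size, (32, 20))
y = torch.randint(0, n_classes, (32,))

class CharModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, 64)
        # batch_first=True keeps tensors as (batch, seq_len, features)
        self.lstm = nn.LSTM(64, 256, batch_first=True)
        self.dropout = nn.Dropout(p=0.2)
        self.linear = nn.Linear(256, n_classes)

    def forward(self, x):
        out = self.emb(x)        # (batch, seq, 64)
        out, _ = self.lstm(out)  # (batch, seq, 256)
        out = out[:, -1, :]      # last timestep only, like Keras LSTM without return_sequences
        out = self.dropout(out)
        return self.linear(out)  # raw logits; CrossEntropyLoss applies softmax internally

model = CharModel()
criterion = nn.CrossEntropyLoss()  # equivalent of softmax + categorical_crossentropy
optimizer = optim.Adam(model.parameters())

best_loss = float('inf')
for epoch in range(20):
    optimizer.zero_grad()
    loss = criterion(model(X), y)
    loss.backward()
    optimizer.step()
    # rough equivalent of ModelCheckpoint(monitor='loss', save_best_only=True, mode='min')
    if loss.item() < best_loss:
        best_loss = loss.item()
        torch.save(model.state_dict(), f"weights-improvement-{epoch:02d}-{loss.item():.4f}.pt")

Saving the state_dict whenever the training loss improves mirrors save_best_only=True with monitor='loss'; you can reload it later with model.load_state_dict(torch.load(path)).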