Hi there. I'm new to PyTorch and I'm trying to predict membrane protein topology with an LSTM, but I think I have an issue with the embedding layer.
I set embedding_dim = 64, but the feature dimension the LSTM receives is much larger, and it seems to grow with the sequence length. If I enlarge the embedding dim empirically to match it, I run out of RAM.
I took the data from the TOPCONS website and made a DataFrame with two columns (the first for the protein sequence, the second for its topology labels). Then I converted both to integers, padded them, and built tensors and DataLoader batches.
The error says "Expected 64, got 84544". The maximum sequence length is 1321, and 84544 / 64 = 1321. Having made this discovery, how can I resolve the issue?
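To double-check where 84544 comes from, I reproduced the shape blow-up in isolation (a minimal sketch with a dummy batch of two all-zero index sequences at my real max length of 1321):

import torch
import torch.nn as nn

emb = nn.Embedding(21, 64, padding_idx=20)      # same vocab size and embedding_dim as my model
batch = torch.zeros(2, 1321, dtype=torch.long)  # (batch, seq_len) padded index tensor
out = emb(batch).view(len(batch), 1, -1)        # the same .view as in _get_lstm_features below
print(out.shape)                                # torch.Size([2, 1, 84544]) -> 1321 * 64 = 84544

So the .view seems to fold the whole sequence dimension into the feature dimension.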
Here is the error, and below it the code.
Can you help me, please?
RuntimeError Traceback (most recent call last)
<ipython-input-20-c13029126c88> in <module>
15 # Step 3. Run our forward pass.
16
---> 17 output = model(sentence_in)
18
19 loss = model.neg_log_likelihood(output, targets)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-4-68e616720828> in forward(self, sentence)
135 # Get the emission scores from the BiLSTM
136
--> 137 lstm_feats = self._get_lstm_features(sentence)
138
139 # Find the best path, given the features.
<ipython-input-4-68e616720828> in _get_lstm_features(self, sentence)
67 #sentence = sentence.transpose(1, 0)#
68 embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
---> 69 lstm_out, self.hidden = self.lstm(embeds, self.hidden)
70 lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
71 lstm_feats = self.hidden2tag(lstm_out)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
770 hx = self.permute_hidden(hx, sorted_indices)
771
--> 772 self.check_forward_args(input, hx, batch_sizes)
773 if batch_sizes is None:
774 result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/rnn.py in check_forward_args(self, input, hidden, batch_sizes)
695 batch_sizes: Optional[Tensor],
696 ):
--> 697 self.check_input(input, batch_sizes)
698 self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
699 'Expected hidden[0] size {}, got {}')
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/rnn.py in check_input(self, input, batch_sizes)
208 expected_input_dim, input.dim()))
209 if self.input_size != input.size(-1):
--> 210 raise RuntimeError(
211 'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
212 self.input_size, input.size(-1)))
RuntimeError: input.size(-1) must be equal to input_size. Expected 64, got 84544
Code:
import os
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import random_split, DataLoader, TensorDataset
with open("TM.3line") as f:
lines = f.readlines()
sequences = []
labels = []
current_sequence = ""
current_label = ""
for line in lines:
if line.startswith(">"):
if current_sequence:
sequences.append(current_sequence)
labels.append(current_label)
current_sequence = ""
current_label = ""
elif line.startswith("M"):
current_sequence = line.strip()
else:
current_label += line.strip()
sequences.append(current_sequence)
labels.append(current_label)
with open("output_file.csv", "w") as f:
f.write("sequence,label\n")
for i in range(len(sequences)):
f.write(f"{sequences[i]},{labels[i]}\n")
def argmax(vec):
    # return the argmax as a python int
    _, idx = torch.max(vec, 1)
    return idx.item()

def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)
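# Quick usage check for prepare_sequence with a toy mapping (hypothetical example):
#   prepare_sequence("MKT", {"M": 0, "K": 1, "T": 2})  ->  tensor([0, 1, 2])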
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + \
        torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
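# Sanity check: for the 1 x N tensors used below, this should agree with the
# built-in, e.g.
#   v = torch.randn(1, 6)
#   assert torch.allclose(log_sum_exp(v), torch.logsumexp(v, dim=1)[0])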
class BiLSTM_CRF(nn.Module):

    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        super(BiLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_ix
        self.tagset_size = len(tag_to_ix)

        self.word_embeds = nn.Embedding(vocab_size, embedding_dim, padding_idx=20)
        # hidden_dim // 2 per direction so the bidirectional output is hidden_dim,
        # matching hidden2tag below (as in the original PyTorch tutorial)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True, batch_first=True)

        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = nn.Parameter(
            torch.randn(self.tagset_size, self.tagset_size))

        # These two statements enforce the constraint that we never transfer
        # to the start tag and we never transfer from the stop tag
        self.transitions.data[tag_to_ix[START_TAG], :] = -10000
        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000

        self.hidden = self.init_hidden()

    def init_hidden(self):
        return (torch.randn(2, 1, self.hidden_dim // 2),
                torch.randn(2, 1, self.hidden_dim // 2))
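    # Note: nn.LSTM expects h0/c0 of shape (num_layers * num_directions, batch,
    # hidden_size) even when batch_first=True, so this hidden state only fits a
    # batch of size 1.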
    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        init_alphas = torch.full((1, self.tagset_size), -10000.)
        # START_TAG has all of the score.
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0

        # Wrap in a variable so that we will get automatic backprop
        forward_var = init_alphas

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward tensors at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].view(
                    1, -1).expand(1, self.tagset_size)
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].view(1, -1)
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = forward_var + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var).view(1))
            forward_var = torch.cat(alphas_t).view(1, -1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha
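    # (_forward_alg returns log Z, the log of the partition function summed
    # over all possible tag paths; neg_log_likelihood subtracts the gold path
    # score from it.)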
    def _get_lstm_features(self, sentence):
        self.hidden = self.init_hidden()
        # sentence = sentence.transpose(1, 0)
        # For a batched (batch, seq_len) input, word_embeds gives
        # (batch, seq_len, embedding_dim); this .view then folds the whole
        # sequence into the last dimension: (batch, 1, seq_len * embedding_dim).
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats
    def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence
        score = torch.zeros(1)
        tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        return score
    def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        init_vvars = torch.full((1, self.tagset_size), -10000.)
        init_vvars[0][self.tag_to_ix[START_TAG]] = 0

        # forward_var at step i holds the viterbi variables for step i-1
        forward_var = init_vvars
        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = forward_var + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
            # Now add in the emission scores, and assign forward_var to the set
            # of viterbi variables we just computed
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path
    def neg_log_likelihood(self, sentence, tags):
        feats = self._get_lstm_features(sentence)
        forward_score = self._forward_alg(feats)
        gold_score = self._score_sentence(feats, tags)
        return forward_score - gold_score
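    # Note: neg_log_likelihood takes the raw index tensor (the same input that
    # forward() takes), not the (score, tag_seq) tuple that forward() returns.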
    def forward(self, sentence):  # don't confuse this with _forward_alg above.
        # Get the emission scores from the BiLSTM
        lstm_feats = self._get_lstm_features(sentence)

        # Find the best path, given the features.
        score, tag_seq = self._viterbi_decode(lstm_feats)
        return score, tag_seq
df = pd.read_csv('output_file.csv')
training_data = [(row.sequence, row.label) for row in df.itertuples()]

word_to_ix = {}
for sentence, tags in training_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
START_TAG = "<START>"
STOP_TAG = "<STOP>"
PAD_TAG = '5'
EMBEDDING_DIM = 64
HIDDEN_DIM = 20
num_epochs = 15
batch_size = 16
vocab_size = 21
tag_to_ix = {"O": 0, "I": 1, "M": 2, START_TAG: 3, STOP_TAG: 4, PAD_TAG: 5}
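# Note: PAD_TAG is the literal string '5' mapped to index 5; the targets are
# padded with the same value (padding_value=5) below.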
# Prepare input data
sentence_in = []
targets = []
for sentence, tags in training_data:
    sentence_in.append(prepare_sequence(sentence, word_to_ix))
    targets.append([tag_to_ix[t] for t in tags])

# Pad each sentence to the same length
sentence_in = pad_sequence(sentence_in, padding_value=20, batch_first=True)
# Pad the label sequences to the same length as well
targets = pad_sequence([torch.tensor(t) for t in targets], padding_value=5, batch_first=True)

sentence_in = sentence_in.long()
targets = targets.long()
# Create dataset and dataloader
dataset = TensorDataset(sentence_in, targets)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
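# Quick sanity check on one batch (shapes depend on my data; 1321 is the max length):
xb, yb = next(iter(dataloader))
print(xb.shape, yb.shape)  # e.g. torch.Size([16, 1321]) torch.Size([16, 1321])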
model = BiLSTM_CRF(vocab_size, tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-4)
for epoch in range(num_epochs):
    for i, batch in enumerate(dataloader):
        sentence_in, targets = batch

        # Step 1. Remember that PyTorch accumulates gradients.
        # We need to clear them out before each batch.
        model.zero_grad()

        # Step 3. Run our forward pass.
        output = model(sentence_in)

        loss = model.neg_log_likelihood(output, targets)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step()
        loss.backward()
        optimizer.step()

    # Save a checkpoint after each epoch
    with torch.no_grad():
        SAVE_DIR = '/content/'
        # https://discuss.pytorch.org/t/how-to-save-a-model-from-a-previous-epoch/20252/6
        path = os.path.join(SAVE_DIR, 'model.pth')
        torch.save(model.cpu().state_dict(), path)  # saving model
        # model.cuda()  # moving model to GPU for further training

    print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(training_data) // batch_size}], Loss: {loss.item():.4f}')
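While debugging, I also sketched a batched version of _get_lstm_features (just my attempt, assuming batch_first=True, a fresh hidden state per call, and the hidden_dim // 2 sizing from the tutorial). Is this the right direction?

    def _get_lstm_features_batched(self, sentence):
        # sentence: (batch, seq_len) tensor of amino-acid indices
        batch_size = sentence.size(0)
        embeds = self.word_embeds(sentence)  # (batch, seq_len, embedding_dim), no flattening .view
        hidden = (torch.randn(2, batch_size, self.hidden_dim // 2),
                  torch.randn(2, batch_size, self.hidden_dim // 2))
        lstm_out, hidden = self.lstm(embeds, hidden)  # (batch, seq_len, hidden_dim)
        return self.hidden2tag(lstm_out)              # (batch, seq_len, tagset_size)

The CRF parts (_forward_alg, _score_sentence, _viterbi_decode) still assume a single unbatched sequence, so even with this I'm not sure how the rest of the model should change.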