Segmentation Fault on PyTorch LSTM

When I train my model, I get the following message:

Segmentation fault (core dumped)

I have never had such an issue with PyTorch and I'm a bit lost. (Torch version: torch==1.7.1+cpu)

import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader

class MyModel(nn.Module):
    def __init__(self, input_size, hidden_size, seq_size, num_layers):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_size = seq_size
        self.num_layers = num_layers
        
        self.lstm = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers)
        self.linear = nn.Linear(self.hidden_size, self.input_size)

    def reset_hidden(self):
        # Zero-initialize (h_0, c_0). With the default batch_first=False,
        # nn.LSTM expects hidden states of shape (num_layers, batch, hidden_size),
        # so seq_size is acting as the batch dimension here.
        self.hidden = (torch.zeros(self.num_layers, self.seq_size, self.hidden_size),
                       torch.zeros(self.num_layers, self.seq_size, self.hidden_size))

    def forward(self, sequences):
        # With the default batch_first=False, the LSTM reads the reshaped input
        # as (seq_len, batch, input_size).
        lstm_out, self.hidden = self.lstm(sequences.view(len(sequences), self.seq_size, -1), self.hidden)
        # Reshape, take the last slice, and map it back to input_size features.
        return self.linear(lstm_out.view(self.seq_size, len(sequences), self.hidden_size)[-1])
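
# Shape trace for one training step, using the hyperparameters passed in main()
# below (input_size=10, hidden_size=5, seq_size=7, num_layers=2, one window per call):
#   sequences  -> (1, 7, 10), read by the LSTM as (seq_len=1, batch=7, input=10)
#   lstm_out   -> (1, 7, 5)
#   [-1] slice -> (1, 5), after the view to (7, 1, 5)
#   output     -> (1, 10), one predicted day of 10 features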



# data must be converted into sequences
# sequence offset will be a single day
# sequences are 7 days long
def sequencer(data, seq_size=7):
    x = list()
    y = list()

    # Each window of seq_size consecutive rows predicts the row that follows it.
    for i in range(len(data) - seq_size - 1):
        x.append(data[i:(i + seq_size)])
        y.append(data[i + seq_size])
    return np.array(x), np.array(y)
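
# Sanity check on toy data: 10 rows of 3 features with seq_size=7 gives
# range(10 - 7 - 1) = 2 windows, so x has shape (2, 7, 3) and y has shape (2, 3).
# Note the -1 leaves the last possible window unused:
#   _x, _y = sequencer(np.zeros((10, 3)), seq_size=7)
#   assert _x.shape == (2, 7, 3) and _y.shape == (2, 3)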


def main():

    df = pd.read_csv("daily_data.csv", index_col=0)
    df = df.drop(df.index[-1])
    df = df.fillna(value=0) # fill missing sentiment and counts, which are zero valued
    
    sequences = sequencer(df.values, seq_size=7)
    print(sequences[0].shape)
    
    X = torch.tensor(sequences[0][:200], dtype=torch.float)
    Y = torch.tensor(sequences[1][:200], dtype=torch.float)
    
    #train_loaderx = DataLoader(X, batch_size=5, shuffle=False)
    #train_loadery = DataLoader(Y, batch_size=5, shuffle=False)
    
    model = MyModel(10, 5, 7, 2)  # input_size=10 assumes daily_data.csv has 10 feature columns
    
    loss_function = nn.MSELoss(reduction="sum")
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    num_epochs = 1
    
    for i in range(1, num_epochs + 1):
        # Train on one 7-day window at a time (effectively batch size 1).
        for seqx, seqy in zip(X, Y):
            seqx = seqx.view(1, seqx.size(0), seqx.size(1))  # (1, 7, 10)
            seqy = seqy.view(1, seqy.size(0))                # (1, 10)
            model.reset_hidden()
            optimizer.zero_grad()
            y_pred = model(seqx)
            loss = loss_function(y_pred, seqy)
            print(y_pred.shape)
            loss.backward()
            optimizer.step()


if __name__ == "__main__":
    main()

Could you try to get the backtrace from gdb via:

gdb --args python script.py args
...
run
...
bt

and post it here, please?
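
If gdb isn't handy, Python's built-in faulthandler module can also dump the Python-level stack at the point of the crash (a lighter-weight alternative to a full native backtrace):

python -X faulthandler script.py args

or, equivalently, add this at the top of the script:

import faulthandler
faulthandler.enable()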


Thanks for the swift reply. In the meantime, I already solved the issue by reinstalling PyTorch, so I can't post the backtrace here. It was probably a bad install. Thanks!
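
For anyone who hits the same thing, a clean reinstall of the CPU build looks roughly like this (the wheel index URL below is the one pytorch.org listed for the 1.7 series; exact commands depend on your environment):

pip uninstall torch
pip install torch==1.7.1+cpu -f https://download.pytorch.org/whl/torch_stable.html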