Segmentation fault (core dumped) with custom autograd.Function

Hi,

I have an issue I am not able to solve: a segmentation fault that happens at seemingly random times during the execution of my scripts.
I have implemented a custom autograd Function. When I use it on CPU everything goes well, but on GPU the script sometimes dies with a segmentation fault (core dumped) at a seemingly random point of the execution.
Is there anybody here who could help me?

I have run it with gdb and obtained the following stack trace:

Thread 6 "python" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fff60dff700 (LWP 20034)]
0x00007fffb32f6505 in void std::__push_heap<__gnu_cxx::__normal_iterator<torch::autograd::FunctionTask*, std::vector<torch::autograd::FunctionTask, std::allocator<torch::autograd::FunctionTask> > >, long, torch::autograd::FunctionTask, __gnu_cxx::__ops::_Iter_comp_val<torch::autograd::CompareFunctionTaskTime> >(__gnu_cxx::__normal_iterator<torch::autograd::FunctionTask*, std::vector<torch::autograd::FunctionTask, std::allocator<torch::autograd::FunctionTask> > >, long, long, torch::autograd::FunctionTask, __gnu_cxx::__ops::_Iter_comp_val<torch::autograd::CompareFunctionTaskTime>) [clone .isra.264] ()
   from /home/awehenkel/miniconda3/envs/NODE/lib/python3.6/site-packages/torch/lib/libtorch.so.1
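
In case it helps, here is a minimal sketch of what I could also add at the very top of the script to get a Python-level traceback next to the native one; faulthandler is part of the standard library, and CUDA_LAUNCH_BLOCKING would only help if the crash actually originates in an asynchronous CUDA call, which I am not sure about:

import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # make CUDA launches synchronous; must be set before CUDA is initialised

import faulthandler
faulthandler.enable()  # dump the Python traceback of every thread on SIGSEGV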

Also here is the code of my autograd function:

import torch
import numpy as np
import math


def _flatten(sequence):
    flat = [p.contiguous().view(-1) for p in sequence]
    return torch.cat(flat) if len(flat) > 0 else torch.tensor([])


def compute_cc_weights(nb_steps):
    # Clenshaw-Curtis quadrature weights and the Chebyshev nodes cos(i*pi/nb_steps) on [-1, 1]
    lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
    lam = np.cos((lam @ lam.T) * math.pi / nb_steps)
    lam[:, 0] = .5
    lam[:, -1] = .5 * lam[:, -1]
    lam = lam * 2 / nb_steps
    W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
    W[np.arange(1, nb_steps + 1, 2)] = 0
    W = 2 / (1 - W ** 2)
    W[0] = 1
    W[np.arange(1, nb_steps + 1, 2)] = 0
    cc_weights = torch.tensor(lam.T @ W).float()
    steps = torch.tensor(np.cos(np.arange(0, nb_steps + 1, 1).reshape(-1, 1) * math.pi / nb_steps)).float()

    return cc_weights, steps


def integrate(x0, nb_steps, step_sizes, integrand, h, compute_grad=False, x_tot=None):
    # Clenshaw-Curtis quadrature method
    cc_weights, steps = compute_cc_weights(nb_steps)

    device = "cuda:0" if x0.is_cuda else "cpu"
    cc_weights, steps = cc_weights.to(device), steps.to(device)

    if compute_grad:
        g_param = 0.
        g_h = 0.
    else:
        z = 0.
    xT = x0 + nb_steps*step_sizes
    for i in range(nb_steps + 1):
        x = (x0 + (xT - x0)*(steps[i] + 1)/2)
        if compute_grad:
            dg_param, dg_h = computeIntegrand(x, h, integrand, x_tot*(xT - x0)/2)
            g_param += cc_weights[i]*dg_param
            g_h += cc_weights[i]*dg_h
        else:
            dz = integrand(x, h)
            z = z + cc_weights[i]*dz

    if compute_grad:
        return g_param, g_h

    return z*(xT - x0)/2


def computeIntegrand(x, h, integrand, x_tot):
    # Vector-Jacobian products of the integrand output w.r.t. the network parameters and h, weighted by x_tot
    with torch.enable_grad():
        f = integrand.forward(x, h)
        g_param = _flatten(torch.autograd.grad(f, integrand.parameters(), x_tot, create_graph=True, retain_graph=True))
        g_h = _flatten(torch.autograd.grad(f, h, x_tot))

    return g_param, g_h


class NeuralIntegral(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x0, x, integrand, flat_params, h, nb_steps=20):
        with torch.no_grad():
            x_tot = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, False)
            # Save for backward
            ctx.integrand = integrand
            ctx.nb_steps = nb_steps
            ctx.save_for_backward(x0.clone(), x.clone(), h)
        return x_tot

    @staticmethod
    def backward(ctx, grad_output):
        x0, x, h = ctx.saved_tensors
        integrand = ctx.integrand
        nb_steps = ctx.nb_steps
        integrand_grad, h_grad = integrate(x0, nb_steps, x/nb_steps, integrand, h, True, grad_output)
        x_grad = integrand(x, h)
        x0_grad = integrand(x0, h)
        # Leibniz formula
        return -x0_grad*grad_output, x_grad*grad_output, None, integrand_grad, h_grad.view(h.shape), None
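
For what it is worth, the quadrature part is easy to sanity-check in isolation on CPU with something like the snippet below (the _square helper is only for this check, it is not part of my model); Clenshaw-Curtis with 20 steps should integrate x^2 over [0, 1] essentially exactly:

def _square(x, h):  # dummy integrand for the check only; ignores h
    return x ** 2

x0 = torch.zeros(1, 1)
x1 = torch.ones(1, 1)
approx = integrate(x0, 20, (x1 - x0) / 20, _square, h=None)
print(approx.item())  # expected ~0.3333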

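For completeness, this is roughly how I call the function; the ToyIntegrand module below is only a stand-in for my real integrand network (which takes (x, h) and returns one value per row), so the sizes are arbitrary:

import torch
import torch.nn as nn

class ToyIntegrand(nn.Module):  # stand-in for the real integrand network
    def __init__(self, h_dim=4):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(1 + h_dim, 16), nn.Tanh(), nn.Linear(16, 1))

    def forward(self, x, h):
        return self.net(torch.cat([x, h], dim=1))

device = "cuda:0" if torch.cuda.is_available() else "cpu"
integrand = ToyIntegrand().to(device)
x0 = torch.zeros(8, 1, device=device)
x = torch.rand(8, 1, device=device)
h = torch.randn(8, 4, device=device, requires_grad=True)

out = NeuralIntegral.apply(x0, x, integrand, _flatten(integrand.parameters()), h, 20)
out.sum().backward()  # exercises the custom backward above
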
Many thanks in advance!

Update: the problem seems to be solved by updating from v1.0 to v1.0.1.
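
For anyone hitting the same thing, a quick way to confirm which build is actually running:

import torch
print(torch.__version__, torch.version.cuda)  # PyTorch version and the CUDA toolkit version the wheel was built with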