[resolved] RuntimeError: there are no graph nodes that require computing gradients

Hi everyone,

I’m trying to implement the model below, but when I run training I get this error:

RuntimeError: there are no graph nodes that require computing gradients

This is my code for the model:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, blocks=4):
        super(Net, self).__init__()

        self.blocks = blocks

        # encoder: strided convs that halve the length while growing the
        # channel count; remember each block's input channels for the
        # decoder's skip connections
        self.downconvs = nn.ModuleList()
        self.downbatchnorms = nn.ModuleList()
        stack_channel_dims = []

        for i in range(self.blocks):
            in_channels = i*2+1
            out_channels = (i+1)*2+1
            kernel = 9
            stride = 2
            padding = 4

            stack_channel_dims.append(in_channels)

            self.downconvs.append(nn.Conv1d(in_channels, out_channels, kernel, stride, padding))
            self.downbatchnorms.append(nn.BatchNorm1d(out_channels))

        # decoder: stride-1 convs that double the channels, which the
        # forward pass then reshapes into double the length
        self.upconvs = nn.ModuleList()
        self.upbatchnorms = nn.ModuleList()
        last_channel_dim = 0

        for i in range(self.blocks):
            kernel = 9
            stride = 1
            padding = 4

            if i == 0:
                bottleneck_dim = self.blocks*2+1
                last_channel_dim = bottleneck_dim
                self.upconvs.append(nn.Conv1d(bottleneck_dim, bottleneck_dim*2, kernel, stride, padding))
                self.upbatchnorms.append(nn.BatchNorm1d(bottleneck_dim*2))
                continue

            in_channels = last_channel_dim + stack_channel_dims[self.blocks - i]
            out_channels = in_channels*2
            last_channel_dim = in_channels

            self.upconvs.append(nn.Conv1d(in_channels, out_channels, kernel, stride, padding))
            self.upbatchnorms.append(nn.BatchNorm1d(out_channels))

        # kernel, stride and padding still hold the values from the last
        # decoder iteration (9, 1, 4); the +1 is for the raw input that
        # gets concatenated in the final skip connection
        self.finalconv = nn.Conv1d(last_channel_dim+1, 1, kernel, stride, padding)

    def forward(self, x):
        residual_input = x
        residual_layers = []  # encoder activations saved for the skip connections

        for i in range(self.blocks):
            residual_layers.append(x)
            x = self.downconvs[i](x)
            x = self.downbatchnorms[i](x)
            x = F.relu(x)

        for i in range(self.blocks):
            x = self.upconvs[i](x)
            x = self.upbatchnorms[i](x)
            x = F.relu(x)

            # sub-pixel style upsample: fold half the channels into twice
            # the length, then concatenate the matching encoder activation
            x = x.view(x.size(0), x.size(1) // 2, x.size(2) * 2)
            x = torch.cat((x, residual_layers[self.blocks - i - 1]), 1)

        x = self.finalconv(x)

        x += residual_input  # global residual connection
        return x
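
The forward pass itself seems to run fine on its own. As a sanity check (assuming a 1-channel input whose length is a multiple of 2**blocks, e.g. 1024, and a batch of at least 2 so BatchNorm1d has statistics to compute):

import torch
from torch.autograd import Variable

net = Net()
dummy = Variable(torch.randn(2, 1, 1024))  # (batch, channels, length) -- both values are just for illustration
out = net(dummy)
print(out.size())  # torch.Size([2, 1, 1024]) -- same shape as the input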

This is the code I’m using to train:

import torch.optim as optim
from torch.autograd import Variable

net = Net()
criterion = nn.SmoothL1Loss()
optimizer = optim.Adam(net.parameters())

audio_input, audio_target = dataset.get_next_minibatch()
audio_input = Variable(audio_input)
audio_target = Variable(audio_target)

optimizer.zero_grad()
audio_output = net(audio_input)
loss = criterion(audio_input, audio_target)
loss.backward()
optimizer.step()

Please let me know if there’s anything wrong with it. Thanks.

Your loss is computed from audio_input and audio_target, neither of which requires gradients, so there is no graph for backward() to traverse. This line:

loss = criterion(audio_input, audio_target)

should be:

loss = criterion(audio_output, audio_target)
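
For reference, the full training step with only that one line changed (everything else exactly as in your post):

optimizer.zero_grad()
audio_output = net(audio_input)
loss = criterion(audio_output, audio_target)  # loss is now a function of the model's parameters
loss.backward()
optimizer.step()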

Ah right. My bad, thanks!