Global trainable parameter

import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

embedding_dim = 10
margin = 1

# Read data (tab-separated files; the third column is not used below)
trainFilename = 'base'
testFilename = 'test'

# Name the columns so that train.uid / train.iid below resolve
train = pd.read_csv(trainFilename, header=None, sep='\t', names=['uid', 'iid', 'rating'], usecols=[0, 1, 2])
test = pd.read_csv(testFilename, header=None, sep='\t', names=['uid', 'iid', 'rating'], usecols=[0, 1, 2])

# nn.Embedding indexes rows by id, so this assumes ids run from 0 to N-1
numUsers = len(set(train.uid.unique()).union(set(test.uid.unique())))
numItems = len(set(train.iid.unique()).union(set(test.iid.unique())))

class modeler(nn.Module):
    def __init__(self, numUsers, numItems, embedding_dim):
        super(modeler, self).__init__()
        self.userEmbed = nn.Embedding(numUsers, embedding_dim)
        self.itemEmbed = nn.Embedding(numItems, embedding_dim)
        # Global relation vector, shared across all (user, item) pairs
        self.rel = nn.Parameter(torch.randn(1, embedding_dim), requires_grad=True)

    def forward(self, head, tail):
        userEmbeds = self.userEmbed(head)
        itemEmbeds = self.itemEmbed(tail)
        rel = self.rel

        # One score per (head, tail) pair
        out = (userEmbeds + rel - itemEmbeds).sum(1)

        return out

losses = []
model = modeler(numUsers, numItems, embedding_dim)
optimizer = optim.SGD(model.parameters(), lr=0.01)

Given the above code, I also want to train the variable 'rel'.

How can I do this?

I think rel is trained: since it is created as an nn.Parameter, it is registered on the module and returned by model.parameters(), so the optimizer should update it.
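You can verify that it is registered, for example:

for name, param in model.named_parameters():
    print(name, param.shape, param.requires_grad)
# should list 'rel' alongside 'userEmbed.weight' and 'itemEmbed.weight'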

Actually, it does not get updated… Does anyone know why?

You can print rel.grad to see what happens after loss.backward(): if it is None, rel is not in the computation graph at all; if it is all zeros, rel is in the graph but the loss does not actually depend on it.
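A minimal check along those lines, assuming a margin-ranking loss and some hypothetical id tensors that are not in the original post:

heads = torch.tensor([0, 1, 2])     # hypothetical user ids
posTails = torch.tensor([3, 4, 5])  # hypothetical observed items
negTails = torch.tensor([6, 7, 8])  # hypothetical negative samples

posScore = model(heads, posTails)
negScore = model(heads, negTails)
loss = torch.clamp(margin + posScore - negScore, min=0).mean()

optimizer.zero_grad()
loss.backward()
print(model.rel.grad)  # all zeros would mean the loss does not depend on rel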

Yes, exactly: the grad of rel turns out to be all zeros.
I thought the problem was that rel is not registered as a leaf node, but nn.Parameter should already make it one.

P.S. Sorry, I figured it out: I wrote the scoring equation incorrectly.
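For anyone who finds this later: with out = (userEmbeds + rel - itemEmbeds).sum(1) and a margin loss built from posScore - negScore, rel appears with coefficient +1 in both scores and cancels exactly, so its gradient is identically zero. Scoring with a distance instead, in the spirit of TransE, fixes it. A minimal sketch of the corrected forward (inside the modeler class):

    def forward(self, head, tail):
        userEmbeds = self.userEmbed(head)
        itemEmbeds = self.itemEmbed(tail)

        # Squared L2 distance instead of a plain sum: rel no longer cancels
        # between the positive and negative scores, so it receives gradient
        out = (userEmbeds + self.rel - itemEmbeds).pow(2).sum(1)

        return out

With this change, model.rel.grad is nonzero after loss.backward() and the optimizer updates rel.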