I am writing a Variational Recurrent AutoEncoder (VRAE)-based model for stock-returns prediction, but just before instantiating the model I have hit a roadblock. The error thrown is:
empty(): argument 'size' failed to unpack the object at pos 2 with error "type must be tuple of ints,but got tuple"
The Encoder model constructor, together with the dict-conversion helper it relies on, is:
def convert_to_dotdict(d):
    if isinstance(d, dict):
        return DotDict({k: convert_to_dotdict(v) for k, v in d.items()})
    elif isinstance(d, list):
        return [convert_to_dotdict(i) for i in d]
    else:
        return d
class Encoder(nn.Module):
    def __init__(self, params: DotDict):
        super(Encoder, self).__init__()
        self.params = params
        self.batch_size = self.params.batch_size
        self.num_stocks = int(self.params.num_stocks)
        self.num_factors = self.params.num_factors
        self.num_layers = self.params.num_layers
        self.hidden_size = self.params.hidden_size
        self.num_lags = self.params.num_lags
        self.dropout = self.params.dropout
        self.rnn = nn.LSTM(
            input_size=self.num_stocks,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            dropout=self.dropout,
            batch_first=True
        )
        print("Hi")  # sanity check
        self.relu = nn.ReLU()
        self.mu = nn.Linear(self.hidden_size, self.num_factors)
        self.mu_activation = nn.ReLU()
        self.log_sigma = nn.Linear(self.hidden_size, self.num_factors)
        self.log_sigma_activation = nn.ReLU()
The Decoder model constructor is very similar, except for a fully connected layer or projection matrix here and there; a simplified sketch follows.
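Roughly like this (a sketch only: I've elided the exact projection details, so the fc layer here is illustrative rather than verbatim):

class Decoder(nn.Module):
    def __init__(self, params: DotDict):
        super(Decoder, self).__init__()
        self.params = params
        self.num_stocks = int(self.params.num_stocks)
        self.num_factors = self.params.num_factors
        self.num_layers = self.params.num_layers
        self.hidden_size = self.params.hidden_size
        self.dropout = self.params.dropout
        # Mirror of the Encoder: LSTM over the latent factors, then a
        # fully connected projection of the hidden state back out.
        self.rnn = nn.LSTM(
            input_size=self.num_factors,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            dropout=self.dropout,
            batch_first=True
        )
        self.fc = nn.Linear(self.hidden_size, self.num_factors)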
The VRAE module (named RVAE in my code) is:
class RVAE(nn.Module):
    def __init__(self, encoder_params: DotDict, decoder_params: DotDict):
        super(RVAE, self).__init__()
        self.encoder = Encoder(encoder_params)
        self.decoder = Decoder(decoder_params)
        self.weight_matrix = nn.Parameter(
            torch.randn(hyperparams.FACTOR_NETWORK.NUM_FACTORS,
                        hyperparams.DATA.NUM_STOCKS),
            requires_grad=True
        )
        self.mu = None
        self.logvar = None

    def sample(self, mu: torch.Tensor, logvar: torch.Tensor):
        # Reparameterization trick: z = mu + eps * sigma, eps ~ N(0, I)
        assert mu.shape == logvar.shape
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, x):
        mu, logvar = self.encoder(x)
        self.mu = mu
        self.logvar = logvar
        z = self.sample(mu, logvar)
        ft = self.decoder(z)
        # torch.matmul rather than np.matmul, so the op stays on-device
        # and gradients flow through weight_matrix
        return torch.matmul(self.weight_matrix, ft)
When I run model = RVAE(encoder_params, decoder_params), I get the mentioned error. Note that DotDict is just a utility class that makes a dictionary dot-accessible.
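For completeness, mine is essentially the standard recipe (treat this as equivalent, not verbatim):

class DotDict(dict):
    """A dict whose keys can also be read and written as attributes."""
    __getattr__ = dict.get          # missing keys yield None instead of KeyError
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

# e.g. convert_to_dotdict({"hidden_size": 64}).hidden_size -> 64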
I tried reading the docs on torch.empty() and feeding it various combinations of inputs to try to break it. Interestingly, torch.empty(2, 2), torch.empty((2, 2)), and torch.empty([2, 2]) all work, but torch.empty((2, (2, 2))) gives the same exact error, which is consistent with the contents of the error message.
After this I tried to track down the source code of torch.empty(), but couldn't find anything reliable, and what I did find made no mention of this error being raised.
What puzzles me is that I have very explicitly converted self.num_stocks with int(), yet the problem still occurs. Any help would be much appreciated. Thanks!
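In case it helps, this is the quick type dump I am adding just before the nn.LSTM call in Encoder.__init__ (the names match my config keys):

# Diagnostic: print the runtime type of every size-like param right
# before nn.LSTM is constructed, to spot a stray tuple or list.
for name in ("num_stocks", "hidden_size", "num_layers",
             "num_factors", "num_lags", "dropout"):
    value = getattr(self.params, name)
    print(f"{name}: {value!r} ({type(value).__name__})")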
NOTE: hyperparams is an external JSON file which I read and convert into a DotDict.
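Roughly like this (the filename is just a placeholder for my actual config path):

import json

# Read the external JSON config and make every nested dict dot-accessible.
with open("hyperparams.json") as f:
    hyperparams = convert_to_dotdict(json.load(f))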