I have been following the instructions on how to use the torchsample library, and I wrote my own loss function:
def LogisticRegressionLoss(p_outputs, q_outputs):
    """Binary logistic-regression (density-ratio) loss.

    Treats `p_outputs` as logits for "true" samples and `q_outputs` as logits
    for "model" samples and returns the mean negative log-likelihood of the
    implied binary classifier.

    Args:
        p_outputs: logit tensor for samples drawn from the target density p.
        q_outputs: logit tensor for samples drawn from the model density q.

    Returns:
        Scalar tensor: the summed mean negative log-likelihoods.
    """
    # Bug fix: torch.nn.Sigmoid is a Module class — calling it with a tensor
    # constructs a module instead of applying the sigmoid. Use the functional
    # torch.sigmoid. The 1e-12 terms keep log() away from -inf.
    return (-torch.mean(torch.log(torch.sigmoid(p_outputs) + 1e-12))
            - torch.mean(torch.log(1.0 - torch.sigmoid(q_outputs) + 1e-12)))
class DensityRatioEstimator(nn.Module):
def __init__(self, target_train_samples, hidden_params, target_val_samples=None):
super(DensityRatioEstimator,self).__init__()
self._train_contexts = torch.from_numpy(target_train_samples[0]).type(torch.float32)
self._target_train_samples = torch.cat([torch.from_numpy(x) for x in target_train_samples], -1).type(torch.float32)
self._val_contexts = torch.from_numpy(target_val_samples[0]).type(torch.float32)
self._target_val_samples = torch.cat([torch.from_numpy(x) for x in target_val_samples], -1).type(torch.float32)
self.hidden_params=hidden_params
input_dim = self._target_train_samples.shape[-1]
self._ldre_net, self._ldre_regularizer = build_dense_network(input_dim=input_dim, output_dim=1,
output_activation="linear", params=self.hidden_params)
self._p_samples = nn.Linear(input_dim,input_dim)
self._q_samples = nn.Linear(input_dim,input_dim)
def forward(self, x):
p = self._p_samples(x)
q = self._q_samples(x)
combined = torch.cat((p.view(p.size(0), -1),
q.view(q.size(0), -1)), dim=1)
self._split_layers = Split(
self._ldre_net[-1],
parts=2,
)
p_output, q_output =self._split_layers(combined)
return p_output, q_output
def __call__(self, samples):
return self._ldre_net(samples)
def train(self, model, batch_size, num_iters):
self.trainer = ModuleTrainer(model)
self.batch_size= batch_size
metrics = [DensityRatioAccuracy()]
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
initializers = [XavierUniform(bias=False, module_filter='*')]
regularizers = [L2Regularizer(scale=1e-4)]
model_train_samples, model_val_samples = self.sample_model(model)
callbacks = [EarlyStopping(monitor='val_loss', patience=10),
ReduceLROnPlateau(factor=0.5, patience=5)]
validation_data =( (self._target_val_samples, model_val_samples),None)
self.trainer.compile(loss=LogisticRegressionLoss,
optimizer=optimizer,
regularizers=regularizers,
initializers=initializers,
metrics=metrics,
callbacks=callbacks)
self.trainer.fit(inputs = (self._target_train_samples,model_train_samples),
targets = None,
val_data = validation_data,
batch_size= batch_size,
num_epoch = num_iters,
verbose = 0)
print(self.trainer.history['acc_metric'])
print(self.trainer.history['loss'])
last_epoch = self.trainer.epoch[-1]
return last_epoch+1, self.trainer.history.losses[-1],self.trainer.history.batch_metrics[-1]
def sample_model(self, model):
"""Sample model for density ratio estimator training"""
model_train_samples = torch.cat([self._train_contexts, model.sample(self._train_contexts)], dim=-1)
else:
model_train_samples = model.sample(self._target_train_samples.shape[0])
model_val_samples = torch.cat([self._val_contexts, model.sample(self._val_contexts)], dim=-1)
return model_train_samples, model_val_samples
When I ran my code, I kept getting this error message:
File "EIM.py", line 783, in train
self.trainer.fit(inputs = (self._target_train_samples,model_train_samples),
File "/home/dm_control/src/torchsample/torchsample/modules/module_trainer.py", line 268, in fit
output_batch = fit_forward_fn(input_batch)
File "/home/src/torchsample/torchsample/modules/module_trainer.py", line 818, in forward_pass
return model(*input_batch)
File "/home/dm_control/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given
I would really appreciate it if someone here could help me figure out how to fix this problem.