I have a model trained on 16 features, a seq_len of 120, and a batch size of 256.
I would like to measure the loss of the model on a test set while replacing one feature at a time with random samples drawn from a normal distribution, so I can measure how important each feature is (an important feature should cause a large rise in loss when it is randomized).
Any advice on how to manipulate one feature at a time? The shape of the input is (256, 120, 16).
h = model.init_hidden(batch_size)
with torch.no_grad():
    for inp, labels in loader:
        if train_on_gpu:
            inp, labels = inp.cuda(), labels.cuda()
        outputs, h = model(inp, h)
        _, predicts = torch.max(outputs, 1)
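Since the features sit on the last axis of your (256, 120, 16) input, overwriting one feature is just an index assignment on dim 2. A minimal sketch, assuming inp is one float tensor batch from your loader and i is the feature index:

    noisy = inp.clone()                                 # keep the original batch intact
    noisy[:, :, i] = torch.randn_like(noisy[:, :, i])   # standard-normal noise

Building on that, you can fit a normal distribution to each feature over the test data and measure the loss with that feature randomized: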
import copy

import numpy as np
import torch
from scipy.stats import norm

# X: numpy array of shape (batch, seq_len, n_features) = (256, 120, 16);
# the features are on the last axis, so iterate over X.shape[-1], not X.shape[1]
for curFeature in range(X.shape[-1]):
    # Fit a normal distribution to this feature's values across the test data
    mu, sigma = norm.fit(X[:, :, curFeature].ravel())
    # Copy the input and overwrite only this feature with random samples,
    # matching the (batch, seq_len) shape of the slice
    Y = copy.deepcopy(X)
    Y[:, :, curFeature] = np.random.normal(mu, sigma, size=Y.shape[:2])
    Y = torch.from_numpy(Y).float()  # Variable is deprecated; plain tensors work
    with torch.no_grad():
        output = model1Obj(Y)
    # PyTorch losses take (input, target), not (target, input)
    loss = criterion(output, target)
    print("The loss value for feature {0} is {1}".format(curFeature, loss.item()))