Saliency LSTM and interpretation

Hi all,

I am trying to implement saliency for my LSTM to see how much each of my input variables influences the model's output.

A result is given, but I am not sure if I have done this as intended. I am trying to see how the 12 input variables influence the result; however, I am not quite sure how to interpret it. The size of saliency is [32, 12] — am I correct to assume that saliency[0, 0] represents the first input variable of the first sample in the batch?

def test(self):
        for indx, data, target, filename in self.test_loader:

            self.model.eval()

            data, target = data.to(self.device), target.to(self.device)
            data.requires_grad_()

            predictions = self.model(data.float())
            predictions.sum().backward()

            saliency, _ = torch.max(data.grad.abs(), dim=1)

then I get these values:

saliency
tensor([[0.0385, 0.0262, 0.0347, 0.0487, 0.0401, 0.0384, 0.0225, 0.0155, 0.0338,
0.0147, 0.0231, 0.0344],
[0.0173, 0.0203, 0.0287, 0.0293, 0.0156, 0.0262, 0.0336, 0.0279, 0.0434,
0.0240, 0.0183, 0.0290],
[0.0026, 0.0022, 0.0060, 0.0044, 0.0033, 0.0064, 0.0054, 0.0059, 0.0097,
0.0049, 0.0066, 0.0082],
[0.0110, 0.0075, 0.0156, 0.0137, 0.0058, 0.0115, 0.0170, 0.0055, 0.0184,
0.0044, 0.0073, 0.0148],
[0.0182, 0.0255, 0.0143, 0.0234, 0.0218, 0.0266, 0.0264, 0.0290, 0.0265,
0.0174, 0.0223, 0.0246],
[0.0054, 0.0042, 0.0147, 0.0103, 0.0099, 0.0067, 0.0118, 0.0097, 0.0138,
0.0079, 0.0170, 0.0324],
[0.0131, 0.0049, 0.0116, 0.0212, 0.0078, 0.0116, 0.0136, 0.0136, 0.0350,
0.0052, 0.0078, 0.0237],
[0.0068, 0.0044, 0.0054, 0.0100, 0.0034, 0.0067, 0.0095, 0.0057, 0.0146,
0.0095, 0.0050, 0.0069],
[0.0122, 0.0086, 0.0126, 0.0069, 0.0166, 0.0140, 0.0133, 0.0097, 0.0142,
0.0084, 0.0045, 0.0126],
[0.0038, 0.0093, 0.0072, 0.0065, 0.0072, 0.0080, 0.0053, 0.0029, 0.0033,
0.0040, 0.0075, 0.0141],
[0.0069, 0.0026, 0.0110, 0.0078, 0.0030, 0.0062, 0.0098, 0.0043, 0.0066,
0.0042, 0.0031, 0.0080],
[0.0086, 0.0083, 0.0115, 0.0095, 0.0043, 0.0111, 0.0087, 0.0153, 0.0119,
0.0061, 0.0034, 0.0065],
[0.0060, 0.0073, 0.0096, 0.0207, 0.0061, 0.0127, 0.0158, 0.0117, 0.0279,
0.0039, 0.0095, 0.0121],
[0.4022, 0.1682, 0.4509, 0.3345, 0.1439, 0.1013, 0.1070, 0.0904, 0.0925,
0.2513, 0.4081, 0.1343],
[0.0054, 0.0091, 0.0072, 0.0048, 0.0049, 0.0084, 0.0041, 0.0100, 0.0123,
0.0041, 0.0045, 0.0077],
[0.0079, 0.0028, 0.0043, 0.0069, 0.0035, 0.0051, 0.0090, 0.0039, 0.0051,
0.0073, 0.0029, 0.0072],
[0.0225, 0.0037, 0.0181, 0.0106, 0.0051, 0.0199, 0.0064, 0.0035, 0.0281,
0.0055, 0.0064, 0.0179],
[0.0137, 0.0132, 0.0080, 0.0118, 0.0100, 0.0148, 0.0184, 0.0176, 0.0101,
0.0146, 0.0121, 0.0103],
[0.0094, 0.0110, 0.0102, 0.0072, 0.0066, 0.0154, 0.0083, 0.0081, 0.0085,
0.0065, 0.0151, 0.0153],
[0.0084, 0.0260, 0.0143, 0.0167, 0.0102, 0.0089, 0.0121, 0.0158, 0.0181,
0.0084, 0.0084, 0.0229],
[0.0062, 0.0042, 0.0094, 0.0072, 0.0054, 0.0114, 0.0087, 0.0030, 0.0170,
0.0068, 0.0096, 0.0077],
[0.0064, 0.0090, 0.0096, 0.0106, 0.0038, 0.0156, 0.0163, 0.0050, 0.0185,
0.0038, 0.0067, 0.0168],
[0.0164, 0.0066, 0.0162, 0.0175, 0.0086, 0.0187, 0.0144, 0.0113, 0.0260,
0.0095, 0.0157, 0.0210],
[0.0053, 0.0048, 0.0118, 0.0073, 0.0043, 0.0081, 0.0106, 0.0072, 0.0132,
0.0034, 0.0018, 0.0065],
[0.0067, 0.0055, 0.0034, 0.0085, 0.0034, 0.0039, 0.0070, 0.0063, 0.0042,
0.0057, 0.0035, 0.0031],
[0.0057, 0.0042, 0.0133, 0.0051, 0.0053, 0.0037, 0.0043, 0.0019, 0.0065,
0.0046, 0.0039, 0.0048],
[0.0094, 0.0041, 0.0130, 0.0096, 0.0071, 0.0146, 0.0124, 0.0092, 0.0133,
0.0134, 0.0089, 0.0096],
[0.0191, 0.0085, 0.0144, 0.0090, 0.0203, 0.0060, 0.0103, 0.0092, 0.0085,
0.0151, 0.0115, 0.0422],
[0.0098, 0.0054, 0.0085, 0.0067, 0.0036, 0.0068, 0.0041, 0.0038, 0.0063,
0.0054, 0.0066, 0.0079],
[0.0252, 0.0232, 0.0273, 0.0167, 0.0154, 0.0188, 0.0182, 0.0301, 0.0172,
0.0136, 0.0206, 0.0353],
[0.0070, 0.0115, 0.0142, 0.0093, 0.0158, 0.0094, 0.0095, 0.0095, 0.0110,
0.0061, 0.0155, 0.0186],
[0.0158, 0.0216, 0.0242, 0.0120, 0.0246, 0.0249, 0.0203, 0.0153, 0.0104,
0.0085, 0.0322, 0.0444]], dtype=torch.float64)
saliency.size()
torch.Size([32, 12])