Sorry, I think I mixed the two questions up.
Here it is again:
1.
When I tried to debug the model (with ReLU activation function),
the NaN
values appear just after optim.step().
My loss.grad
is always None — why is that?
when lr = 1e-3, the gradients change as can be shown below
[TIME DELAY KERNELS Initially] :
tensor([[[ 0.0464, 0.1265, -0.0264, -0.1229, 0.0058],
[ 0.0775, 0.0588, -0.0519, -0.0402, -0.0509],
[-0.1246, 0.1113, 0.0533, -0.0575, 0.0025],
...,
[ 0.0988, -0.0924, -0.0649, -0.1004, 0.1123],
[-0.0736, 0.0166, -0.1075, -0.0556, 0.0434],
[ 0.0260, 0.0104, -0.0937, -0.0415, 0.1184]],
[[-0.0510, -0.0046, 0.0368, 0.0506, 0.0200],
[-0.0930, -0.0011, -0.0559, 0.0898, 0.0883],
[ 0.0701, -0.1162, -0.1195, -0.0312, 0.0920],
...,
[-0.0935, 0.0423, 0.0812, -0.0165, 0.0404],
[-0.0876, -0.0143, 0.0941, -0.0526, 0.1147],
[ 0.0211, -0.0701, -0.1012, 0.0667, 0.0170]],
[[-0.1150, 0.0097, -0.0775, 0.0942, 0.0081],
[-0.0979, 0.1238, 0.0203, 0.0908, 0.0247],
[ 0.0847, -0.1289, -0.0263, 0.0161, 0.0947],
...,
[ 0.0700, -0.1172, -0.0993, 0.0552, 0.0867],
[-0.0777, 0.0926, -0.0298, 0.1178, -0.1251],
[ 0.0866, 0.0364, -0.0255, 0.1266, -0.1166]],
...,
[[ 0.1249, -0.0482, 0.0779, -0.0275, -0.1257],
[ 0.0598, -0.0873, -0.0151, -0.0356, -0.1173],
[ 0.0811, 0.0579, 0.1214, -0.0987, -0.1275],
...,
[ 0.0394, -0.0167, -0.0050, -0.0497, 0.0201],
[-0.0960, -0.0528, -0.0286, -0.0884, 0.0214],
[-0.0445, 0.0251, 0.0291, 0.0033, -0.1236]],
[[-0.0502, 0.1278, -0.0566, -0.1221, 0.0385],
[ 0.1289, 0.0842, 0.1103, 0.0374, 0.0353],
[ 0.0124, 0.0753, -0.0150, 0.0220, -0.0303],
...,
[-0.0197, 0.0386, 0.0428, -0.0563, -0.0295],
[ 0.0788, -0.0783, 0.0876, 0.0235, 0.1071],
[-0.0727, -0.0662, 0.0767, 0.0616, -0.0774]],
[[ 0.1132, -0.0164, -0.0617, 0.0775, -0.0712],
[ 0.0469, -0.0217, 0.1169, 0.0575, -0.1192],
[-0.1204, 0.0910, -0.0542, -0.0739, 0.0933],
...,
[ 0.0845, -0.0405, 0.0301, 0.1049, 0.1191],
[-0.0213, -0.1174, -0.1137, -0.0713, 0.0823],
[ 0.0046, 0.1154, 0.0838, -0.0695, 0.0771]]], requires_grad=True)
loss.data: >>>>>>>>>>>>>>>>>>>>> tensor(1.6111)
loss.grad: >>>>>>>>>>>>>>>>>>>>> None
[TIME DELAY KERNELS ######### MODEL WEIGHTS CONVTDNN1D After backward()] Parameter containing:
tensor([[[ 0.0464, 0.1265, -0.0264, -0.1229, 0.0058],
[ 0.0775, 0.0588, -0.0519, -0.0402, -0.0509],
[-0.1246, 0.1113, 0.0533, -0.0575, 0.0025],
...,
[ 0.0988, -0.0924, -0.0649, -0.1004, 0.1123],
[-0.0736, 0.0166, -0.1075, -0.0556, 0.0434],
[ 0.0260, 0.0104, -0.0937, -0.0415, 0.1184]],
[[-0.0510, -0.0046, 0.0368, 0.0506, 0.0200],
[-0.0930, -0.0011, -0.0559, 0.0898, 0.0883],
[ 0.0701, -0.1162, -0.1195, -0.0312, 0.0920],
...,
[-0.0935, 0.0423, 0.0812, -0.0165, 0.0404],
[-0.0876, -0.0143, 0.0941, -0.0526, 0.1147],
[ 0.0211, -0.0701, -0.1012, 0.0667, 0.0170]],
[[-0.1150, 0.0097, -0.0775, 0.0942, 0.0081],
[-0.0979, 0.1238, 0.0203, 0.0908, 0.0247],
[ 0.0847, -0.1289, -0.0263, 0.0161, 0.0947],
...,
[ 0.0700, -0.1172, -0.0993, 0.0552, 0.0867],
[-0.0777, 0.0926, -0.0298, 0.1178, -0.1251],
[ 0.0866, 0.0364, -0.0255, 0.1266, -0.1166]],
...,
[[ 0.1249, -0.0482, 0.0779, -0.0275, -0.1257],
[ 0.0598, -0.0873, -0.0151, -0.0356, -0.1173],
[ 0.0811, 0.0579, 0.1214, -0.0987, -0.1275],
...,
[ 0.0394, -0.0167, -0.0050, -0.0497, 0.0201],
[-0.0960, -0.0528, -0.0286, -0.0884, 0.0214],
[-0.0445, 0.0251, 0.0291, 0.0033, -0.1236]],
[[-0.0502, 0.1278, -0.0566, -0.1221, 0.0385],
[ 0.1289, 0.0842, 0.1103, 0.0374, 0.0353],
[ 0.0124, 0.0753, -0.0150, 0.0220, -0.0303],
...,
[-0.0197, 0.0386, 0.0428, -0.0563, -0.0295],
[ 0.0788, -0.0783, 0.0876, 0.0235, 0.1071],
[-0.0727, -0.0662, 0.0767, 0.0616, -0.0774]],
[[ 0.1132, -0.0164, -0.0617, 0.0775, -0.0712],
[ 0.0469, -0.0217, 0.1169, 0.0575, -0.1192],
[-0.1204, 0.0910, -0.0542, -0.0739, 0.0933],
...,
[ 0.0845, -0.0405, 0.0301, 0.1049, 0.1191],
[-0.0213, -0.1174, -0.1137, -0.0713, 0.0823],
[ 0.0046, 0.1154, 0.0838, -0.0695, 0.0771]]], requires_grad=True)
# Check every parameter's gradient for non-finite values (NaN/Inf).
# NOTE: param.grad is None until the first backward() (and for parameters
# that did not participate in the loss), so guard before calling isfinite
# or this loop raises AttributeError/TypeError. Likewise, loss.grad is
# always None because loss is a NON-LEAF tensor: autograd does not retain
# .grad on intermediates unless you call loss.retain_grad() first.
for name, param in model.named_parameters():
    if param.grad is None:
        print("Model Parameters", name, "grad is None")
    else:
        print("Model Parameters", name, torch.isfinite(param.grad).all())
Output:
Model Parameters conv1d_tdnn1.kernel tensor(True)
Model Parameters conv1d_tdnn1.bias tensor(True)
Model Parameters bn1.weight tensor(True)
Model Parameters bn1.bias tensor(True)
Model Parameters conv1d_tdnn2.kernel tensor(True)
Model Parameters conv1d_tdnn2.bias tensor(True)
Model Parameters bn2.weight tensor(True)
Model Parameters bn2.bias tensor(True)
Model Parameters conv1d_tdnn3.kernel tensor(True)
Model Parameters conv1d_tdnn3.bias tensor(True)
Model Parameters bn3.weight tensor(True)
Model Parameters bn3.bias tensor(True)
Model Parameters conv1d_tdnn4.kernel tensor(True)
Model Parameters conv1d_tdnn4.bias tensor(True)
Model Parameters bn4.weight tensor(True)
Model Parameters bn4.bias tensor(True)
Model Parameters conv1d_tdnn5.kernel tensor(True)
Model Parameters conv1d_tdnn5.bias tensor(True)
Model Parameters bn5.weight tensor(True)
Model Parameters bn5.bias tensor(True)
Model Parameters conv1d_tdnn6.kernel tensor(True)
Model Parameters conv1d_tdnn6.bias tensor(True)
Model Parameters bn6.weight tensor(True)
Model Parameters bn6.bias tensor(True)
Model Parameters conv1d_tdnn7.kernel tensor(True)
Model Parameters conv1d_tdnn7.bias tensor(True)
Model Parameters bn7.weight tensor(True)
Model Parameters bn7.bias tensor(True)
Model Parameters conv1d_tdnn8.kernel tensor(True)
Model Parameters conv1d_tdnn8.bias tensor(True)
Model Parameters bn8.weight tensor(True)
Model Parameters bn8.bias tensor(True)
Model Parameters conv1d1.weight tensor(True)
Model Parameters conv1d1.bias tensor(True)
Model Parameters bn9.weight tensor(True)
Model Parameters bn9.bias tensor(True)
Model Parameters conv1d2.weight tensor(True)
Model Parameters conv1d2.bias tensor(True)
Model Parameters bn17.weight tensor(True)
Model Parameters bn17.bias tensor(True)
Model Parameters fc1.weight tensor(True)
Model Parameters fc1.bias tensor(True)
[TIME DELAY KERNELS ######### MODEL WEIGHTS after optim step] Parameter containing:
tensor([[[ 4.5370e-02, 1.2555e-01, -2.7370e-02, -1.2193e-01, 6.8261e-03],
[ 7.8489e-02, 5.9769e-02, -5.0870e-02, -3.9160e-02, -4.9935e-02],
[-1.2560e-01, 1.1027e-01, 5.2297e-02, -5.8546e-02, 1.4725e-03],
...,
[ 9.9825e-02, -9.1410e-02, -6.5898e-02, -1.0138e-01, 1.1327e-01],
[-7.2554e-02, 1.7553e-02, -1.0855e-01, -5.6650e-02, 4.4416e-02],
[ 2.5022e-02, 9.3778e-03, -9.2745e-02, -4.2471e-02, 1.1742e-01]],
[[-4.9999e-02, -3.5967e-03, 3.5824e-02, 4.9602e-02, 1.9006e-02],
[-9.4040e-02, -1.1871e-04, -5.4942e-02, 9.0787e-02, 8.9262e-02],
[ 7.1104e-02, -1.1721e-01, -1.2053e-01, -3.0214e-02, 9.2959e-02],
...,
[-9.4487e-02, 4.1319e-02, 8.2165e-02, -1.5533e-02, 4.1357e-02],
[-8.6648e-02, -1.3317e-02, 9.5130e-02, -5.1597e-02, 1.1569e-01],
[ 2.2133e-02, -6.9081e-02, -1.0019e-01, 6.7732e-02, 1.8049e-02]],
[[-1.1600e-01, 8.7238e-03, -7.8496e-02, 9.3240e-02, 7.1277e-03],
[-9.6913e-02, 1.2276e-01, 1.9306e-02, 8.9796e-02, 2.3663e-02],
[ 8.5686e-02, -1.2791e-01, -2.5250e-02, 1.7085e-02, 9.5654e-02],
...,
[ 7.1008e-02, -1.1619e-01, -1.0031e-01, 5.6212e-02, 8.7727e-02],
[-7.6723e-02, 9.3636e-02, -3.0839e-02, 1.1676e-01, -1.2606e-01],
[ 8.5552e-02, 3.5442e-02, -2.6544e-02, 1.2565e-01, -1.1764e-01]],
...,
[[ 1.2394e-01, -4.9221e-02, 7.6931e-02, -2.6519e-02, -1.2469e-01],
[ 5.8770e-02, -8.8278e-02, -1.6056e-02, -3.6625e-02, -1.1831e-01],
[ 8.2052e-02, 5.8877e-02, 1.2242e-01, -9.7689e-02, -1.2652e-01],
...,
[ 4.0395e-02, -1.5654e-02, -3.9867e-03, -4.8731e-02, 1.9100e-02],
[-9.5034e-02, -5.1830e-02, -2.7647e-02, -8.7446e-02, 2.2397e-02],
[-4.3457e-02, 2.6086e-02, 3.0136e-02, 4.2744e-03, -1.2261e-01]],
[[-4.9196e-02, 1.2683e-01, -5.7618e-02, -1.2310e-01, 3.7461e-02],
[ 1.2793e-01, 8.5212e-02, 1.1134e-01, 3.8424e-02, 3.6294e-02],
[ 1.1408e-02, 7.6318e-02, -1.3972e-02, 2.3047e-02, -2.9326e-02],
...,
[-2.0711e-02, 3.7618e-02, 4.1771e-02, -5.7251e-02, -3.0511e-02],
[ 7.7848e-02, -7.9281e-02, 8.8627e-02, 2.4537e-02, 1.0809e-01],
[-7.1702e-02, -6.5161e-02, 7.7686e-02, 6.2586e-02, -7.6438e-02]],
[[ 1.1423e-01, -1.5357e-02, -6.0655e-02, 7.8528e-02, -7.0191e-02],
[ 4.5850e-02, -2.2736e-02, 1.1593e-01, 5.6516e-02, -1.2018e-01],
[-1.2140e-01, 8.9994e-02, -5.5193e-02, -7.4852e-02, 9.2298e-02],
...,
[ 8.3527e-02, -4.1535e-02, 2.9075e-02, 1.0388e-01, 1.1814e-01],
[-2.0266e-02, -1.1640e-01, -1.1469e-01, -7.2341e-02, 8.1306e-02],
[ 5.6036e-03, 1.1435e-01, 8.2755e-02, -7.0547e-02, 7.6113e-02]]],
requires_grad=True)
For the second batch:
loss.data: >>>>>>>>>>>>>>>>>>>>> tensor(1.5873)
loss.grad: >>>>>>>>>>>>>>>>>>>>> None
# Same gradient-finiteness check as above, with the None-grad guard:
# param.grad is None before the first backward() / for unused parameters,
# and torch.isfinite(None) would raise. loss.grad being None is expected —
# loss is not a leaf tensor, so .grad is not retained on it.
for name, param in model.named_parameters():
    if param.grad is None:
        print("Model Parameters", name, "grad is None")
    else:
        print("Model Parameters", name, torch.isfinite(param.grad).all())
output:
Model Parameters conv1d_tdnn1.kernel tensor(False)
Model Parameters conv1d_tdnn1.bias tensor(False)
Model Parameters bn1.weight tensor(False)
Model Parameters bn1.bias tensor(False)
Model Parameters conv1d_tdnn2.kernel tensor(False)
Model Parameters conv1d_tdnn2.bias tensor(False)
Model Parameters bn2.weight tensor(False)
Model Parameters bn2.bias tensor(False)
Model Parameters conv1d_tdnn3.kernel tensor(False)
Model Parameters conv1d_tdnn3.bias tensor(False)
Model Parameters bn3.weight tensor(False)
Model Parameters bn3.bias tensor(False)
Model Parameters conv1d_tdnn4.kernel tensor(False)
Model Parameters conv1d_tdnn4.bias tensor(False)
Model Parameters bn4.weight tensor(False)
Model Parameters bn4.bias tensor(False)
Model Parameters conv1d_tdnn5.kernel tensor(False)
Model Parameters conv1d_tdnn5.bias tensor(False)
Model Parameters bn5.weight tensor(False)
Model Parameters bn5.bias tensor(False)
Model Parameters conv1d_tdnn6.kernel tensor(False)
Model Parameters conv1d_tdnn6.bias tensor(False)
Model Parameters bn6.weight tensor(False)
Model Parameters bn6.bias tensor(False)
Model Parameters conv1d_tdnn7.kernel tensor(False)
Model Parameters conv1d_tdnn7.bias tensor(False)
Model Parameters bn7.weight tensor(False)
Model Parameters bn7.bias tensor(False)
Model Parameters conv1d_tdnn8.kernel tensor(False)
Model Parameters conv1d_tdnn8.bias tensor(False)
Model Parameters bn8.weight tensor(False)
Model Parameters bn8.bias tensor(False)
Model Parameters conv1d1.weight tensor(False)
Model Parameters conv1d1.bias tensor(False)
Model Parameters bn9.weight tensor(False)
Model Parameters bn9.bias tensor(False)
Model Parameters conv1d2.weight tensor(False)
Model Parameters conv1d2.bias tensor(False)
Model Parameters bn17.weight tensor(False)
Model Parameters bn17.bias tensor(False)
Model Parameters fc1.weight tensor(True)
Model Parameters fc1.bias tensor(True)
[TIME DELAY KERNELS ######### MODEL WEIGHTS after optim step] Parameter containing:
tensor([[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
...,
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]]], requires_grad=True)
But when I use lr=1e-6,
the weights/kernels seem not to change, as shown below (but they eventually become NaN after a few epochs):
[TIME DELAY KERNELS #########] Parameter containing:
tensor([[[ 0.0079, 0.0298, -0.0579, 0.0080, 0.0100],
[ 0.0063, 0.0655, -0.1080, 0.0079, 0.0759],
[-0.1031, 0.1014, 0.0356, 0.0277, 0.0546],
...,
[ 0.1165, 0.0836, 0.1217, -0.0938, -0.0234],
[ 0.0288, 0.0526, -0.0533, 0.0694, -0.0221],
[-0.0054, 0.0437, 0.0381, -0.0287, -0.0823]],
[[-0.0571, -0.0404, -0.0376, -0.0070, -0.1035],
[ 0.0561, -0.0900, 0.1084, 0.1146, 0.0830],
[-0.1018, 0.0085, 0.0125, -0.0679, -0.1270],
...,
[ 0.1034, -0.1151, 0.1013, -0.0041, 0.0461],
[-0.0316, 0.0530, 0.0920, 0.0571, 0.0864],
[-0.0450, 0.0983, -0.1215, -0.0478, 0.0072]],
[[ 0.0602, -0.0368, 0.0419, 0.0913, -0.0916],
[-0.0727, 0.0879, -0.1193, -0.0528, -0.1224],
[ 0.0742, -0.0206, -0.0934, -0.0904, 0.0351],
...,
[ 0.1246, -0.0076, 0.1060, 0.0746, -0.0602],
[-0.0234, 0.0270, 0.0291, -0.0217, 0.1224],
[-0.0776, -0.1226, 0.0947, -0.0233, -0.0771]],
...,
[[-0.1039, 0.1198, 0.1153, 0.0678, -0.0050],
[ 0.0494, 0.0957, 0.0433, 0.0602, -0.1126],
[ 0.0615, -0.0724, 0.0260, 0.0691, 0.0008],
...,
[ 0.0602, -0.0231, 0.0401, 0.0097, 0.0461],
[-0.1059, 0.0715, -0.0322, 0.0102, -0.0236],
[ 0.1195, -0.1168, -0.0832, 0.0411, 0.0460]],
[[-0.0090, 0.0193, 0.0612, 0.0548, 0.1203],
[ 0.0440, 0.0113, -0.1176, 0.0363, 0.0760],
[-0.0311, -0.0469, -0.1203, -0.0161, 0.0886],
...,
[-0.0672, -0.0296, 0.0936, 0.0948, 0.0258],
[-0.1272, -0.1178, 0.0858, -0.0509, -0.0762],
[-0.0768, -0.0323, -0.0619, 0.0687, 0.0263]],
[[-0.0427, 0.0458, 0.0305, -0.0878, -0.0284],
[ 0.1127, 0.1272, -0.0467, -0.0870, -0.0614],
[-0.0112, 0.0184, -0.0048, 0.0264, 0.0600],
...,
[ 0.0914, 0.1015, -0.1064, 0.0297, 0.0450],
[ 0.0778, 0.0999, -0.0055, 0.0816, -0.1034],
[-0.0717, -0.1141, 0.0757, 0.0213, 0.0423]]], requires_grad=True)
loss.data: >>>>>>>>>>>>>>>>>>>>> tensor(1.6079)
loss.grad: >>>>>>>>>>>>>>>>>>>>> None
[TIME DELAY KERNELS ######### MODEL WEIGHTS after optim step] Parameter containing:
tensor([[[ 0.0079, 0.0298, -0.0579, 0.0080, 0.0100],
[ 0.0063, 0.0655, -0.1080, 0.0079, 0.0759],
[-0.1031, 0.1014, 0.0356, 0.0277, 0.0546],
...,
[ 0.1165, 0.0836, 0.1217, -0.0938, -0.0234],
[ 0.0288, 0.0526, -0.0533, 0.0694, -0.0221],
[-0.0054, 0.0437, 0.0381, -0.0287, -0.0823]],
[[-0.0571, -0.0404, -0.0376, -0.0070, -0.1035],
[ 0.0561, -0.0900, 0.1084, 0.1146, 0.0830],
[-0.1018, 0.0085, 0.0125, -0.0679, -0.1270],
...,
[ 0.1034, -0.1151, 0.1013, -0.0041, 0.0461],
[-0.0316, 0.0530, 0.0920, 0.0571, 0.0864],
[-0.0450, 0.0983, -0.1215, -0.0478, 0.0072]],
[[ 0.0602, -0.0368, 0.0419, 0.0913, -0.0916],
[-0.0727, 0.0879, -0.1193, -0.0528, -0.1224],
[ 0.0742, -0.0206, -0.0934, -0.0904, 0.0351],
...,
[ 0.1246, -0.0076, 0.1060, 0.0746, -0.0602],
[-0.0234, 0.0270, 0.0291, -0.0217, 0.1224],
[-0.0776, -0.1226, 0.0947, -0.0233, -0.0771]],
...,
[[-0.1039, 0.1198, 0.1153, 0.0678, -0.0050],
[ 0.0494, 0.0957, 0.0433, 0.0602, -0.1126],
[ 0.0615, -0.0724, 0.0260, 0.0691, 0.0008],
...,
[ 0.0602, -0.0231, 0.0401, 0.0097, 0.0461],
[-0.1059, 0.0715, -0.0322, 0.0102, -0.0236],
[ 0.1195, -0.1168, -0.0832, 0.0411, 0.0460]],
[[-0.0090, 0.0193, 0.0612, 0.0548, 0.1203],
[ 0.0440, 0.0113, -0.1176, 0.0363, 0.0760],
[-0.0311, -0.0469, -0.1203, -0.0161, 0.0886],
...,
[-0.0672, -0.0296, 0.0936, 0.0948, 0.0258],
[-0.1272, -0.1178, 0.0858, -0.0509, -0.0762],
[-0.0768, -0.0323, -0.0619, 0.0687, 0.0263]],
[[-0.0427, 0.0458, 0.0305, -0.0878, -0.0284],
[ 0.1127, 0.1272, -0.0467, -0.0870, -0.0614],
[-0.0112, 0.0184, -0.0048, 0.0264, 0.0600],
...,
[ 0.0914, 0.1015, -0.1064, 0.0297, 0.0450],
[ 0.0778, 0.0999, -0.0055, 0.0816, -0.1033],
[-0.0717, -0.1141, 0.0757, 0.0213, 0.0423]]], requires_grad=True)
When I use lr=1e-4,
after a few epochs the kernels are NaN
again:
[TIME DELAY KERNELS #########] Parameter containing:
tensor([[[ 0.0136, -0.0752, 0.0020, -0.0965, -0.0131],
[ 0.0635, 0.0099, -0.0959, -0.0854, -0.0325],
[ 0.0286, 0.1149, -0.1007, -0.0021, 0.0026],
...,
[-0.0003, 0.1163, -0.0021, 0.0870, -0.1104],
[-0.0024, 0.0378, -0.0088, -0.0364, -0.0283],
[-0.0635, -0.0541, -0.0629, 0.0534, -0.0200]],
[[ 0.0144, -0.1141, 0.1095, 0.0856, -0.0459],
[-0.0461, 0.0749, -0.0852, 0.0616, -0.0973],
[-0.1217, 0.0756, -0.0908, 0.1120, 0.0292],
...,
[-0.0219, -0.0834, 0.0487, 0.0458, -0.0246],
[ 0.0370, 0.1010, -0.0155, -0.1216, -0.0037],
[-0.1001, -0.0428, -0.0965, 0.0643, 0.1126]],
[[-0.1100, -0.0551, 0.0958, -0.0667, -0.1217],
[-0.0888, 0.0772, 0.1293, 0.0672, -0.1150],
[-0.0810, -0.0997, -0.0865, -0.0070, -0.0275],
...,
[-0.0956, 0.0067, 0.0814, -0.0341, 0.0146],
[ 0.0009, 0.1233, 0.1066, 0.1227, 0.0112],
[ 0.0494, 0.0964, 0.1136, -0.0609, 0.0278]],
...,
[[ 0.0077, 0.0765, 0.0944, -0.0367, -0.1288],
[ 0.0360, 0.1273, 0.0949, -0.1149, -0.0253],
[-0.0977, 0.0820, 0.0507, 0.0170, 0.0841],
...,
[ 0.0525, 0.1160, -0.0784, -0.0562, 0.0011],
[ 0.0316, 0.0755, 0.0154, 0.0780, -0.0076],
[-0.0287, 0.0956, 0.0803, -0.0724, 0.1140]],
[[-0.1273, -0.0324, -0.0107, 0.0710, 0.0575],
[-0.0106, -0.0954, 0.0516, 0.0549, 0.0785],
[-0.0294, -0.0269, 0.0113, 0.0007, -0.0129],
...,
[ 0.1290, 0.0674, -0.0439, 0.0626, -0.1003],
[ 0.0332, 0.0544, -0.0148, 0.0892, -0.1052],
[-0.0005, 0.0159, -0.0704, -0.0901, 0.1193]],
[[-0.0171, 0.1152, 0.0823, -0.0116, 0.1046],
[-0.0855, -0.1010, -0.0871, 0.0397, -0.0093],
[ 0.0180, 0.0254, 0.1218, -0.0695, -0.0957],
...,
[ 0.0437, -0.1261, -0.0877, 0.1015, -0.0517],
[ 0.1105, -0.0370, -0.0133, 0.0120, 0.0652],
[ 0.1052, -0.0098, 0.0578, 0.1058, -0.0507]]], requires_grad=True)
Model Parameters conv1d_tdnn1.kernel tensor(False)
Model Parameters conv1d_tdnn1.bias tensor(False)
Model Parameters bn1.weight tensor(False)
Model Parameters bn1.bias tensor(False)
Model Parameters conv1d_tdnn2.kernel tensor(False)
Model Parameters conv1d_tdnn2.bias tensor(False)
Model Parameters bn2.weight tensor(False)
Model Parameters bn2.bias tensor(False)
Model Parameters conv1d_tdnn3.kernel tensor(False)
Model Parameters conv1d_tdnn3.bias tensor(False)
Model Parameters bn3.weight tensor(False)
Model Parameters bn3.bias tensor(False)
Model Parameters conv1d_tdnn4.kernel tensor(False)
Model Parameters conv1d_tdnn4.bias tensor(False)
Model Parameters bn4.weight tensor(False)
Model Parameters bn4.bias tensor(False)
Model Parameters conv1d_tdnn5.kernel tensor(False)
Model Parameters conv1d_tdnn5.bias tensor(False)
Model Parameters bn5.weight tensor(False)
Model Parameters bn5.bias tensor(False)
Model Parameters conv1d_tdnn6.kernel tensor(False)
Model Parameters conv1d_tdnn6.bias tensor(False)
Model Parameters bn6.weight tensor(False)
Model Parameters bn6.bias tensor(False)
Model Parameters conv1d_tdnn7.kernel tensor(False)
Model Parameters conv1d_tdnn7.bias tensor(False)
Model Parameters bn7.weight tensor(False)
Model Parameters bn7.bias tensor(False)
Model Parameters conv1d_tdnn8.kernel tensor(False)
Model Parameters conv1d_tdnn8.bias tensor(False)
Model Parameters bn8.weight tensor(False)
Model Parameters bn8.bias tensor(False)
Model Parameters conv1d1.weight tensor(False)
Model Parameters conv1d1.bias tensor(False)
Model Parameters bn9.weight tensor(False)
Model Parameters bn9.bias tensor(False)
Model Parameters conv1d2.weight tensor(False)
Model Parameters conv1d2.bias tensor(False)
Model Parameters bn17.weight tensor(False)
Model Parameters bn17.bias tensor(False)
Model Parameters fc1.weight tensor(True)
Model Parameters fc1.bias tensor(True)
[TIME DELAY KERNELS ######### MODEL WEIGHTS after optim step] Parameter containing:
tensor([[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
...,
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]],
[[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
...,
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan]]], requires_grad=True)
Also, is this normalisation okay for audio data?
# Preprocessing pipeline: peak-normalize the waveform, extract MFCCs,
# standardize them, and convert to a torch tensor.
# NOTE(review): np.max(abs(input)) is 0 for an all-zero (silent) clip,
# which makes the first step divide by zero — confirm the dataset has no
# silent samples, or add an epsilon/guard there.
# NOTE(review): torch.Tensor(...) casts the float64 array down to float32.
transformed_data = transforms.Compose([
lambda input: input.astype(np.float64)/np.max(abs(input)), # Peak-rescale waveform to [-1, 1]
lambda input: get_mfcc_features(input, sr, n_mfcc, n_fft, hop_length),  # waveform -> MFCC matrix
lambda input: normalize(input),  # zero-mean / unit-std standardization (see normalize below)
lambda input: torch.Tensor(input)  # numpy -> torch (float32)
])
def normalize(input):
    """Standardize `input` to zero mean and (approximately) unit std.

    Bug fixed: the original did `input += 1e-5`, but adding a constant to
    the data does NOT change its standard deviation, so it gave no
    protection at all against division by zero — a constant input (e.g. a
    silent frame) still produced std == 0 and hence NaN, which then
    poisoned the gradients and, after optim.step(), the weights. The
    epsilon belongs on the DENOMINATOR. The in-place `+=` also mutated the
    caller's array; we now work on a copy.

    Returns a float64 ndarray of the same shape as `input`.
    """
    # Copy (and promote to float64) so the caller's array is never mutated.
    input = np.asarray(input, dtype=np.float64)
    # Epsilon on the std itself: this is what actually prevents 0/0.
    stdv = np.std(input) + 1e-5
    output = (input - np.mean(input)) / stdv  # 0 mean, ~1 std
    if np.isnan(np.sum(output)):
        print("[Nan Values in Normalize is ::]", np.isnan(np.sum(output)))
    return output