Hi Negin, you can access the layers inside the Sequential just by indexing, e.g.:
import torch.nn as nn

n_inputs = 1000
n_classes = 5

fc = nn.Sequential(
    nn.Linear(n_inputs, 256),
    nn.ReLU(),
    nn.Dropout(0.4),
    nn.Linear(256, n_classes),
    nn.LogSoftmax(dim=1))
then:
fc[0]
Out: Linear(in_features=1000, out_features=256, bias=True)
fc[0].weight
Out: Parameter containing:
tensor([[-0.0103, 0.0170, 0.0160, ..., 0.0275, -0.0294, 0.0309],
[ 0.0046, -0.0301, -0.0032, ..., 0.0195, -0.0224, -0.0095],
[ 0.0229, -0.0076, -0.0062, ..., -0.0256, -0.0065, 0.0039],
...,
[ 0.0161, -0.0019, 0.0032, ..., -0.0299, 0.0106, 0.0189],
[-0.0208, 0.0051, -0.0269, ..., -0.0016, 0.0144, 0.0314],
[-0.0121, 0.0188, -0.0211, ..., -0.0263, 0.0120, 0.0235]],
requires_grad=True)
fc[0].bias
Out: Parameter containing:
tensor([ 0.0077, -0.0094, -0.0161, 0.0084, 0.0064, 0.0057, -0.0187, -0.0269,
-0.0020, -0.0084, -0.0150, 0.0281, 0.0314, 0.0107, -0.0049, 0.0230,
-0.0002, -0.0055, 0.0282, -0.0007, 0.0032, -0.0141, -0.0316, -0.0212,
-0.0295, 0.0166, -0.0097, 0.0305, -0.0060, 0.0247, 0.0150, -0.0206,
-0.0310, -0.0130, -0.0203, 0.0024, -0.0096, -0.0056, -0.0157, 0.0237,
-0.0023, -0.0076, 0.0262, 0.0035, -0.0052, -0.0117, -0.0136, -0.0174,
-0.0239, 0.0145, -0.0225, -0.0170, 0.0268, -0.0238, -0.0289, 0.0060,
0.0129, -0.0061, 0.0314, -0.0150, 0.0237, 0.0014, -0.0137, -0.0300,
-0.0041, 0.0194, 0.0170, 0.0037, -0.0310, 0.0257, -0.0073, -0.0109,
0.0289, -0.0143, 0.0208, 0.0264, 0.0072, -0.0010, 0.0068, -0.0172,
0.0095, 0.0193, 0.0135, -0.0153, 0.0310, 0.0064, 0.0116, 0.0136,
-0.0186, 0.0112, -0.0133, 0.0048, 0.0233, -0.0048, -0.0292, -0.0122,
-0.0215, 0.0241, 0.0043, 0.0152, -0.0042, 0.0024, 0.0279, -0.0169,
0.0155, -0.0184, 0.0272, 0.0266, 0.0069, 0.0277, -0.0247, 0.0301,
-0.0241, 0.0148, -0.0294, -0.0063, 0.0074, -0.0133, 0.0177, 0.0173,
-0.0070, -0.0056, 0.0139, 0.0121, 0.0222, -0.0146, -0.0179, 0.0232,
0.0108, -0.0156, -0.0280, -0.0249, -0.0123, 0.0237, 0.0043, -0.0047,
0.0007, 0.0071, 0.0222, 0.0025, 0.0221, -0.0106, -0.0098, -0.0314,
0.0267, 0.0116, -0.0074, -0.0107, 0.0028, -0.0200, 0.0099, -0.0246,
0.0149, 0.0159, -0.0142, -0.0026, 0.0077, 0.0040, -0.0191, 0.0178,
-0.0179, -0.0225, 0.0103, 0.0185, -0.0024, 0.0027, -0.0281, 0.0069,
-0.0132, -0.0046, -0.0024, 0.0175, 0.0265, 0.0006, 0.0010, -0.0278,
0.0204, 0.0226, -0.0247, 0.0204, -0.0003, -0.0005, -0.0255, -0.0023,
0.0024, 0.0022, -0.0065, 0.0257, 0.0014, 0.0218, -0.0029, -0.0126,
0.0278, -0.0309, 0.0044, -0.0001, 0.0115, 0.0032, -0.0184, 0.0239,
0.0240, 0.0016, 0.0297, 0.0233, 0.0222, -0.0126, -0.0221, 0.0220,
0.0262, 0.0286, -0.0129, 0.0275, -0.0284, -0.0280, 0.0042, -0.0126,
-0.0196, -0.0058, -0.0083, -0.0178, -0.0133, 0.0259, -0.0164, -0.0141,
0.0167, -0.0045, 0.0071, 0.0190, 0.0057, -0.0078, 0.0194, -0.0040,
-0.0260, 0.0125, 0.0036, -0.0144, -0.0133, 0.0276, -0.0221, 0.0226,
-0.0264, 0.0144, 0.0190, -0.0275, 0.0238, -0.0014, -0.0134, -0.0257,
-0.0118, 0.0083, 0.0248, 0.0253, 0.0226, -0.0315, -0.0301, 0.0250],
requires_grad=True)
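If it's easier than indexing, you can also list every parameter together with its name via named_parameters() (standard nn.Module API); inside a Sequential the names are just the layer indices. A minimal sketch:

for name, param in fc.named_parameters():
    print(name, param.shape)
# 0.weight torch.Size([256, 1000])
# 0.bias   torch.Size([256])
# 3.weight torch.Size([5, 256])
# 3.bias   torch.Size([5])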
Also, this link explains how to save the model to disk.
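In short, the usual approach is to save the state_dict rather than the whole module; a minimal sketch (the file path is just an example):

import torch

# save only the parameters
torch.save(fc.state_dict(), 'fc.pth')

# later: rebuild the same architecture, then load the weights back
fc.load_state_dict(torch.load('fc.pth'))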