How are layer weights and biases initialized by default?

Hello,
import torch
import torch.nn as nn

class DQN(nn.Module):
    def __init__(self, input_shape, n_actions):
        super(DQN, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_shape, 32),
            nn.ReLU(),
            nn.Linear(32, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, n_actions)
        )

    def init_weights(m):
        if type(m) == nn.Linear:
            nn.init.xavier_normal_(m.weight, gain=1.0)
            m.bias.data.fill_(0.01)

    def forward(self, x):
        return self.fc(x).apply(init_weights)  # this line raises the AttributeError below

While using this architecture and weight-initialization technique, I am getting this error:
    def forward(self, x):
        return self.fc(x).apply(init_weights)
AttributeError: 'Tensor' object has no attribute 'apply'
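
From the nn.Module docs, I think apply is a method of nn.Module (it calls a function recursively on the module and every submodule), while self.fc(x) returns a Tensor, which has no apply method. Here is a minimal sketch of what I think the fix would look like, with init_weights moved to module level and self.apply(init_weights) called once in __init__ instead of inside forward (my assumption, not verified):

import torch
import torch.nn as nn

def init_weights(m):
    # Xavier-normal weights and a small constant bias for every Linear layer
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight, gain=1.0)
        m.bias.data.fill_(0.01)

class DQN(nn.Module):
    def __init__(self, input_shape, n_actions):
        super(DQN, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_shape, 32),
            nn.ReLU(),
            nn.Linear(32, n_actions),
        )
        # apply() walks this module and all submodules, calling init_weights on each
        self.apply(init_weights)

    def forward(self, x):
        # forward only runs the layers; initialization already happened in __init__
        return self.fc(x)

With this sketch, net = DQN(4, 2) followed by net(torch.randn(1, 4)) should run without the AttributeError, if I understand apply correctly.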

Can somebody help me with this?