Different results from scikit-learn PCA and a manual PCA in PyTorch

I’m trying to compute principal components via two different methods, but I get different results.
Can you show me what I’m missing?
Thanks.

import numpy as np
import torch

B1 = np.array([
    [0.387, 4878, 5.42],
    [0.723, 12104, 5.25],
    [1.0, 12756, 5.52],
    [1.524, 6787, 3.94],
    [2.3, 4.1, 3.21],
    [3.1, 3.2, 2.6]])
from sklearn.decomposition import PCA

pca = PCA(n_components=2)
pca1 = pca.fit_transform(B1)  # fit_transform already fits, no separate fit() needed
print(pca1)

[[-1.21071628e+03  1.84225231e+00]
 [ 6.01528345e+03 -1.61508354e-01]
 [ 6.66728345e+03 -3.15444437e-01]
 [ 6.98283241e+02 -4.42740940e-01]
 [-6.08461682e+03  3.72122654e-02]
 [-6.08551704e+03 -9.59770842e-01]]
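
For reference, as far as I understand, sklearn's PCA centers the data and then takes an SVD of the centered matrix, so the output above should be reproducible (up to a per-component sign flip) with plain NumPy:

# Minimal sketch of what sklearn does internally: center, SVD, project.
Xc = B1 - B1.mean(axis=0)                          # center each column
U, S, Vt = np.linalg.svd(Xc, full_matrices=False)  # rows of Vt are the components
print(Xc @ Vt[:2].T)                               # scores for the first two components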

A = torch.tensor(B1, dtype=torch.float)
B = torch.tensor(B1, dtype=torch.float)
xm = torch.mean(torch.transpose(B, 0, 1), 1, keepdim=True)  # column means, shape (3, 1)
xm = torch.transpose(xm, 0, 1)                              # shape (1, 3)
c = A - xm[0]                                               # centered data
lenn = c.shape[0]
covariance = torch.matmul(torch.transpose(B, 0, 1), B) / (lenn - 1)  # computed from B
es, vs = torch.symeig(torch.transpose(covariance, 0, 1), eigenvectors=True)

len_pca = 2
pc1 = torch.matmul(torch.transpose(vs, 0, 1), torch.transpose(c, 0, 1))

pc1 = torch.transpose(pc1, 0, 1)
print(pc1[:, :len_pca])

tensor([[-1.9730e+00, -2.6653e-03],
        [ 1.0763e+00,  2.1804e+00],
        [ 1.2333e+00,  2.0192e+00],
        [ 6.0225e-01,  4.1150e-01],
        [-9.3635e-01, -2.1175e+00],
        [-2.4548e-03, -2.4910e+00]])
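
For what it's worth, my understanding of the textbook manual route is the sketch below (center first, build the covariance from the centered data, and sort the eigenpairs by descending eigenvalue; torch.linalg.eigh is the current replacement for the deprecated torch.symeig and returns eigenvalues in ascending order). Assuming that's right, where does my version above diverge from it?

# Manual PCA sketch: center, covariance of the *centered* data,
# eigendecomposition, sort by descending eigenvalue, project.
X = torch.tensor(B1, dtype=torch.float64)        # float64 to match sklearn's precision
Xc = X - X.mean(dim=0, keepdim=True)             # center each column
cov = Xc.T @ Xc / (Xc.shape[0] - 1)              # sample covariance
evals, evecs = torch.linalg.eigh(cov)            # ascending eigenvalue order
order = torch.argsort(evals, descending=True)    # largest variance first
print(Xc @ evecs[:, order[:2]])                  # may differ from sklearn by sign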