Why does transforms.ToTensor() output values in the range [-1, 1]?

According to the documentation, transforms.ToTensor() should transform an array in the range 0–255 to the range [0, 1]. But I just tested the output of my DataLoader, which results in the following:

class ImageDataset(Dataset):
    """Map-style dataset that converts stored images to tensors on access.

    Each item is passed through ``transforms.ToTensor()`` when fetched,
    so callers receive a tensor rather than the raw stored image.
    """

    def __init__(self, images):
        super().__init__()
        self.images = images
        # Single-step pipeline: raw image -> tensor.
        self.transforms = transforms.Compose([transforms.ToTensor()])

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        # Fetch the raw image, then apply the tensor conversion.
        raw = self.images[index]
        return self.transforms(raw)
# Load Datasets and DataLoader
data = load_images_from_folder()
# Hold out the last 1000 images for testing; everything before them trains.
# NOTE(review): assumes len(data) > 1000, otherwise the train split is empty — confirm.
train_data = np.array(data[:-1000])
train_dataset = ImageDataset(train_data)
test_data = np.array(data[-1000:])
test_dataset = ImageDataset(test_data)
# bn is the batch size (defined elsewhere); only the training loader shuffles.
train_loader = DataLoader(train_dataset, batch_size=bn, shuffle=True, num_workers=4)
test_loader = DataLoader(test_dataset, batch_size=bn, num_workers=4)

# Make Training
# Fix: the loop bodies below were pasted with their indentation stripped,
# which is a syntax error in Python; restored conventional indentation.
for epoch in range(epochs + 1):
    # Train on Train Set
    model.train()
    model.mode = 'train'
    for step, original in enumerate(train_loader):
        original = original.to(device)
        # Inspect the very first batch once to verify the input value range.
        if step == 0 and epoch == 0:
            print(f'input information: mean: {torch.mean(original[0])}, max: {torch.max(original[0])}, min: {torch.min(original[0])}')

input information: mean: 0.32807812094688416, max: 1.0, min: -1.0

What’s the reason for that?

Okay, got it fixed — there was a problem with my upload/deployment configuration, so the updated code never got uploaded. It works now.