Torchvision.transforms.functional.to_tensor: TypeError: __array__() takes 1 positional argument but 2 were given error:


I am having a very weird error when performing transforms.ToTensor() in the data loading of a dataset.

My current torch / numpy versions are:

pytorch == 1.9.0
torchvision == 0.10.0
numpy == 1.21.0

In particular, I am loading the rotatedMNIST dataset following this template:

class RotatedMNISTBase(VisionDataset):
    """Rotated MNIST dataset.

    Download the dataset and preprocess it as in (Cohen and Welling).

    NOTE(review): the original post lost several lines during extraction
    (download URLs, file names, decorators, closing brackets). They are
    reconstructed here following the torchvision ``MNIST`` dataset template
    that this snippet visibly mirrors; values that cannot be recovered are
    marked with TODOs — confirm against the original project.
    """

    # (url, md5) pairs consumed by download().
    # TODO(review): original URL/md5 entries were lost in the post.
    resources = [
        # ("<url to mnist_rotation .amat archive>", "<md5>"),
    ]

    # TODO(review): original file names were lost; torchvision MNIST uses
    # "" and "".
    training_file = ""
    test_file = ""

    classes = [
        "0 - zero",
        "1 - one",
        "2 - two",
        "3 - three",
        "4 - four",
        "5 - five",
        "6 - six",
        "7 - seven",
        "8 - eight",
        "9 - nine",
    ]

    def __init__(
        self, root, train=True, transform=None, target_transform=None, download=False
    ):
        """
        Args:
            root (str): Root directory of the dataset.
            train (bool): If True, load the training split, else the test split.
            transform (callable, optional): Transform applied to the PIL image.
            target_transform (callable, optional): Transform applied to the target.
            download (bool): If True, download the dataset if not present.

        Raises:
            RuntimeError: If the processed files are missing and download=False.
        """
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.train = train  # training set or test set

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError(
                "Dataset not found." + " You can use download=True to download it"
            )

        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file

        # Processed files hold a (data, targets) tuple saved by download().
        self.data, self.targets = torch.load(
            os.path.join(self.processed_folder, data_file)
        )

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.targets[index])

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode="L")

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        return len(self.data)

    @property
    def raw_folder(self):
        # download() calls os.makedirs on this, so it must be a plain path
        # (the accessors are properties in the torchvision template).
        return os.path.join(self.root, self.__class__.__name__, "raw")

    @property
    def processed_folder(self):
        return os.path.join(self.root, self.__class__.__name__, "processed")

    @property
    def class_to_idx(self):
        return {_class: i for i, _class in enumerate(self.classes)}

    def _check_exists(self):
        return os.path.exists(
            os.path.join(self.processed_folder, self.training_file)
        ) and os.path.exists(os.path.join(self.processed_folder, self.test_file))

    def download(self):
        """Download the MNIST data if it doesn't exist in processed_folder already."""

        if self._check_exists():
            return

        os.makedirs(self.raw_folder, exist_ok=True)
        os.makedirs(self.processed_folder, exist_ok=True)

        # download files
        for url, md5 in self.resources:
            filename = url.rpartition("/")[2]
            # NOTE(review): the helper name was lost; the keyword arguments
            # visible in the post match torchvision's
            # download_and_extract_archive — confirm the original import.
            download_and_extract_archive(
                url, download_root=self.raw_folder, filename=filename, md5=md5
            )

        # process and save as torch files
        train_filename = os.path.join(
            self.raw_folder, "mnist_all_rotation_normalized_float_train_valid.amat"
        )
        test_filename = os.path.join(
            self.raw_folder, "mnist_all_rotation_normalized_float_test.amat"
        )

        # .amat files are whitespace-separated floats: 784 pixel columns
        # followed by one label column per row.
        train_val = torch.from_numpy(np.loadtxt(train_filename))
        test = torch.from_numpy(np.loadtxt(test_filename))

        train_val_data = train_val[:, :-1].reshape(-1, 28, 28)
        train_val_data = (train_val_data * 256).round().type(torch.uint8)
        train_val_labels = train_val[:, -1].type(torch.uint8)
        training_set = (train_val_data[:10000], train_val_labels[:10000])
        # we ignore the validation split

        test_data = test[:, :-1].reshape(-1, 28, 28)
        test_data = (test_data * 256).round().type(torch.uint8)
        test_labels = test[:, -1].type(torch.uint8)
        test_set = (test_data, test_labels)

        with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
            torch.save(training_set, f)
        with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
            torch.save(test_set, f)

    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")

class RotatedMNIST(RotatedMNISTBase):
    """Rotated MNIST with split selection and default normalization transform.

    NOTE(review): the original ``__init__`` signature and several lines
    (else-branches, the Compose list, the super().__init__ call) were lost
    during extraction; they are reconstructed from the visible body and the
    traceback (which shows ToTensor in the transform pipeline). Confirm the
    exact signature against the original project.
    """

    def __init__(self, partition, augment="None", **kwargs):
        # Resolve the data root relative to the original working directory
        # (utils.get_original_cwd is presumably hydra's — TODO confirm).
        root = utils.get_original_cwd()
        root = os.path.join(root, "data")

        if partition == "train":
            train = True
        else:
            train = False

        # Standard MNIST channel statistics.
        data_mean = (0.1307,)
        data_stddev = (0.3081,)

        if augment == "None":
            transform = torchvision.transforms.Compose(
                [
                    torchvision.transforms.ToTensor(),
                    torchvision.transforms.Normalize(data_mean, data_stddev),
                ]
            )
        else:
            # Only the no-augmentation path was implemented in the post.
            raise NotImplementedError()

        super().__init__(
            root=root,
            train=train,
            transform=transform,
            download=True,
            **kwargs,
        )


When I iterate through the data in the training routine, I get the following error:

Original Traceback (most recent call last):
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
    data = fetcher.fetch(index)
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/homes/romeroguzman/PycharmProjects/pSym/dr/datasets/", line 107, in __getitem__
    img = self.transform(img)
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torchvision/transforms/transforms.py", line 60, in __call__
    img = t(img)
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torchvision/transforms/transforms.py", line 97, in __call__
    return F.to_tensor(pic)
  File "/homes/romeroguzman/anaconda3/envs/psym/lib/python3.9/site-packages/torchvision/transforms/functional.py", line 129, in to_tensor
    np.array(pic, dtype=mode_to_nptype.get(pic.mode, np.uint8), copy=True)
TypeError: __array__() takes 1 positional argument but 2 were given

I have the impression that this is a compatibility issue between numpy and torchvision.

At first sight, it seemed to me that np.array simply no longer supported having the dtype argument as a positional argument. To check if that was the case, I simply replaced the line in the torchvision.transforms.functional file with

np.array(pic, dtype=mode_to_nptype.get(pic.mode, np.uint8), copy=True)

Nevertheless, an equivalent error was raised.

It thus seems that np.array no longer supports the dtype argument at all (which seems quite weird to me). The problem was solved by doing

img = torch.from_numpy(
        np.array(pic, copy=True).astype(mode_to_nptype.get(pic.mode, np.uint8))
)
I am not sure if this is a problem of torchvision itself or it is a problem of the data I am using.

Any comments / insights / suggestions are much appreciated :slight_smile:


This seems to be the PIL/Pillow issue as described here.

1 Like

Hi @ptrblck, problem solved. Thank you!