`Invalid scalar type` when calling dist.scatter() on a boolean tensor

I am trying to scatter a boolean tensor, but it fails with the error `Invalid scalar type`, while the same call works fine with dtype=uint8. Could anyone help with this?

torch version: 1.13.0+cu117

import torch as th
import torch.distributed as dist

def worker(rank, world_size):
    ...  # process-group setup omitted here; full repro below
    # dtype = th.uint8   # this works
    dtype = th.bool      # this fails with "Invalid scalar type"
    scatter_list = [th.tensor([True, True, True, True], dtype=dtype) for _ in range(4)]
    gather_list = [th.tensor([False, False, False, False], dtype=dtype) for _ in range(4)]
    for i in range(world_size):
        # each rank takes a turn as src; non-src ranks pass an empty scatter list
        dist.scatter(gather_list[i], scatter_list if i == rank else [], src=i)
File "/home/ubuntu/.local/lib/python3.6/site-packages/torch/distributed/distributed_c10d.py", line 2290, in scatter
    work.wait()
RuntimeError: Invalid scalar type
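
Since the same call succeeds with dtype=th.uint8, for now I round-trip through uint8 around the collective as a stopgap. This is only a sketch of the idea (the scatter_bool helper name is just illustrative, and it assumes the extra copies are acceptable):

def scatter_bool(output, scatter_list, src, rank):
    # cast to uint8, run the collective, then cast the result back to bool
    out_u8 = output.to(th.uint8)
    src_u8 = [t.to(th.uint8) for t in scatter_list] if rank == src else []
    dist.scatter(out_u8, src_u8, src=src)
    output.copy_(out_u8.to(th.bool))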

The same error also occurs with other collectives. Here is a full repro using all_reduce:

import os
import torch
from torch import distributed as dist
import torch.multiprocessing as mp

def worker(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    device = 'cpu'  # or 'cuda'
    dist.init_process_group('gloo' if device == 'cpu' else 'nccl', rank=rank, world_size=world_size)

    dtype = torch.bool
    # dtype = torch.uint8  # this works
    tensor = torch.tensor([0, 1], dtype=dtype)
    dist.all_reduce(tensor)  # raises RuntimeError: Invalid scalar type with dtype=torch.bool

if __name__ == '__main__':
    mp.spawn(worker, args=(2,), nprocs=2, join=True)
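
A similar uint8 round-trip should work for all_reduce as well. Again just a sketch of what I mean, not a proper fix: I reduce with ReduceOp.MAX so that 0/1 values behave like a logical OR without any risk of uint8 overflow, then cast back to bool:

t_u8 = tensor.to(torch.uint8)                 # bool -> uint8 before the collective
dist.all_reduce(t_u8, op=dist.ReduceOp.MAX)   # MAX over 0/1 values == logical OR
tensor = t_u8.bool()                          # cast the reduced result back to bool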

Could someone help with this?

This looks like a bug. Could you report the issue on GitHub? Issues · pytorch/pytorch · GitHub

Reported: [Distributed] `Invalid scalar type` when `dist.scatter()` boolean tensor · Issue #90245 · pytorch/pytorch · GitHub