DistributedSampler: the result is the same for every epoch, and even every time I run it

Hi, I ran into a problem while testing a simple piece of DistributedDataParallel code.

In short, I just want to check whether the subset of the dataset assigned to each process by DistributedSampler is shuffled as it should be. However, the result is the same in every epoch, and even identical every time I run the script.

Does anyone have any insight into this?

The code is shown below (PyTorch 1.1.0 or 1.3.1).

Run it with:

CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 test.py

test.py:
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler


torch.distributed.init_process_group(backend="nccl")

input_size = 5
output_size = 2
batch_size = 2
data_size = 16

local_rank = torch.distributed.get_rank()  # global rank; equals the local rank on a single node
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

class RandomDataset(Dataset):
    def __init__(self, size, length, local_rank):
        self.len = length
        # each row is filled with its 1-based index (all 1s, all 2s, ..., all 16s)
        # so every sample is easy to identify in the printed output
        self.data = torch.stack(
            [torch.ones(size) * (i + 1) for i in range(length)]).to('cuda')
        self.local_rank = local_rank

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len
    

dataset = RandomDataset(input_size, data_size, local_rank)
sampler = DistributedSampler(dataset)  # shuffles by default
rand_loader = DataLoader(dataset=dataset,
                         batch_size=batch_size,
                         sampler=sampler)
    
class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output
    
model = Model(input_size, output_size)

model.to(device)
if torch.cuda.device_count() > 1:
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)   

e = 0
while e < 2:
    for data in rand_loader:
        input_var = data  # data is already on the GPU
        _ = model(input_var)
        print(input_var)
    e += 1
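
To see the symptom directly, one can skip the model entirely and just print the index order the sampler yields for this rank in each epoch (a minimal check reusing the sampler defined above):

# iterate the sampler itself; without set_epoch() these index lists
# come out identical for every epoch and for every run
for epoch in range(2):
    print("rank", local_rank, "epoch", epoch, ":", list(iter(sampler)))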

I have solved this. Just add sampler.set_epoch(e) at the start of each epoch, i.e. right after while e < 2:, like this:

e = 0
while e < 2:
    # set_epoch changes the seed used for shuffling, so each epoch
    # gets a different permutation of the indices
    sampler.set_epoch(e)
    for data in rand_loader:
        print(data)
    e += 1
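
Why this works: DistributedSampler shuffles deterministically with a torch.Generator seeded from the epoch counter (newer versions use seed + epoch), so if set_epoch() is never called the seed never changes and every epoch, and every run, produces the same permutation. Below is a rough sketch of the index logic, simplified from the PyTorch source; the helper name sampler_indices is mine and the details differ slightly between versions:

import torch

def sampler_indices(dataset_len, num_replicas, rank, epoch, seed=0):
    # deterministic shuffle: the generator is seeded from seed + epoch,
    # so the permutation only changes when the epoch value changes
    g = torch.Generator()
    g.manual_seed(seed + epoch)
    indices = torch.randperm(dataset_len, generator=g).tolist()
    # pad so the list splits evenly, then take this rank's strided slice
    total_size = ((dataset_len + num_replicas - 1) // num_replicas) * num_replicas
    indices += indices[: total_size - len(indices)]
    return indices[rank:total_size:num_replicas]

# with 16 samples and 2 processes, rank 0 gets half of the permutation;
# a different epoch value gives a different permutation
print(sampler_indices(16, num_replicas=2, rank=0, epoch=0))
print(sampler_indices(16, num_replicas=2, rank=0, epoch=1))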