I save a model from `cuda:0` (`model1`) and load it onto `cuda:1` (`model2`). After I delete `model1` and its tensors, memory on `cuda:0` is still occupied. Why is that? The following snippet reproduces the issue.

```
# Define model
class TheModelClass(nn.Module):
    """Minimal demo model: a single 10 -> 10 linear layer."""

    def __init__(self):
        super(TheModelClass, self).__init__()
        self.fc = nn.Linear(10, 10)

    def forward(self, x):
        # x: (batch, 10) -> (batch, 10); no activation, just the affine map.
        x = self.fc(x)
        return x
# NOTE(review): `model` is not defined yet at this point — it is only created
# later (`model = TheModelClass()` near the bottom of the snippet), so as
# written this line raises NameError. Move it below the model construction.
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
def torch_save(model, optimizer, epoch=0, path='test.pt'):
    """Checkpoint model + optimizer state dicts (plus an epoch counter).

    Args:
        model: module whose ``state_dict()`` is saved.
        optimizer: optimizer whose ``state_dict()`` is saved.
        epoch: epoch number stored in the checkpoint (default 0, matching
            the original hard-coded value).
        path: destination file (default ``'test.pt'``, as before).

    Saving state dicts rather than whole objects is the recommended,
    portable checkpoint format.
    """
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch,
                }, path)
def torch_load(optimizer):
    """Rebuild the model and restore model/optimizer state from 'test.pt'.

    ``map_location={'cuda:0': 'cuda:1'}`` remaps every storage that was
    saved on ``cuda:0`` onto ``cuda:1``, so the tensors materialized by
    this load live on ``cuda:1`` only.

    Args:
        optimizer: optimizer to restore in place via ``load_state_dict``.

    Returns:
        The freshly constructed model with the checkpointed weights loaded.
    """
    model = TheModelClass()
    checkpoint = torch.load('test.pt', map_location={'cuda:0': 'cuda:1'})
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print(f'epoch = {epoch}')
    return model
model = TheModelClass()
# nn.Module.to() moves a module's parameters in place and returns the same
# object, so `model` and `model1` are one and the same module on cuda:0.
model1 = model.to('cuda')
print(f'Device of model = {model.fc.weight.device}')
torch_save(model1, optimizer)
model2 = torch_load(optimizer)
del model, model1
torch.cuda.empty_cache()
# Expected only cuda:1 to hold tensors, but both cuda:0 and cuda:1 show
# memory in use. This is weird.
# NOTE(review): the memory still reported on cuda:0 is (at least) the CUDA
# context the process created when it first touched cuda:0; that context is
# never released by empty_cache(), only by process exit. The optimizer may
# also keep cuda:0 tensors alive: its param groups were built from
# model1's cuda:0 parameters and load_state_dict does not rebind them —
# confirm by inspecting optimizer.param_groups after the del.
```