Hi,

I am trying to implement SGDR in my training but I am not sure how to implement it in PyTorch.

I want the learning rate to reset every epoch.

Here is my code:

```
model = ConvolutionalAutoEncoder().to(device)
# model = nn.DataParallel(model)
# Loss and optimizer
learning_rate = 0.1
weight_decay = 0.005
momentum = 0.9
# criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=momentum)
# optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader), eta_min=learning_rate)
params = list(model.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
num_epochs = 30
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, data in enumerate(train_loader):
# data = Variable(data, requires_grad=True)
# print(data)
inp, targ = data
inp = inp.to(device)
targ = targ.to(device)
# inp = Variable(inp, requires_grad=True).to(device)
# targ = Variable(targ).to(device)
output = model(inp)
# scheduler.zero_grad()
loss = F.binary_cross_entropy(output, targ)
loss.backward()
scheduler.step()
if i % 50 == 0:
for param_group in optimizer.param_groups:
print("Current learning rate is: {}".format(param_group['lr']))
print("Epoch[{}/{}]({}/{}): Loss: {:.4f}".format(epoch+1,num_epochs, i, len(train_loader), loss.item()))
```

But Iâ€™m not seeing any change in the learning rate. Please help.

Thanks