Here `inputs` is wrapped with `requires_grad` left at its default, and `labels` must have `requires_grad=False`. My question is: do `tmp_conv` and `h_init`, which are created inside `forward`, end up with `requires_grad=True`? Many thanks.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """AlexNet-style conv stem feeding a recurrent-style step.

    Fixes over the pasted original:
    - restored indentation (the paste had none, so the snippet was not valid
      Python) and replaced the ``......`` placeholders with comments;
    - ``hidden_dim`` / ``out_dim`` / ``batch_size`` were undefined module
      globals; they are now constructor parameters with defaults;
    - dropped the deprecated ``Variable`` wrapper (PyTorch >= 0.4 tensors
      carry autograd directly) and ``F.sigmoid`` (use ``torch.sigmoid``);
    - intermediate tensors are created on the same device/dtype as the
      input, so a ``.cuda()`` model keeps working.

    On the poster's question: tensors created inside ``forward`` default to
    ``requires_grad=False``.  ``tmp_conv`` nevertheless joins the autograd
    graph, because copying ``inputs`` (a non-leaf that requires grad through
    the conv weights) into it is a recorded in-place op — gradients flow
    back to the convolution.  ``h_init`` stays grad-free: nothing that
    requires grad is written into it, and that is fine, since gradients are
    needed for the *parameters* (``fc_h2l``), not the random initial state.
    """

    def __init__(self, hidden_dim=20, out_dim=10, batch_size=4):
        super(Net, self).__init__()
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.batch_size = batch_size  # kept for compatibility; forward uses the actual batch
        # alexnet-style stem
        self.conv1 = nn.Conv2d(3, 20, 5, stride=1)
        self.conv1_bn = nn.BatchNorm2d(20)
        # for initial hidden state -> first step projection
        self.fc_h2l = nn.Linear(hidden_dim, out_dim)

    def forward(self, inputs):
        """Run the conv stem and build the step buffers.

        inputs: (batch, 3, H, W) image batch.
        Returns (tmp_conv, step_init):
          tmp_conv  - (2, batch, 20, H', W') buffer with the conv features in
                      slot 0 (slot 1 left zero, as in the original snippet);
          step_init - (batch, out_dim) sigmoid projection of a random hidden
                      state.
        """
        # alexnet stem: conv -> batchnorm -> relu -> 3x3/2 max-pool
        inputs = F.max_pool2d(
            F.relu(self.conv1_bn(self.conv1(inputs))), (3, 3), stride=2
        )
        batch = inputs.size(0)  # derive batch from the data, not a global
        # Buffer to store per-step conv features. new_zeros matches the
        # device/dtype of `inputs`; requires_grad defaults to False, but the
        # in-place copy below makes it part of the graph.
        tmp_conv = inputs.new_zeros(
            2, batch, inputs.size(1), inputs.size(2), inputs.size(3)
        )
        tmp_conv[0] = inputs  # slice assignment copies; no .clone() needed
        # ... (further steps were elided with "......" in the original post)

        # Random initial hidden state on the input's device; deliberately
        # grad-free (it is data, not a parameter).
        h_init = torch.randn(
            batch, self.hidden_dim, device=inputs.device, dtype=inputs.dtype
        )
        step_init = torch.sigmoid(self.fc_h2l(h_init))
        # ... (rest of forward elided in the original post)
        return tmp_conv, step_init
# ---- model + training-batch setup ----
alexnet = Net()
# Move to GPU only when one is available, instead of an unconditional
# .cuda() that crashes on CPU-only machines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
alexnet.to(device)

##### train
# Variable is deprecated (PyTorch >= 0.4): tensors carry autograd directly.
# Note: leaf tensors default to requires_grad=False, so neither the inputs
# nor the labels need an explicit flag — gradients are accumulated in the
# model *parameters*, not in the input images or targets.
inputs = inpt.to(device)
labels = labels.to(device)