The pretrained model checkpoint is loaded here:
# Resume training state from a checkpoint file, if one exists at args.resume.
if os.path.isfile(args.resume):
    print("=> loading checkpoint '{}'".format(args.resume))
    # NOTE(review): torch.load without map_location restores tensors to the
    # device they were saved on — confirm a compatible device when resuming.
    checkpoint = torch.load(args.resume)
    args.start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(args.resume, checkpoint['epoch']))
    # Free the (potentially large) checkpoint dict before training starts.
    del checkpoint
else:
    print("=> no checkpoint found at '{}'".format(args.resume))

#print("before fusing model",model)
fuse_module(model)
print(model.state_dict())

cudnn.benchmark = True

# Inspect every learnable parameter after fusing.
# NOTE(review): loop body reconstructed — the original paste replaced it with
# the note "this is where i am printing weight"; printing key/value matches
# that description. Confirm against the original script.
for key, value in model.named_parameters():
    print(key, value)
class BinConv2d(nn.Module):  # TODO: rename — also wraps Linear layers, not just Conv2d
    """Binary-activation block: BatchNorm -> BinActive -> [Dropout] -> Conv2d/Linear -> ReLU.

    With ``Linear=False`` (default) the block applies ``nn.BatchNorm2d`` and
    ``nn.Conv2d``; with ``Linear=True`` it applies ``nn.BatchNorm1d`` and
    ``nn.Linear`` instead. ``kernel_size``/``stride``/``padding`` default to
    -1 as "unset" sentinels and are only meaningful in the conv case.
    """

    def __init__(self, input_channels, output_channels,
                 kernel_size=-1, stride=-1, padding=-1, groups=1, dropout=0,
                 Linear=False):
        super(BinConv2d, self).__init__()
        self.layer_type = 'BinConv2d'
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dropout_ratio = dropout
        # Dropout module only exists when a non-zero ratio was requested;
        # forward() guards on dropout_ratio before using it.
        if dropout != 0:
            self.dropout = nn.Dropout(dropout)
        self.Linear = Linear
        if not self.Linear:
            self.bn = nn.BatchNorm2d(input_channels, eps=1e-4, momentum=0.1, affine=True)
            self.conv = nn.Conv2d(input_channels, output_channels,
                                  kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
        else:
            self.bn = nn.BatchNorm1d(input_channels, eps=1e-4, momentum=0.1, affine=True)
            self.linear = nn.Linear(input_channels, output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.bn(x)
        # BinActive binarizes the activations; it is defined elsewhere in this file.
        x = BinActive()(x)
        if self.dropout_ratio != 0:
            x = self.dropout(x)
        if not self.Linear:
            x = self.conv(x)
        else:
            x = self.linear(x)
        x = self.relu(x)
        return x
class AlexNet(nn.Module):
    """AlexNet variant whose intermediate layers use binary activations (BinConv2d).

    The first conv layer and the final classifier layer stay full-precision;
    the feature extractor expects a 3-channel input that reduces to a
    256x6x6 map before the fully-connected stack.
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
            nn.BatchNorm2d(96, eps=1e-4, momentum=0.1, affine=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            BinConv2d(96, 256, kernel_size=5, stride=1, padding=2, groups=1),
            nn.MaxPool2d(kernel_size=3, stride=2),
            BinConv2d(256, 384, kernel_size=3, stride=1, padding=1),
            BinConv2d(384, 384, kernel_size=3, stride=1, padding=1, groups=1),
            BinConv2d(384, 256, kernel_size=3, stride=1, padding=1, groups=1),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            BinConv2d(256 * 6 * 6, 4096, Linear=True),
            BinConv2d(4096, 4096, dropout=0.5, Linear=True),
            nn.BatchNorm1d(4096, eps=1e-3, momentum=0.1, affine=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        # Flatten the 256x6x6 feature map before the fully-connected stack.
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x