Hi everyone, I have a problem in my code:
Traceback (most recent call last):
  File "train.py", line 395, in <module>
    num_epochs=60)
  File "train.py", line 209, in train_model
    outputs = model(inputs)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/liangzi/FNNN/model.py", line 242, in forward
    x = self.classifier(x)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/liangzi/FNNN/model.py", line 52, in forward
    x = self.add_block(x)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py", line 91, in forward
    input = module(input)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/modules/linear.py", line 55, in forward
    return F.linear(input, self.weight, self.bias)
  File "/home/liangzi/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 992, in linear
    return torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [2 x 3046], m2: [4070 x 512] at /pytorch/aten/src/THC/generic/THCTensorMathBlas.cu:249
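If I read the error right, m2: [4070 x 512] is the transposed weight of the first nn.Linear in my ClassBlock (nn.Linear(4070, 512) stores its weight as 512 x 4070), so the layer expects 4070 input features, while m1: [2 x 3046] says my batch of 2 actually arrives with only 3046 features. A minimal sketch that reproduces the same kind of mismatch (just my illustration, not the training code):

import torch
import torch.nn as nn

layer = nn.Linear(4070, 512)        # same shape as the first Linear in ClassBlock below
bad_input = torch.randn(2, 3046)    # same shape as m1 in the traceback
layer(bad_input)                    # raises an equivalent size-mismatch RuntimeError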
Part of my code is here:
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
from torchvision import models

class fusion_net(nn.Module):
    def __init__(self, class_num):
        super(fusion_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # replace the fixed average pooling with global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft
        # 2048 (ResNet-50 global feature) + 2022 (hand-crafted feature) = 4070
        self.classifier = ClassBlock(4070, class_num)
        #self.buffer1 = nn.Linear(4096, 4096, bias=1)
        hand_feature = np.ones((2, 2022))
        self.hand_feature = Variable(torch.from_numpy(hand_feature).cuda().float(), requires_grad=False)

    def forward(self, x):
        # hand-crafted features
        #hand_feature = decent_dim.dd(LOMO.LOMO(x.cpu().numpy()), dAfter=2022)
        # deep features
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        #x = torch.squeeze(x)
        x = x.reshape(2, -1)
        #print(x.shape)
        #print(type(x))
        # fuse deep and hand-crafted features
        #print(hand_feature.shape)
        x = torch.cat((x, self.hand_feature), 1)
        x = torch.squeeze(x)
        #print(x.shape)
        #x = x.reshape(1, -1)
        x = self.classifier(x)
        return x
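For reference, I expected the fused feature to be 2048 + 2022 = 4070, since ResNet-50's layer4 outputs 2048 channels and the adaptive pooling reduces each channel to a single value. A standalone check of that assumption (CPU only, with a made-up batch of two 224x224 images):

import torch
from torchvision import models

m = models.resnet50(pretrained=True)
m.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
x = torch.randn(2, 3, 224, 224)   # hypothetical input batch
x = m.conv1(x); x = m.bn1(x); x = m.relu(x); x = m.maxpool(x)
x = m.layer1(x); x = m.layer2(x); x = m.layer3(x); x = m.layer4(x)
x = m.avgpool(x)
print(x.reshape(2, -1).shape)     # torch.Size([2, 2048])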
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)

def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)
class ClassBlock(nn.Module):
    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        add_block = []
        add_block += [nn.Linear(input_dim, num_bottleneck)]
        add_block += [nn.BatchNorm1d(num_bottleneck)]
        if relu:
            add_block += [nn.LeakyReLU(0.1)]
        if dropout:
            add_block += [nn.Dropout(p=0.5)]
        add_block = nn.Sequential(*add_block)
        add_block.apply(weights_init_kaiming)

        classifier = []
        classifier += [nn.Linear(num_bottleneck, class_num)]
        classifier = nn.Sequential(*classifier)
        classifier.apply(weights_init_classifier)

        self.add_block = add_block
        self.classifier = classifier

    def forward(self, x):
        x = self.add_block(x)
        x = self.classifier(x)
        return x
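And a quick sanity check of ClassBlock on its own (class_num=751 is just a placeholder; note that BatchNorm1d needs a batch of at least 2 in training mode):

block = ClassBlock(4070, class_num=751)   # 751 is a made-up class count
dummy = torch.randn(2, 4070)              # the shape ClassBlock(4070, ...) expects
print(block(dummy).shape)                 # torch.Size([2, 751])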
What should I do?