Hi ,
i am beginner in deep learning and pytorch , in my project i want to extract feature using pre-trained model then used these feature to train SVM classifier, how can i use hinge loss in pytorch? when i use nn.MultiMarginLoss() i get the error:
Traceback (most recent call last):
File "<ipython-input-1-740ff909cca9>", line 1, in <module>
runfile('C:/Users/Windows10/Downloads/Hala3/main-run-vr.py', wdir='C:/Users/Windows10/Downloads/Hala3')
File "C:\Users\Windows10\Anaconda3\envs\New\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\Windows10\Anaconda3\envs\New\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/Windows10/Downloads/Hala3/main-run-vr.py", line 548, in <module>
__main__()
File "C:/Users/Windows10/Downloads/Hala3/main-run-vr.py", line 546, in __main__
evalInterval, evalMode, numWorkers, outDir,modelUsed,pretrained,train_test_split,datasetDir,crossValidation,nFolds)
File "C:/Users/Windows10/Downloads/Hala3/main-run-vr.py", line 421, in main_run
model,accuracy=modelTrain(modelUsed,pretrained,trainDataset,trainLabels,validationDataset,validationLabels,numEpochs,evalInterval,evalMode,outDir,numWorkers,lr, stepSize, decayRate, trainBatchSize, seqLen,True)
File "C:/Users/Windows10/Downloads/Hala3/main-run-vr.py", line 303, in modelTrain
loss = lossFn(labelVariable, outputLabel)
File "C:\Users\Windows10\Anaconda3\envs\New\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\Windows10\Anaconda3\envs\New\lib\site-packages\torch\nn\modules\loss.py", line 1151, in forward
weight=self.weight, reduction=self.reduction)
File "C:\Users\Windows10\Anaconda3\envs\New\lib\site-packages\torch\nn\functional.py", line 2386, in multi_margin_loss
return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
RuntimeError: _thnn_multi_margin_loss_forward not supported on CPUType for Long
can anyone tell me if my code is ok or not
lossFn = nn.MultiMarginLoss()
optimizerFn = torch.optim.RMSprop(trainParams, lr=lr)
optimizerFn.zero_grad()
optimScheduler = torch.optim.lr_scheduler.StepLR(optimizerFn, stepSize, decayRate)
minAccuracy = 50
train_loss=[]
val_loss=[]
train_acc=[]
val_acc=[]
bestmodel=None
for epoch in range(numEpochs):
optimScheduler.step()
epochLoss = 0
numCorrTrain = 0
iterPerEpoch = 0
model.train(True)
print('Epoch = {}'.format(epoch + 1))
writer.add_scalar('lr', optimizerFn.param_groups[0]['lr'], epoch+1)
for i, (inputs, targets) in enumerate(trainLoader):
iterPerEpoch += 1
optimizerFn.zero_grad()
if(torch.cuda.is_available()):
inputVariable1 = Variable(inputs.permute(1, 0, 2, 3, 4).cuda()).cpu()
labelVariable = Variable(targets.cuda()).cpu()
else:
inputVariable1=Variable(inputs.permute(1,0,2,3,4))
labelVariable=Variable(targets)
outputLabel = model(inputVariable1)
features = model(inputVariable1)
features = features.detach().cpu().numpy()
labelVariable=(labelVariable.data).detach().cpu().numpy()
svclassifier.fit(features, labelVariable)
outputLabel= svclassifier.predict(features)
labelVariable = torch.LongTensor(labelVariable)
outputLabel = torch.LongTensor(outputLabel)
loss = lossFn(labelVariable, outputLabel)
loss.backward()
optimizerFn.step()
#outputProb = torch.nn.Softmax(dim=1)(outputLabel)
#_, predicted = torch.max(outputProb.data, 1)
_, predicted= outputLabel
if(torch.cuda.is_available()):
numCorrTrain += (predicted == targets.cuda()).sum()
else:
numCorrTrain+=(predicted==targets).sum()
epochLoss += loss.item()