Evaluate a single element

Hi everyone, how can I evaluate my model on a single element?
Is it possible without a DataLoader?
I would be interested in evaluating my model element by element.
Sorry for my bad English :slight_smile:

If you would like to pass a single sample to your model, you could load/create the sample and add the batch dimension manually to it:

# Create the model (randomly initialized here; load a trained checkpoint for real inference)
model = models.resnet50()

# Load your sample here (or create it somehow) — a single image of shape (C, H, W) = (3, 224, 224)
x = torch.randn(3, 224, 224)

# Add batch dim: (3, 224, 224) -> (1, 3, 224, 224), since the model expects batched input
x = x.unsqueeze(0)

# Forward pass; output has batch size 1
output = model(x)

You could also call model.eval() and wrap the forward pass in a with torch.no_grad() block, if you just want to run the inference.

1 Like

Thank you very much! :slight_smile:

1 Like

Hi, there may be a problem. The CNN produces the same inference for every sample.
Here is the training procedure:

def train_regression(model, train_loader, test_loader, exp_name='train_regressor', lr=0.0001, epochs=1000, momentum=0.90, weight_decay=0.001):
    """Train a regression model on two 2-D targets with MSE loss.

    Each batch is expected to be a dict with:
      - ``batch['image']``: input tensor fed to ``model``
      - ``batch['movement']``: tensor whose columns [:2] are the (dx, dz)
        target and columns [2:4] are the (du, dv) target
        (assumed layout -- TODO confirm against the Dataset).

    The model must output at least 4 values per sample; columns [:2] are
    compared against (dx, dz) and [2:4] against (du, dv).

    Args:
        model: the network to train (moved to CUDA if available).
        train_loader, test_loader: DataLoaders yielding the dicts above.
        exp_name: Visdom environment name and checkpoint file stem.
        lr, epochs, momentum, weight_decay: SGD hyperparameters.

    Returns:
        The trained model. Side effects: logs losses to Visdom and saves
        ``<exp_name>.pth`` after every epoch.
    """
    criterion = nn.MSELoss()

    optimizer = SGD(model.parameters(), lr, momentum=momentum, weight_decay=weight_decay)

    # Running (per-epoch) averages of the two partial losses and their sum.
    loss1_meter = AverageValueMeter()
    loss2_meter = AverageValueMeter()
    total_loss_meter = AverageValueMeter()

    # Visdom line plotters, one curve per mode ('train'/'test').
    loss1_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'Loss1', 'legend': ['train', 'test']})
    loss2_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'Loss2', 'legend': ['train', 'test']})
    total_loss_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'Total Loss', 'legend': ['train', 'test']})

    visdom_saver = VisdomSaver(envs=[exp_name])

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    loader = {'train': train_loader, 'test': test_loader}

    for e in range(epochs):
        for mode in ['train', 'test']:
            loss1_meter.reset()
            loss2_meter.reset()
            total_loss_meter.reset()

            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(mode == 'train'):  # enable gradients only during training
                for i, batch in enumerate(loader[mode]):
                    x = batch['image'].to(device)
                    dxdz = batch['movement'][:, :2].float().to(device)
                    dudv = batch['movement'][:, 2:4].float().to(device)

                    output = model(x)
                    out1, out2 = output[:, :2], output[:, 2:4]

                    l1 = criterion(out1, dxdz)
                    l2 = criterion(out2, dudv)
                    l = l1 + l2

                    if mode == 'train':
                        # Zero stale gradients BEFORE backward so no leftover
                        # grads from a previous state can contaminate the step.
                        optimizer.zero_grad()
                        l.backward()
                        optimizer.step()

                    # Weight the running averages by the batch size so the
                    # epoch mean is exact even with a ragged last batch.
                    n = x.shape[0]
                    loss1_meter.add(l1.item() * n, n)
                    loss2_meter.add(l2.item() * n, n)
                    total_loss_meter.add(l.item() * n, n)

                    if mode == 'train':
                        # Fractional-epoch x-coordinate for per-batch curves.
                        step = e + (i + 1) / len(loader[mode])
                        loss1_logger.log(step, loss1_meter.value()[0], name=mode)
                        loss2_logger.log(step, loss2_meter.value()[0], name=mode)
                        total_loss_logger.log(step, total_loss_meter.value()[0], name=mode)

            # End-of-epoch point. Use e + 1 directly: it equals the last
            # per-batch x-coordinate and does not depend on the loop variable
            # (the original used `i` here, which raises NameError on an empty
            # loader).
            loss1_logger.log(e + 1, loss1_meter.value()[0], name=mode)
            loss2_logger.log(e + 1, loss2_meter.value()[0], name=mode)
            total_loss_logger.log(e + 1, total_loss_meter.value()[0], name=mode)

        visdom_saver.save()
        torch.save(model.state_dict(), '%s.pth' % exp_name)

    return model

And here is the model:

class ConvNet(nn.Module):
    """4-block CNN regressor: 9-channel 224x224 input -> 4 output values.

    Each conv block is Conv3x3 -> MaxPool2 -> ReLU; spatial size shrinks
    224 -> 111 -> 54 -> 26 -> 12, leaving a 64x12x12 = 9216-dim feature
    vector for the fully connected head.
    """

    def __init__(self):
        super(ConvNet, self).__init__()

        # (in_channels, out_channels) for the four conv blocks.
        conv_specs = [(9, 32), (32, 32), (32, 64), (64, 64)]
        blocks = []
        for c_in, c_out in conv_specs:
            blocks.append(nn.Conv2d(c_in, c_out, kernel_size=3))
            blocks.append(nn.MaxPool2d(kernel_size=2))
            blocks.append(nn.ReLU())
        # Conv layers land at Sequential indices 0, 3, 6, 9 — identical
        # state_dict keys to the hand-written version.
        self.feature_extractor = nn.Sequential(*blocks)

        # FC head: 9216 -> 64 -> 32 -> 4 (linears at indices 0, 2, 4).
        head = [
            nn.Linear(64 * 12 * 12, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 4),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.feature_extractor(x)
        # Collapse (N, 64, 12, 12) -> (N, 9216) before the FC head.
        return self.classifier(torch.flatten(feats, 1))

I opened this topic about sample-by-sample inference because I thought the inference code might be wrong. But now I don’t see why my model infers the same result for every sample.