Post training quantized model gets the error "Copying from quantized Tensor to non-quantized Tensor is not allowed" even though I'm not copying tensor

I got a pretrained ResNet-18 model from this lane detection repo in order to use it as an ADAS (advanced driver assistance system) function for an electric car-building competition. My current goal is to quantize this model's .pth file to make it smaller and possibly faster to run on a Raspberry Pi 5. I tried to follow several different quantization tutorials (such as this one) and implement them on my model. However, when I do so it gives a bunch of errors and I'm not sure why I'm getting them — the traceback doesn't state which line causes the error.

here’s my code:

import torch, os, cv2
from model.model import parsingNet #basically the neural network that we apply weights to
from utils.common import merge_config
import torch
import scipy.special
import tqdm #provides progress bars
import numpy as np
import torchvision.transforms as transforms
from data.dataset import LaneTestDataset
from data.constant import culane_row_anchor
import tkinter as tk
import numpy as np
from PIL import Image, ImageTk
import time

if __name__ == "__main__":
    # Set up file logging: delete any previous debugging.log so each run
    # starts with a fresh log instead of appending to old output.
    import logging
    logger = logging.getLogger(__name__)
    if os.path.isfile("debugging.log"):
        os.remove("debugging.log")
    # NOTE: the encoding= argument to basicConfig requires Python 3.9+.
    logging.basicConfig(filename='debugging.log', encoding='utf-8', level=logging.DEBUG)

    def quantize_existing_model(model, saveto = "quantized.p", calib_loader = None, num_batches = 10):
        """Eager-mode post-training static quantization of ``model``.

        Attaches observers, runs a few calibration batches through the model,
        converts it to int8 in place, and saves the quantized state_dict to
        ``weights/<saveto>``.  Returns the converted model.

        ``calib_loader`` defaults to the module-level ``loader`` so existing
        callers keep working; ``num_batches`` bounds the calibration pass.

        NOTE(review): per the PyTorch eager-mode workflow, the model's forward
        pass should begin with a QuantStub and end with a DeQuantStub --
        confirm parsingNet does this when built with quantized=True.
        """
        model.eval()
        model.qconfig = torch.quantization.default_qconfig
        torch.quantization.prepare(model, inplace=True)  # insert observers

        if calib_loader is None:
            calib_loader = loader  # fall back to the script-level DataLoader

        print("calibrating model for quantization")
        with torch.no_grad():
            for i, data in enumerate(tqdm.tqdm(calib_loader)):
                # Bound the calibration pass; the original check-after-forward
                # plus post-decrement ran one batch more than intended.
                if i >= num_batches:
                    break
                img, names = data
                # Feed `model` (the function argument), not the global `net`:
                # the original fed `net`, which only worked because the caller
                # happened to pass the very same object.
                model(img)

        torch.quantization.convert(model, inplace=True)  # observers -> int8 modules
        os.makedirs("weights", exist_ok=True)  # don't crash if the dir is missing
        torch.save(model.state_dict(), os.path.join("weights", saveto))
        return model

    torch.backends.cudnn.benchmark = True

    # merge_config() combines the command-line arguments (args) with the
    # values from the config file named on the command line (cfg).
    args, cfg = merge_config()

    if cfg.dataset == 'CULane':
        cls_num_per_lane = 18  # row anchors per lane for CULane
    else:
        raise NotImplementedError

    # griding_num is the number of COLUMNS the image is divided into; the +1
    # adds one extra class.  .cuda() is omitted: CPU-only setup, and the
    # auxiliary segmentation head is not needed at test time.
    net = parsingNet(weights=None, backbone=cfg.backbone, cls_dim = (cfg.griding_num+1, cls_num_per_lane, 4),
                    use_aux=False, quantized=args.quantized)

    state_dict = torch.load(cfg.test_model, map_location='cpu')

    # Strip the 'module.' prefix that nn.DataParallel adds to every key.
    # startswith() ensures only a true prefix is removed: the original
    # `'module.' in k` test would also mangle a key that merely contains
    # 'module.' somewhere in the middle.
    compatible_state_dict = {}
    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[len('module.'):]
        compatible_state_dict[k] = v
    print("cccccc")

    net.load_state_dict(compatible_state_dict)
    print("dddddddd")
    net.eval()

    # Same preprocessing the repo uses: resize to the network's 288x800 input
    # and normalize with ImageNet statistics.
    img_transforms = transforms.Compose([
        transforms.Resize((288, 800)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    if cfg.dataset == 'CULane':
        # Only one split is needed for this project (the full repo also ships
        # test1_crowd.txt ... test8_night.txt).
        split = 'frames2.txt'
        # Loads the dataset from the path listed in the split file.
        dataset = LaneTestDataset(cfg.data_root, cfg.data_root+'/list/test_split/'+split, img_transform = img_transforms)
        img_w, img_h = 1640, 590  # native CULane frame size
        row_anchor = culane_row_anchor  # see data/constant.py for details
    else:
        raise NotImplementedError

    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle = False, num_workers=1)

    if(args.do_quantization):
        # NOTE(review): args.do_quantization is forwarded as the save file
        # name -- confirm merge_config yields a string here, not a bare flag.
        quantize_existing_model(net, args.do_quantization)
        # os.EX_OK exists only on Unix; a literal 0 means success everywhere,
        # including the Windows machine the traceback above was produced on.
        os._exit(0)

    # Main inference loop over the test split; results go to debugging.log.
    for i, data in enumerate(tqdm.tqdm(loader)): #tqdm is an iterator which acts exactly like the original iterable, but prints a dynamically updating progressbar every time a value is requested.
        img, names = data
        #img = img.cuda() #disabled because I don't have gpu
        with torch.no_grad():
            out = net(img) #feeding data into neural network

        #I am sure that this part works correctly
        # Column grid over the 800-px-wide network input; col_sample_w is the
        # pixel width of one grid cell.
        col_sample = np.linspace(0, 800, cfg.griding_num) #why 800? see img_transforms Resize above
        col_sample_w = col_sample[1] - col_sample[0]

        # out[0]: class scores, presumably shaped
        # (griding_num+1, cls_num_per_lane, 4) per the cls_dim passed to
        # parsingNet above -- TODO confirm against the model code.
        out_j = out[0].data.cpu().numpy()
        out_j = out_j[:, ::-1, :]  # flip along the row-anchor axis
        # Softmax over the first griding_num classes only; the last class is
        # excluded (presumably the "no lane" background class -- verify).
        prob = scipy.special.softmax(out_j[:-1, :, :], axis=0)
        idx = np.arange(cfg.griding_num) + 1
        idx = idx.reshape(-1, 1, 1)
        # Probability-weighted (expected) column index per anchor and lane.
        loc = np.sum(prob * idx, axis=0)
        out_j = np.argmax(out_j, axis=0)
        # Where the argmax picked the last (excluded) class, zero out the
        # location, i.e. mark "no lane detected" at that anchor.
        loc[out_j == cfg.griding_num] = 0
        out_j = loc 

        logging.debug(out_j)  # per-anchor column estimates for each lane

#to run:
#python nameOfThis.py configs/culane.py --test_model .\weights\quant6_2_26.p --quantized

As you can see this is a heavily modified version of the code from repo I previously mentioned. I made the quantized version of the model using the quantize_existing_model function in this code, hopefully that’s not the broken part. And when I try to run this code I get these errors:

Traceback (most recent call last):
  File "C:\Users\ekimy\Documents\uni icin\solar team\ai biseyleri\Ultra-Fast-Lane-Detection\showcase on internet.py", line 71, in <module>
    net.load_state_dict(compatible_state_dict)
    ~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 2581, in load_state_dict
    raise RuntimeError(
    ...<3 lines>...
    )
RuntimeError: Error(s) in loading state_dict for parsingNet:
        Missing key(s) in state_dict: "cls.0.weight", "cls.0.bias", "cls.2.weight", "cls.2.bias".
        Unexpected key(s) in state_dict: "quant.scale", "quant.zero_point", "model.conv1.bias", "model.conv1.scale", "model.conv1.zero_point", "model.bn1.scale", "model.bn1.zero_point", "model.layer1.0.conv1.bias", "model.layer1.0.conv1.scale", "model.layer1.0.conv1.zero_point", "model.layer1.0.bn1.scale", "model.layer1.0.bn1.zero_point", "model.layer1.0.conv2.bias", "model.layer1.0.conv2.scale", "model.layer1.0.conv2.zero_point", "model.layer1.0.bn2.scale", "model.layer1.0.bn2.zero_point", "model.layer1.1.conv1.bias", "model.layer1.1.conv1.scale", "model.layer1.1.conv1.zero_point", "model.layer1.1.bn1.scale", "model.layer1.1.bn1.zero_point", "model.layer1.1.conv2.bias", "model.layer1.1.conv2.scale", "model.layer1.1.conv2.zero_point", "model.layer1.1.bn2.scale", "model.layer1.1.bn2.zero_point", "model.layer2.0.conv1.bias", "model.layer2.0.conv1.scale", "model.layer2.0.conv1.zero_point", "model.layer2.0.bn1.scale", "model.layer2.0.bn1.zero_point", "model.layer2.0.conv2.bias", "model.layer2.0.conv2.scale", "model.layer2.0.conv2.zero_point", "model.layer2.0.bn2.scale", "model.layer2.0.bn2.zero_point", "model.layer2.0.downsample.0.bias", "model.layer2.0.downsample.0.scale", "model.layer2.0.downsample.0.zero_point", "model.layer2.0.downsample.1.scale", "model.layer2.0.downsample.1.zero_point", "model.layer2.1.conv1.bias", "model.layer2.1.conv1.scale", "model.layer2.1.conv1.zero_point", "model.layer2.1.bn1.scale", "model.layer2.1.bn1.zero_point", "model.layer2.1.conv2.bias", "model.layer2.1.conv2.scale", "model.layer2.1.conv2.zero_point", "model.layer2.1.bn2.scale", "model.layer2.1.bn2.zero_point", "model.layer3.0.conv1.bias", "model.layer3.0.conv1.scale", "model.layer3.0.conv1.zero_point", "model.layer3.0.bn1.scale", "model.layer3.0.bn1.zero_point", "model.layer3.0.conv2.bias", "model.layer3.0.conv2.scale", "model.layer3.0.conv2.zero_point", "model.layer3.0.bn2.scale", "model.layer3.0.bn2.zero_point", "model.layer3.0.downsample.0.bias", 
"model.layer3.0.downsample.0.scale", "model.layer3.0.downsample.0.zero_point", "model.layer3.0.downsample.1.scale", "model.layer3.0.downsample.1.zero_point", "model.layer3.1.conv1.bias", "model.layer3.1.conv1.scale", "model.layer3.1.conv1.zero_point", "model.layer3.1.bn1.scale", "model.layer3.1.bn1.zero_point", "model.layer3.1.conv2.bias", "model.layer3.1.conv2.scale", "model.layer3.1.conv2.zero_point", "model.layer3.1.bn2.scale", "model.layer3.1.bn2.zero_point", "model.layer4.0.conv1.bias", "model.layer4.0.conv1.scale", "model.layer4.0.conv1.zero_point", "model.layer4.0.bn1.scale", "model.layer4.0.bn1.zero_point", "model.layer4.0.conv2.bias", "model.layer4.0.conv2.scale", "model.layer4.0.conv2.zero_point", "model.layer4.0.bn2.scale", "model.layer4.0.bn2.zero_point", "model.layer4.0.downsample.0.bias", "model.layer4.0.downsample.0.scale", "model.layer4.0.downsample.0.zero_point", "model.layer4.0.downsample.1.scale", "model.layer4.0.downsample.1.zero_point", "model.layer4.1.conv1.bias", "model.layer4.1.conv1.scale", "model.layer4.1.conv1.zero_point", "model.layer4.1.bn1.scale", "model.layer4.1.bn1.zero_point", "model.layer4.1.conv2.bias", "model.layer4.1.conv2.scale", "model.layer4.1.conv2.zero_point", "model.layer4.1.bn2.scale", "model.layer4.1.bn2.zero_point", "cls.0.scale", "cls.0.zero_point", "cls.0._packed_params.dtype", "cls.0._packed_params._packed_params", "cls.2.scale", "cls.2.zero_point", "cls.2._packed_params.dtype", "cls.2._packed_params._packed_params", "pool.scale", "pool.zero_point".
        While copying the parameter named "model.conv1.weight", whose dimensions in the model are torch.Size([64, 3, 7, 7]) and whose dimensions in the checkpoint are torch.Size([64, 3, 7, 7]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer1.0.conv1.weight", whose dimensions in the model are torch.Size([64, 64, 3, 3]) and whose dimensions in the checkpoint are torch.Size([64, 64, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer1.0.conv2.weight", whose dimensions in the model are torch.Size([64, 64, 3, 3]) and whose dimensions in the checkpoint are torch.Size([64, 64, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer1.1.conv1.weight", whose dimensions in the model are torch.Size([64, 64, 3, 3]) and whose dimensions in the checkpoint are torch.Size([64, 64, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer1.1.conv2.weight", whose dimensions in the model are torch.Size([64, 64, 3, 3]) and whose dimensions in the checkpoint are torch.Size([64, 64, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer2.0.conv1.weight", whose dimensions in the model are torch.Size([128, 64, 3, 3]) and whose dimensions in the checkpoint are torch.Size([128, 64, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer2.0.conv2.weight", whose dimensions in the model are torch.Size([128, 128, 3, 3]) and whose dimensions in the checkpoint are torch.Size([128, 128, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer2.0.downsample.0.weight", whose dimensions in the model are torch.Size([128, 64, 1, 1]) and whose dimensions in the checkpoint are torch.Size([128, 64, 1, 1]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer2.1.conv1.weight", whose dimensions in the model are torch.Size([128, 128, 3, 3]) and whose dimensions in the checkpoint are torch.Size([128, 128, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer2.1.conv2.weight", whose dimensions in the model are torch.Size([128, 128, 3, 3]) and whose dimensions in the checkpoint are torch.Size([128, 128, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer3.0.conv1.weight", whose dimensions in the model are torch.Size([256, 128, 3, 3]) and whose dimensions in the checkpoint are torch.Size([256, 128, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer3.0.conv2.weight", whose dimensions in the model are torch.Size([256, 256, 3, 3]) and whose dimensions in the checkpoint are torch.Size([256, 256, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer3.0.downsample.0.weight", whose dimensions in the model are torch.Size([256, 128, 1, 1]) and whose dimensions in the checkpoint are torch.Size([256, 128, 1, 1]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer3.1.conv1.weight", whose dimensions in the model are torch.Size([256, 256, 3, 3]) and whose dimensions in the checkpoint are torch.Size([256, 256, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer3.1.conv2.weight", whose dimensions in the model are torch.Size([256, 256, 3, 3]) and whose dimensions in the checkpoint are torch.Size([256, 256, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "model.layer4.1.conv2.weight", whose dimensions in the model are torch.Size([512, 512, 3, 3]) and whose dimensions in the checkpoint are torch.Size([512, 512, 3, 3]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).
        While copying the parameter named "pool.weight", whose dimensions in the model are torch.Size([8, 512, 1, 1]) and whose dimensions in the checkpoint are torch.Size([8, 512, 1, 1]), an exception occurred : ('Copying from quantized Tensor to non-quantized Tensor is not allowed, please use dequantize to get a float Tensor from a quantized Tensor',).

I can probably get rid of the unexpected keys errors by manipulating the dictionary, but would that be ok? also what does the other errors mean? I’m not copying my tensors anywhere???

Even if you’re not explicitly saying “copy”, you are taking quantized weights from the state dictionary and trying to load them into a non-quantized model.

Try to run the following lines before you load the state dictionary:

net.qconfig = torch.quantization.default_qconfig

torch.quantization.prepare(net, inplace=True)

torch.quantization.convert(net, inplace=True)

1 Like

as for the undefined key errors:

That all happens because of a model mismatch basically. You need to create the same model architecture/layer names as you had in your pretrained model, ideally just load the same architecture form their code.

1 Like

Thank you for your helpful comments! That seems to have solved my issue, although I’m still getting key mismatch errors. I used the quantize_existing_model function at the top of the code mentioned above. Any idea why my quantized model got the keys wrong?

edit: I forgot to mention unquantized weights work perfectly fine when loaded into code, no mismatch happens. Until I attempt to quantize the model, then the quantized model has a bunch of unnecessary and missing keys. Which must mean something is wrong with the quantization function right? But what is wrong exactly? Super unsure. I tried to read the documentation for pytorch quantization, didn’t get what I was doing differently than those tutorials.

Important note: I don’t have a GPU. could that be the reason my quantization is broken and why I can’t use torchao?

Hi, sorry for the late response! I'm glad that helped with the initial issue. The key mismatch errors could be due to PyTorch changing the layer names to something quantization-related; maybe print out the layer names of both models and see if there is a mismatch that's easy to fix (like a `_quantized` suffix at the end).

Make sure to exactly mirror the saving steps in the loading! So before you load into the model, you have to do the same preparation steps you took for the saved model (possibly net.qconfig = torch.quantization.default_qconfig, torch.quantization.prepare, ……). Read carefully what they did before saving. Also, your model should include a torch.quantization.QuantStub() and a torch.quantization.DeQuantStub() as the first and last thing of the forward pass.

Good luck and let me know if you need anything else!

1 Like

Ok, I printed the names of keys in each, here they are:

model file pth:

DEBUG:__main__: model.conv1.weight                                 loaded
DEBUG:__main__: model.bn1.weight                                   loaded
DEBUG:__main__: model.bn1.bias                                     loaded
DEBUG:__main__: model.bn1.running_mean                             loaded
DEBUG:__main__: model.bn1.running_var                              loaded
DEBUG:__main__: model.bn1.num_batches_tracked                      loaded
DEBUG:__main__: model.layer1.0.conv1.weight                        loaded
DEBUG:__main__: model.layer1.0.bn1.weight                          loaded
DEBUG:__main__: model.layer1.0.bn1.bias                            loaded
DEBUG:__main__: model.layer1.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer1.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer1.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.0.conv2.weight                        loaded
DEBUG:__main__: model.layer1.0.bn2.weight                          loaded
DEBUG:__main__: model.layer1.0.bn2.bias                            loaded
DEBUG:__main__: model.layer1.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer1.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer1.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.1.conv1.weight                        loaded
DEBUG:__main__: model.layer1.1.bn1.weight                          loaded
DEBUG:__main__: model.layer1.1.bn1.bias                            loaded
DEBUG:__main__: model.layer1.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer1.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer1.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.1.conv2.weight                        loaded
DEBUG:__main__: model.layer1.1.bn2.weight                          loaded
DEBUG:__main__: model.layer1.1.bn2.bias                            loaded
DEBUG:__main__: model.layer1.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer1.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer1.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.0.conv1.weight                        loaded
DEBUG:__main__: model.layer2.0.bn1.weight                          loaded
DEBUG:__main__: model.layer2.0.bn1.bias                            loaded
DEBUG:__main__: model.layer2.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer2.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer2.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.0.conv2.weight                        loaded
DEBUG:__main__: model.layer2.0.bn2.weight                          loaded
DEBUG:__main__: model.layer2.0.bn2.bias                            loaded
DEBUG:__main__: model.layer2.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer2.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer2.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer2.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer2.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer2.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer2.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer2.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer2.1.conv1.weight                        loaded
DEBUG:__main__: model.layer2.1.bn1.weight                          loaded
DEBUG:__main__: model.layer2.1.bn1.bias                            loaded
DEBUG:__main__: model.layer2.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer2.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer2.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.1.conv2.weight                        loaded
DEBUG:__main__: model.layer2.1.bn2.weight                          loaded
DEBUG:__main__: model.layer2.1.bn2.bias                            loaded
DEBUG:__main__: model.layer2.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer2.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer2.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.0.conv1.weight                        loaded
DEBUG:__main__: model.layer3.0.bn1.weight                          loaded
DEBUG:__main__: model.layer3.0.bn1.bias                            loaded
DEBUG:__main__: model.layer3.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer3.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer3.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.0.conv2.weight                        loaded
DEBUG:__main__: model.layer3.0.bn2.weight                          loaded
DEBUG:__main__: model.layer3.0.bn2.bias                            loaded
DEBUG:__main__: model.layer3.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer3.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer3.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer3.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer3.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer3.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer3.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer3.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer3.1.conv1.weight                        loaded
DEBUG:__main__: model.layer3.1.bn1.weight                          loaded
DEBUG:__main__: model.layer3.1.bn1.bias                            loaded
DEBUG:__main__: model.layer3.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer3.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer3.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.1.conv2.weight                        loaded
DEBUG:__main__: model.layer3.1.bn2.weight                          loaded
DEBUG:__main__: model.layer3.1.bn2.bias                            loaded
DEBUG:__main__: model.layer3.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer3.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer3.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.0.conv1.weight                        loaded
DEBUG:__main__: model.layer4.0.bn1.weight                          loaded
DEBUG:__main__: model.layer4.0.bn1.bias                            loaded
DEBUG:__main__: model.layer4.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer4.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer4.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.0.conv2.weight                        loaded
DEBUG:__main__: model.layer4.0.bn2.weight                          loaded
DEBUG:__main__: model.layer4.0.bn2.bias                            loaded
DEBUG:__main__: model.layer4.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer4.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer4.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer4.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer4.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer4.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer4.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer4.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer4.1.conv1.weight                        loaded
DEBUG:__main__: model.layer4.1.bn1.weight                          loaded
DEBUG:__main__: model.layer4.1.bn1.bias                            loaded
DEBUG:__main__: model.layer4.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer4.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer4.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.1.conv2.weight                        loaded
DEBUG:__main__: model.layer4.1.bn2.weight                          loaded
DEBUG:__main__: model.layer4.1.bn2.bias                            loaded
DEBUG:__main__: model.layer4.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer4.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer4.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: cls.0.weight                                       loaded
DEBUG:__main__: cls.0.bias                                         loaded
DEBUG:__main__: cls.2.weight                                       loaded
DEBUG:__main__: cls.2.bias                                         loaded
DEBUG:__main__: pool.weight                                        loaded
DEBUG:__main__: pool.bias                                          loaded

quantized file p:

DEBUG:__main__: quant.scale                                        loaded
DEBUG:__main__: quant.zero_point                                   loaded
DEBUG:__main__: model.conv1.weight                                 loaded
DEBUG:__main__: model.conv1.bias                                   loaded
DEBUG:__main__: model.conv1.scale                                  loaded
DEBUG:__main__: model.conv1.zero_point                             loaded
DEBUG:__main__: model.bn1.weight                                   loaded
DEBUG:__main__: model.bn1.bias                                     loaded
DEBUG:__main__: model.bn1.running_mean                             loaded
DEBUG:__main__: model.bn1.running_var                              loaded
DEBUG:__main__: model.bn1.num_batches_tracked                      loaded
DEBUG:__main__: model.bn1.scale                                    loaded
DEBUG:__main__: model.bn1.zero_point                               loaded
DEBUG:__main__: model.layer1.0.conv1.weight                        loaded
DEBUG:__main__: model.layer1.0.conv1.bias                          loaded
DEBUG:__main__: model.layer1.0.conv1.scale                         loaded
DEBUG:__main__: model.layer1.0.conv1.zero_point                    loaded
DEBUG:__main__: model.layer1.0.bn1.weight                          loaded
DEBUG:__main__: model.layer1.0.bn1.bias                            loaded
DEBUG:__main__: model.layer1.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer1.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer1.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.0.bn1.scale                           loaded
DEBUG:__main__: model.layer1.0.bn1.zero_point                      loaded
DEBUG:__main__: model.layer1.0.conv2.weight                        loaded
DEBUG:__main__: model.layer1.0.conv2.bias                          loaded
DEBUG:__main__: model.layer1.0.conv2.scale                         loaded
DEBUG:__main__: model.layer1.0.conv2.zero_point                    loaded
DEBUG:__main__: model.layer1.0.bn2.weight                          loaded
DEBUG:__main__: model.layer1.0.bn2.bias                            loaded
DEBUG:__main__: model.layer1.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer1.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer1.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.0.bn2.scale                           loaded
DEBUG:__main__: model.layer1.0.bn2.zero_point                      loaded
DEBUG:__main__: model.layer1.1.conv1.weight                        loaded
DEBUG:__main__: model.layer1.1.conv1.bias                          loaded
DEBUG:__main__: model.layer1.1.conv1.scale                         loaded
DEBUG:__main__: model.layer1.1.conv1.zero_point                    loaded
DEBUG:__main__: model.layer1.1.bn1.weight                          loaded
DEBUG:__main__: model.layer1.1.bn1.bias                            loaded
DEBUG:__main__: model.layer1.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer1.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer1.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.1.bn1.scale                           loaded
DEBUG:__main__: model.layer1.1.bn1.zero_point                      loaded
DEBUG:__main__: model.layer1.1.conv2.weight                        loaded
DEBUG:__main__: model.layer1.1.conv2.bias                          loaded
DEBUG:__main__: model.layer1.1.conv2.scale                         loaded
DEBUG:__main__: model.layer1.1.conv2.zero_point                    loaded
DEBUG:__main__: model.layer1.1.bn2.weight                          loaded
DEBUG:__main__: model.layer1.1.bn2.bias                            loaded
DEBUG:__main__: model.layer1.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer1.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer1.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer1.1.bn2.scale                           loaded
DEBUG:__main__: model.layer1.1.bn2.zero_point                      loaded
DEBUG:__main__: model.layer2.0.conv1.weight                        loaded
DEBUG:__main__: model.layer2.0.conv1.bias                          loaded
DEBUG:__main__: model.layer2.0.conv1.scale                         loaded
DEBUG:__main__: model.layer2.0.conv1.zero_point                    loaded
DEBUG:__main__: model.layer2.0.bn1.weight                          loaded
DEBUG:__main__: model.layer2.0.bn1.bias                            loaded
DEBUG:__main__: model.layer2.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer2.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer2.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.0.bn1.scale                           loaded
DEBUG:__main__: model.layer2.0.bn1.zero_point                      loaded
DEBUG:__main__: model.layer2.0.conv2.weight                        loaded
DEBUG:__main__: model.layer2.0.conv2.bias                          loaded
DEBUG:__main__: model.layer2.0.conv2.scale                         loaded
DEBUG:__main__: model.layer2.0.conv2.zero_point                    loaded
DEBUG:__main__: model.layer2.0.bn2.weight                          loaded
DEBUG:__main__: model.layer2.0.bn2.bias                            loaded
DEBUG:__main__: model.layer2.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer2.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer2.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.0.bn2.scale                           loaded
DEBUG:__main__: model.layer2.0.bn2.zero_point                      loaded
DEBUG:__main__: model.layer2.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer2.0.downsample.0.bias                   loaded
DEBUG:__main__: model.layer2.0.downsample.0.scale                  loaded
DEBUG:__main__: model.layer2.0.downsample.0.zero_point             loaded
DEBUG:__main__: model.layer2.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer2.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer2.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer2.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer2.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer2.0.downsample.1.scale                  loaded
DEBUG:__main__: model.layer2.0.downsample.1.zero_point             loaded
DEBUG:__main__: model.layer2.1.conv1.weight                        loaded
DEBUG:__main__: model.layer2.1.conv1.bias                          loaded
DEBUG:__main__: model.layer2.1.conv1.scale                         loaded
DEBUG:__main__: model.layer2.1.conv1.zero_point                    loaded
DEBUG:__main__: model.layer2.1.bn1.weight                          loaded
DEBUG:__main__: model.layer2.1.bn1.bias                            loaded
DEBUG:__main__: model.layer2.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer2.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer2.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.1.bn1.scale                           loaded
DEBUG:__main__: model.layer2.1.bn1.zero_point                      loaded
DEBUG:__main__: model.layer2.1.conv2.weight                        loaded
DEBUG:__main__: model.layer2.1.conv2.bias                          loaded
DEBUG:__main__: model.layer2.1.conv2.scale                         loaded
DEBUG:__main__: model.layer2.1.conv2.zero_point                    loaded
DEBUG:__main__: model.layer2.1.bn2.weight                          loaded
DEBUG:__main__: model.layer2.1.bn2.bias                            loaded
DEBUG:__main__: model.layer2.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer2.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer2.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer2.1.bn2.scale                           loaded
DEBUG:__main__: model.layer2.1.bn2.zero_point                      loaded
DEBUG:__main__: model.layer3.0.conv1.weight                        loaded
DEBUG:__main__: model.layer3.0.conv1.bias                          loaded
DEBUG:__main__: model.layer3.0.conv1.scale                         loaded
DEBUG:__main__: model.layer3.0.conv1.zero_point                    loaded
DEBUG:__main__: model.layer3.0.bn1.weight                          loaded
DEBUG:__main__: model.layer3.0.bn1.bias                            loaded
DEBUG:__main__: model.layer3.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer3.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer3.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.0.bn1.scale                           loaded
DEBUG:__main__: model.layer3.0.bn1.zero_point                      loaded
DEBUG:__main__: model.layer3.0.conv2.weight                        loaded
DEBUG:__main__: model.layer3.0.conv2.bias                          loaded
DEBUG:__main__: model.layer3.0.conv2.scale                         loaded
DEBUG:__main__: model.layer3.0.conv2.zero_point                    loaded
DEBUG:__main__: model.layer3.0.bn2.weight                          loaded
DEBUG:__main__: model.layer3.0.bn2.bias                            loaded
DEBUG:__main__: model.layer3.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer3.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer3.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.0.bn2.scale                           loaded
DEBUG:__main__: model.layer3.0.bn2.zero_point                      loaded
DEBUG:__main__: model.layer3.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer3.0.downsample.0.bias                   loaded
DEBUG:__main__: model.layer3.0.downsample.0.scale                  loaded
DEBUG:__main__: model.layer3.0.downsample.0.zero_point             loaded
DEBUG:__main__: model.layer3.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer3.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer3.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer3.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer3.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer3.0.downsample.1.scale                  loaded
DEBUG:__main__: model.layer3.0.downsample.1.zero_point             loaded
DEBUG:__main__: model.layer3.1.conv1.weight                        loaded
DEBUG:__main__: model.layer3.1.conv1.bias                          loaded
DEBUG:__main__: model.layer3.1.conv1.scale                         loaded
DEBUG:__main__: model.layer3.1.conv1.zero_point                    loaded
DEBUG:__main__: model.layer3.1.bn1.weight                          loaded
DEBUG:__main__: model.layer3.1.bn1.bias                            loaded
DEBUG:__main__: model.layer3.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer3.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer3.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.1.bn1.scale                           loaded
DEBUG:__main__: model.layer3.1.bn1.zero_point                      loaded
DEBUG:__main__: model.layer3.1.conv2.weight                        loaded
DEBUG:__main__: model.layer3.1.conv2.bias                          loaded
DEBUG:__main__: model.layer3.1.conv2.scale                         loaded
DEBUG:__main__: model.layer3.1.conv2.zero_point                    loaded
DEBUG:__main__: model.layer3.1.bn2.weight                          loaded
DEBUG:__main__: model.layer3.1.bn2.bias                            loaded
DEBUG:__main__: model.layer3.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer3.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer3.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer3.1.bn2.scale                           loaded
DEBUG:__main__: model.layer3.1.bn2.zero_point                      loaded
DEBUG:__main__: model.layer4.0.conv1.weight                        loaded
DEBUG:__main__: model.layer4.0.conv1.bias                          loaded
DEBUG:__main__: model.layer4.0.conv1.scale                         loaded
DEBUG:__main__: model.layer4.0.conv1.zero_point                    loaded
DEBUG:__main__: model.layer4.0.bn1.weight                          loaded
DEBUG:__main__: model.layer4.0.bn1.bias                            loaded
DEBUG:__main__: model.layer4.0.bn1.running_mean                    loaded
DEBUG:__main__: model.layer4.0.bn1.running_var                     loaded
DEBUG:__main__: model.layer4.0.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.0.bn1.scale                           loaded
DEBUG:__main__: model.layer4.0.bn1.zero_point                      loaded
DEBUG:__main__: model.layer4.0.conv2.weight                        loaded
DEBUG:__main__: model.layer4.0.conv2.bias                          loaded
DEBUG:__main__: model.layer4.0.conv2.scale                         loaded
DEBUG:__main__: model.layer4.0.conv2.zero_point                    loaded
DEBUG:__main__: model.layer4.0.bn2.weight                          loaded
DEBUG:__main__: model.layer4.0.bn2.bias                            loaded
DEBUG:__main__: model.layer4.0.bn2.running_mean                    loaded
DEBUG:__main__: model.layer4.0.bn2.running_var                     loaded
DEBUG:__main__: model.layer4.0.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.0.bn2.scale                           loaded
DEBUG:__main__: model.layer4.0.bn2.zero_point                      loaded
DEBUG:__main__: model.layer4.0.downsample.0.weight                 loaded
DEBUG:__main__: model.layer4.0.downsample.0.bias                   loaded
DEBUG:__main__: model.layer4.0.downsample.0.scale                  loaded
DEBUG:__main__: model.layer4.0.downsample.0.zero_point             loaded
DEBUG:__main__: model.layer4.0.downsample.1.weight                 loaded
DEBUG:__main__: model.layer4.0.downsample.1.bias                   loaded
DEBUG:__main__: model.layer4.0.downsample.1.running_mean           loaded
DEBUG:__main__: model.layer4.0.downsample.1.running_var            loaded
DEBUG:__main__: model.layer4.0.downsample.1.num_batches_tracked    loaded
DEBUG:__main__: model.layer4.0.downsample.1.scale                  loaded
DEBUG:__main__: model.layer4.0.downsample.1.zero_point             loaded
DEBUG:__main__: model.layer4.1.conv1.weight                        loaded
DEBUG:__main__: model.layer4.1.conv1.bias                          loaded
DEBUG:__main__: model.layer4.1.conv1.scale                         loaded
DEBUG:__main__: model.layer4.1.conv1.zero_point                    loaded
DEBUG:__main__: model.layer4.1.bn1.weight                          loaded
DEBUG:__main__: model.layer4.1.bn1.bias                            loaded
DEBUG:__main__: model.layer4.1.bn1.running_mean                    loaded
DEBUG:__main__: model.layer4.1.bn1.running_var                     loaded
DEBUG:__main__: model.layer4.1.bn1.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.1.bn1.scale                           loaded
DEBUG:__main__: model.layer4.1.bn1.zero_point                      loaded
DEBUG:__main__: model.layer4.1.conv2.weight                        loaded
DEBUG:__main__: model.layer4.1.conv2.bias                          loaded
DEBUG:__main__: model.layer4.1.conv2.scale                         loaded
DEBUG:__main__: model.layer4.1.conv2.zero_point                    loaded
DEBUG:__main__: model.layer4.1.bn2.weight                          loaded
DEBUG:__main__: model.layer4.1.bn2.bias                            loaded
DEBUG:__main__: model.layer4.1.bn2.running_mean                    loaded
DEBUG:__main__: model.layer4.1.bn2.running_var                     loaded
DEBUG:__main__: model.layer4.1.bn2.num_batches_tracked             loaded
DEBUG:__main__: model.layer4.1.bn2.scale                           loaded
DEBUG:__main__: model.layer4.1.bn2.zero_point                      loaded
DEBUG:__main__: cls.0.scale                                        loaded
DEBUG:__main__: cls.0.zero_point                                   loaded
DEBUG:__main__: cls.0._packed_params.dtype                         loaded
DEBUG:__main__: cls.0._packed_params._packed_params                loaded
DEBUG:__main__: cls.2.scale                                        loaded
DEBUG:__main__: cls.2.zero_point                                   loaded
DEBUG:__main__: cls.2._packed_params.dtype                         loaded
DEBUG:__main__: cls.2._packed_params._packed_params                loaded
DEBUG:__main__: pool.weight                                        loaded
DEBUG:__main__: pool.bias                                          loaded
DEBUG:__main__: pool.scale                                         loaded
DEBUG:__main__: pool.zero_point                                    loaded

Unfortunately, the error is not caused by me forgetting to insert QuantStub and its DeQuantStub pair. Right now, after following your advice on preparing the quantized object, the only error I get is the key mismatch error.

I don’t quite understand what you mean by “you have to do the same preparation steps you took for the saved model” — can you expand on that? Or did you just mean that I should insert the following lines:

    if args.quantized:
        #prepare to load quantized weights
        net.qconfig = torch.quantization.default_qconfig
        torch.quantization.prepare(net, inplace=True)
        torch.quantization.convert(net, inplace=True)

in any case, here’s the latest error message:

Traceback (most recent call last):
  File "C:\Users\ekimy\Documents\uni icin\solar team\ai biseyleri\Ultra-Fast-Lane-Detection\processLaneDetection.py", line 108, in <module>
    out = net(img) #feeding data into neural network
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\ekimy\Documents\uni icin\solar team\ai biseyleri\Ultra-Fast-Lane-Detection\model\model.py", line 56, in forward
    x2,x3,fea = self.model(x)
                ~~~~~~~~~~^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\ekimy\Documents\uni icin\solar team\ai biseyleri\Ultra-Fast-Lane-Detection\model\backbone.py", line 53, in forward
    x = self.layer1(x)
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\container.py", line 250, in forward
    input = module(input)
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\ekimy\AppData\Local\Programs\Python\Python313\Lib\site-packages\torchvision\models\resnet.py", line 102, in forward
    out += identity
NotImplementedError: Could not run 'aten::add.out' with arguments from the 'QuantizedCPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build process (if using custom build). If you are a Facebook employee using PyTorch on mobile, please visit https://fburl.com/ptmfixes for possible resolutions. 'aten::add.out' is only available for these backends: [CPU, Meta, MkldnnCPU, SparseCPU, SparseMeta, SparseCsrCPU, SparseCsrMeta, BackendSelect, Python, FuncTorchDynamicLayerBackMode, Functionalize, Named, Conjugate, Negative, ZeroTensor, ADInplaceOrView, AutogradOther, AutogradCPU, AutogradCUDA, AutogradHIP, AutogradXLA, AutogradMPS, AutogradIPU, AutogradXPU, AutogradHPU, AutogradVE, AutogradLazy, AutogradMTIA, AutogradPrivateUse1, AutogradPrivateUse2, AutogradPrivateUse3, AutogradMeta, AutogradNestedTensor, Tracer, AutocastCPU, AutocastXPU, AutocastMPS, AutocastCUDA, FuncTorchBatched, BatchedNestedTensor, FuncTorchVmapMode, Batched, VmapMode, FuncTorchGradWrapper, PythonTLSSnapshot, FuncTorchDynamicLayerFrontMode, PreDispatch, PythonDispatcher].

After that, the output contains a number of additional lines about my CPU and the available tensor backends; I'm unsure whether those are relevant, so I've omitted them here.