This is the stacktrace:
AttributeError Traceback (most recent call last)
/tmp/ipython-input-2698559059.py in <cell line: 0>()
12 lr_scheduler.step()
13 # evaluate on the test dataset
---> 14 evaluate(model, data_loader_test, device=device)
4 frames
/usr/local/lib/python3.12/dist-packages/torch/utils/_contextlib.py in decorate_context(*args, **kwargs)
118 def decorate_context(*args, **kwargs):
119 with ctx_factory():
--> 120 return func(*args, **kwargs)
121
122 return decorate_context
/content/engine.py in evaluate(model, data_loader, device)
110
111 # accumulate predictions from all images
--> 112 coco_evaluator.accumulate()
113 coco_evaluator.summarize()
114 torch.set_num_threads(n_threads)
/content/coco_eval.py in accumulate(self)
49 def accumulate(self):
50 for coco_eval in self.coco_eval.values():
---> 51 coco_eval.accumulate()
52
53 def summarize(self):
/usr/local/lib/python3.12/dist-packages/pycocotools/cocoeval.py in accumulate(self, p)
376 fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
377
--> 378 tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
379 fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
380 for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
/usr/local/lib/python3.12/dist-packages/numpy/__init__.py in __getattr__(attr)
392
393 if attr in __former_attrs__:
--> 394 raise AttributeError(__former_attrs__[attr])
395
396 if attr in __expired_attributes__:
AttributeError: module 'numpy' has no attribute 'float'.
`np.float` was a deprecated alias for the builtin `float`. To
avoid this error in existing code, use `float` by itself. Doing this
will not modify any behavior and is safe. If you specifically wanted the
numpy scalar type, use `np.float64` here. The aliases was originally
deprecated in NumPy 1.20; for more details and guidance see the original
release note at: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
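From the trace, the failure is inside pycocotools' cocoeval.py rather than in my own code, and the message itself says np.float was only ever an alias for the builtin float (deprecated in NumPy 1.20 and, as far as I know, removed in 1.24). The only stopgap I can think of is to restore the alias before evaluation runs, along these lines (an untested sketch; the monkey-patch is my own idea, not something from the tutorial):

import numpy as np

# Untested workaround sketch: re-create the alias that cocoeval.py still references.
# Per the error message, np.float was only ever an alias for the builtin float.
if not hasattr(np, "float"):
    np.float = float  # restore the alias removed in NumPy 1.24 for old pycocotools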
This is all of my code. I do not import numpy at all. I’m working in a Google Colab environment.
%%shell
pip install cython
# Install pycocotools from the latest source on GitHub
pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
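(One thing I am not sure about: this cell replaces Colab's preinstalled pycocotools with a build from the cocodataset/cocoapi repository, and I don't know whether that source tree has been updated since NumPy 1.24 removed the np.float alias. If it has not, pinning NumPy in the same cell, e.g. pip install 'numpy<1.24', might sidestep the error, but I would rather understand the real fix.)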
import os
import torch
import torchvision
import torch.utils.data
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from torchvision.io import read_image
from torchvision.ops.boxes import masks_to_boxes
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F
from torchvision.transforms import v2 as T
#print(torch.__version__)
#2.8.0+cu126
#print(torchvision.__version__)
#0.23.0+cu126
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
img_dir = "/content/drive/MyDrive/compvis/test"
# Define Dataset
class sensor_image(torch.utils.data.Dataset):
def __init__(self, root, transforms=None, target_transform=None):
self.root = root
self.transforms = transforms
self.target_transform = target_transform
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "img"))))
self.masks = list(sorted(os.listdir(os.path.join(root, "mask"))))
def __getitem__(self, idx):
# load images and masks
img_path = os.path.join(self.root, "img", self.imgs[idx])
mask_path = os.path.join(self.root, "mask", self.masks[idx])
img = read_image(img_path)
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = read_image(mask_path)
# Convert from image object to array
#mask = np.array(mask)
obj_ids = torch.unique(mask)
# the first id is background and lower values are noise, so keep only the last four object ids
obj_ids = obj_ids[-4:]
num_objs = len(obj_ids)
# split the color-encoded mask into a set
# of binary masks
masks = (mask == obj_ids[:, None, None]).to(dtype=torch.uint8)
#print("masks", masks)
# get bounding box coordinates for each mask
#num_objs = len(obj_ids)
#boxes = []
#for i in range(num_objs):
# pos = np.where(masks[i])
# xmin = np.min(pos[1])
# xmax = np.max(pos[1])
# ymin = np.min(pos[0])
# ymax = np.max(pos[0])
# boxes.append([xmin, ymin, xmax, ymax])
#boxes = torch.as_tensor(boxes, dtype=torch.float32)
#print("boxes", boxes)
boxes = masks_to_boxes(masks)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
#print("labels", labels)
#image_id = torch.tensor([idx])
image_id = idx
#print("image_id", image_id)
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
# Wrap sample and targets into torchvision tv_tensors:
img = tv_tensors.Image(img)
target = {}
target["boxes"] = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=F.get_size(img))
target["labels"] = labels
target["masks"] = tv_tensors.Mask(masks)
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
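To rule the dataset itself out, a quick check along these lines (a hypothetical snippet, reusing the img_dir defined above) prints the shapes of one raw sample:

# Hypothetical sanity check: load one sample without transforms and inspect it.
ds_check = sensor_image(img_dir)
img_check, target_check = ds_check[0]
print(img_check.shape)              # (C, H, W) uint8 image tensor
print(target_check["boxes"].shape)  # (num_objs, 4) boxes in XYXY format
print(target_check["masks"].shape)  # (num_objs, H, W) binary masks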
os.system("wget https://raw.githubusercontent.com/pytorch/vision/main/references/detection/engine.py")
os.system("wget https://raw.githubusercontent.com/pytorch/vision/main/references/detection/utils.py")
os.system("wget https://raw.githubusercontent.com/pytorch/vision/main/references/detection/coco_utils.py")
os.system("wget https://raw.githubusercontent.com/pytorch/vision/main/references/detection/coco_eval.py")
os.system("wget https://raw.githubusercontent.com/pytorch/vision/main/references/detection/transforms.py")
def get_transform(train):
transforms = [ ]
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
transforms.append(T.ToDtype(torch.float, scale=True))
transforms.append(T.ToPureTensor())
return T.Compose(transforms)
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
num_classes = 4
def get_instance_segmentation_model(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights="DEFAULT")
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
# use our dataset and defined transformations
dataset = sensor_image(img_dir, get_transform(train=True))
dataset_test = sensor_image(img_dir, get_transform(train=False))
import utils
# split the dataset in train and test set
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-1])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-1:])
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=True, num_workers=2,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=2,
collate_fn=utils.collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# get the model using our helper function
model = get_instance_segmentation_model(num_classes)
# move model to the right device
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.001,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
# Optional: sanity-check a single forward pass before training
images, targets = next(iter(data_loader))
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in t.items()} for t in targets]
output = model(images, targets)
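(In training mode with targets supplied, Mask R-CNN returns a dict of losses rather than predictions, so the forward pass above can be verified with something like:)

# The training-mode output is a loss dict (loss_classifier, loss_box_reg,
# loss_mask, loss_objectness, loss_rpn_box_reg); print it to confirm the pass ran.
print({name: round(loss.item(), 4) for name, loss in output.items()})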
from engine import train_one_epoch, evaluate
# let’s train it for 10 epochs
from torch.optim.lr_scheduler import StepLR
num_epochs = 10
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
# the AttributeError above is raised here, on the evaluate() call
How do I fix this?