# Semantic segmentation metric

Hi, everyone.
I have trained a semantic segmentation model and I want to calculate three metrics:
mIoU, F1 score, and overall accuracy.
I used the following simple code for the calculation:

class SemSeg_Metric(object):
    """Accumulate a confusion matrix over prediction batches and derive
    semantic-segmentation metrics: per-class IoU, overall pixel accuracy,
    per-class precision and recall.

    Rows of the confusion matrix index the ground-truth class, columns the
    predicted class, so entry [i, j] counts pixels of true class i that were
    predicted as class j.
    """

    def __init__(self, num_class):
        # num_class: number of valid classes; labels are expected in
        # [0, num_class); anything outside that range (e.g. an ignore
        # label such as 255) is masked out in _generate_matrix.
        self.num_class = num_class
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

    def Intersection_over_Union(self):
        """Return the per-class IoU vector: TP / (TP + FP + FN).

        The small epsilon keeps classes that never appear (zero denominator)
        at IoU 0 instead of producing NaN, consistent with precision/recall.
        """
        TP = np.diag(self.confusion_matrix)
        # row sum = TP + FN (all true pixels), col sum = TP + FP (all predicted)
        denom = (np.sum(self.confusion_matrix, axis=1)
                 + np.sum(self.confusion_matrix, axis=0) - TP)
        IoU = TP / (denom + 1e-7)
        return IoU

    def Pixel_Accuracy(self):
        """Return overall accuracy: correctly classified pixels / all pixels."""
        Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
        return Acc

    def precision(self):
        """Return per-class precision: TP / (TP + FP)."""
        TP = np.diag(self.confusion_matrix)
        FP = np.sum(self.confusion_matrix, axis=0) - TP
        return TP / (TP + FP + 1e-7)

    def recall(self):
        """Return per-class recall: TP / (TP + FN)."""
        TP = np.diag(self.confusion_matrix)
        FN = np.sum(self.confusion_matrix, axis=1) - TP
        return TP / (TP + FN + 1e-7)

    def _generate_matrix(self, gt_image, pre_image):
        """Build the confusion matrix for one (gt, prediction) pair.

        Pixels whose ground-truth label falls outside [0, num_class) are
        ignored. Each (gt, pred) pair is encoded as a single flat index
        gt * num_class + pred, counted with bincount, and reshaped.
        """
        mask = (gt_image >= 0) & (gt_image < self.num_class)
        # BUG FIX: 'label' was previously undefined — this encoding line
        # was missing from the original code.
        label = self.num_class * gt_image[mask].astype(int) + pre_image[mask]
        count = np.bincount(label, minlength=self.num_class ** 2)
        confusion_matrix = count.reshape(self.num_class, self.num_class)
        return confusion_matrix

    def add_batch(self, gt_image, pre_image):
        """Accumulate one batch into the running confusion matrix.

        BUG FIX: this method header was missing in the original code, leaving
        the two statements below orphaned even though the caller invokes
        evaluator.add_batch(target, pred).
        """
        assert gt_image.shape == pre_image.shape
        self.confusion_matrix += self._generate_matrix(gt_image, pre_image)

    def reset(self):
        """Clear all accumulated counts (e.g. between epochs)."""
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

I call this class using this line:

evaluator.add_batch(target, pred) # pred size: 1xHxW , target size: 1xHxW
Then I print the result.

My question is:
Is there any mistake in the above code?