This post describes an improvement to MMSegmentation: adding the Kappa coefficient as an evaluation metric. Hopefully it is a useful reference for developers who need it.
The change is simple: replace the contents of mmseg\evaluation\metrics\iou_metric.py with the code below.
The modified metric reports both per-class Kappa coefficients and the mean Kappa coefficient (mKappa).
Usage: add 'mKappa' to val_evaluator in your dataset config, e.g. val_evaluator = dict(type='mmseg.IoUMetric', iou_metrics=['mFscore', 'mIoU', 'mKappa']).
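For reference, here is a minimal sketch of the evaluator section of a dataset config with the new metric enabled; the surrounding keys are omitted and the test_evaluator alias simply follows the common MMSegmentation config convention, so adapt it to your own setup:

# Evaluator section of a dataset config (other keys omitted).
# 'mKappa' is handled by the modified IoUMetric below: per-class Kappa shows up
# in the per-class table, and the averaged value is reported as 'mKappa'.
val_evaluator = dict(
    type='mmseg.IoUMetric',
    iou_metrics=['mFscore', 'mIoU', 'mKappa'])
test_evaluator = val_evaluator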
You are welcome to follow 大地主 on CSDN and ABCnutter (github.com); more content is on the way.
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence

import numpy as np
import torch
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger, print_log
from mmengine.utils import mkdir_or_exist
from PIL import Image
from prettytable import PrettyTable

from mmseg.registry import METRICS


@METRICS.register_module()
class IoUMetric(BaseMetric):
    """IoU evaluation metric.

    Args:
        ignore_index (int): Index that will be ignored in evaluation.
            Default: 255.
        iou_metrics (list[str] | str): Metrics to be calculated, the options
            include 'mIoU', 'mDice', 'mFscore' and 'mKappa'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.
        collect_device (str): Device name used for collecting results from
            different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        output_dir (str): The directory for output prediction. Defaults to
            None.
        format_only (bool): Only format result for results commit without
            perform evaluation. It is useful when you want to save the result
            to a specific format and submit it to the test server.
            Defaults to False.
        prefix (str, optional): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
    """

    def __init__(self,
                 ignore_index: int = 255,
                 iou_metrics: List[str] = ['mIoU'],
                 nan_to_num: Optional[int] = None,
                 beta: int = 1,
                 collect_device: str = 'cpu',
                 output_dir: Optional[str] = None,
                 format_only: bool = False,
                 prefix: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(collect_device=collect_device, prefix=prefix)

        self.ignore_index = ignore_index
        self.metrics = iou_metrics
        self.nan_to_num = nan_to_num
        self.beta = beta
        self.output_dir = output_dir
        if self.output_dir and is_main_process():
            mkdir_or_exist(self.output_dir)
        self.format_only = format_only

    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data and data_samples.

        The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of outputs from the model.
        """
        num_classes = len(self.dataset_meta['classes'])
        for data_sample in data_samples:
            pred_label = data_sample['pred_sem_seg']['data'].squeeze()
            # format_only always for test dataset without ground truth
            if not self.format_only:
                label = data_sample['gt_sem_seg']['data'].squeeze().to(
                    pred_label)
                self.results.append(
                    self.intersect_and_union(pred_label, label, num_classes,
                                             self.ignore_index))
            # format_result
            if self.output_dir is not None:
                basename = osp.splitext(
                    osp.basename(data_sample['img_path']))[0]
                png_filename = osp.abspath(
                    osp.join(self.output_dir, f'{basename}.png'))
                output_mask = pred_label.cpu().numpy()
                # The index range of official ADE20k dataset is from 0 to 150.
                # But the index range of output is from 0 to 149.
                # That is because we set reduce_zero_label=True.
                if data_sample.get('reduce_zero_label', False):
                    output_mask = output_mask + 1
                output = Image.fromarray(output_mask.astype(np.uint8))
                output.save(png_filename)

    def compute_metrics(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
                the metrics, and the values are corresponding results. The key
                mainly includes aAcc, mIoU, mAcc, mDice, mFscore, mPrecision,
                mRecall and mKappa.
        """
        logger: MMLogger = MMLogger.get_current_instance()
        if self.format_only:
            logger.info(
                f'results are saved to {osp.dirname(self.output_dir)}')
            return OrderedDict()
        # convert list of tuples to tuple of lists, e.g.
        # [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to
        # ([A_1, ..., A_n], ..., [D_1, ..., D_n])
        results = tuple(zip(*results))
        assert len(results) == 4

        total_area_intersect = sum(results[0])
        total_area_union = sum(results[1])
        total_area_pred_label = sum(results[2])
        total_area_label = sum(results[3])
        ret_metrics = self.total_area_to_metrics(
            total_area_intersect, total_area_union, total_area_pred_label,
            total_area_label, self.metrics, self.nan_to_num, self.beta)

        class_names = self.dataset_meta['classes']

        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        metrics = dict()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                metrics[key] = val
            else:
                metrics['m' + key] = val

        # each class table
        ret_metrics.pop('aAcc', None)
        # ret_metrics.pop('Kappa', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)

        return metrics

    @staticmethod
    def intersect_and_union(pred_label: torch.tensor, label: torch.tensor,
                            num_classes: int, ignore_index: int):
        """Calculate Intersection and Union.

        Args:
            pred_label (torch.tensor): Prediction segmentation map
                or predict result filename. The shape is (H, W).
            label (torch.tensor): Ground truth segmentation map
                or label filename. The shape is (H, W).
            num_classes (int): Number of categories.
            ignore_index (int): Index that will be ignored in evaluation.

        Returns:
            torch.Tensor: The intersection of prediction and ground truth
                histogram on all classes.
            torch.Tensor: The union of prediction and ground truth histogram
                on all classes.
            torch.Tensor: The prediction histogram on all classes.
            torch.Tensor: The ground truth histogram on all classes.
        """

        mask = (label != ignore_index)
        pred_label = pred_label[mask]
        label = label[mask]

        intersect = pred_label[pred_label == label]
        area_intersect = torch.histc(
            intersect.float(), bins=(num_classes), min=0,
            max=num_classes - 1).cpu()
        area_pred_label = torch.histc(
            pred_label.float(), bins=(num_classes), min=0,
            max=num_classes - 1).cpu()
        area_label = torch.histc(
            label.float(), bins=(num_classes), min=0,
            max=num_classes - 1).cpu()
        area_union = area_pred_label + area_label - area_intersect
        return area_intersect, area_union, area_pred_label, area_label

    @staticmethod
    def total_area_to_metrics(total_area_intersect: np.ndarray,
                              total_area_union: np.ndarray,
                              total_area_pred_label: np.ndarray,
                              total_area_label: np.ndarray,
                              metrics: List[str] = ['mIoU'],
                              nan_to_num: Optional[int] = None,
                              beta: int = 1):
        """Calculate evaluation metrics.

        Args:
            total_area_intersect (np.ndarray): The intersection of prediction
                and ground truth histogram on all classes.
            total_area_union (np.ndarray): The union of prediction and ground
                truth histogram on all classes.
            total_area_pred_label (np.ndarray): The prediction histogram on
                all classes.
            total_area_label (np.ndarray): The ground truth histogram on
                all classes.
            metrics (List[str] | str): Metrics to be evaluated, 'mIoU',
                'mDice', 'mFscore' and 'mKappa'.
            nan_to_num (int, optional): If specified, NaN values will be
                replaced by the numbers defined by the user. Default: None.
            beta (int): Determines the weight of recall in the combined score.
                Default: 1.

        Returns:
            Dict[str, np.ndarray]: per category evaluation metrics,
                shape (num_classes, ).
        """

        def f_score(precision, recall, beta=1):
            """calculate the f-score value.

            Args:
                precision (float | torch.Tensor): The precision value.
                recall (float | torch.Tensor): The recall value.
                beta (int): Determines the weight of recall in the combined
                    score. Default: 1.

            Returns:
                [torch.tensor]: The f-score value.
            """
            score = (1 + beta**2) * (precision * recall) / (
                (beta**2 * precision) + recall)
            return score

        if isinstance(metrics, str):
            metrics = [metrics]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore', 'mKappa']
        if not set(metrics).issubset(set(allowed_metrics)):
            raise KeyError(f'metrics {metrics} is not supported')

        all_acc = total_area_intersect.sum() / total_area_label.sum()
        ret_metrics = OrderedDict({'aAcc': all_acc})
        for metric in metrics:
            if metric == 'mIoU':
                iou = total_area_intersect / total_area_union
                acc = total_area_intersect / total_area_label
                ret_metrics['IoU'] = iou
                ret_metrics['Acc'] = acc
            elif metric == 'mDice':
                dice = 2 * total_area_intersect / (
                    total_area_pred_label + total_area_label)
                acc = total_area_intersect / total_area_label
                ret_metrics['Dice'] = dice
                ret_metrics['Acc'] = acc
            elif metric == 'mFscore':
                precision = total_area_intersect / total_area_pred_label
                recall = total_area_intersect / total_area_label
                f_value = torch.tensor([
                    f_score(x[0], x[1], beta)
                    for x in zip(precision, recall)
                ])
                ret_metrics['Fscore'] = f_value
                ret_metrics['Precision'] = precision
                ret_metrics['Recall'] = recall
            elif metric == 'mKappa':
                # Per-class Kappa: po is the observed agreement for the class
                # (its recall), pe is the chance agreement estimated from the
                # prediction and ground-truth marginals.
                total = total_area_label.sum()
                po = total_area_intersect / total_area_label
                pe = (total_area_pred_label * total_area_label) / (total ** 2)
                kappa = (po - pe) / (1 - pe)
                ret_metrics['Kappa'] = kappa

        ret_metrics = {
            metric:
            value.numpy() if isinstance(value, torch.Tensor) else value
            for metric, value in ret_metrics.items()
        }
        if nan_to_num is not None:
            ret_metrics = OrderedDict({
                metric: np.nan_to_num(metric_value, nan=nan_to_num)
                for metric, metric_value in ret_metrics.items()
            })
        return ret_metrics
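To make the mKappa branch concrete, here is a toy check of the per-class formula it implements, kappa = (po - pe) / (1 - pe), where po is the class recall and pe the chance agreement estimated from the prediction and ground-truth marginals. The pixel counts below are made up purely for illustration:

import torch

# Made-up pixel counts for a 2-class toy example.
total_area_intersect = torch.tensor([50., 30.])   # correctly predicted pixels per class
total_area_pred_label = torch.tensor([60., 40.])  # predicted pixels per class
total_area_label = torch.tensor([60., 40.])       # ground-truth pixels per class

total = total_area_label.sum()                               # 100 valid pixels overall
po = total_area_intersect / total_area_label                 # per-class observed agreement (recall)
pe = (total_area_pred_label * total_area_label) / total**2   # per-class chance agreement
kappa = (po - pe) / (1 - pe)

print(kappa)         # tensor([0.7396, 0.7024]) -> per-class Kappa
print(kappa.mean())  # tensor(0.7210)           -> averaged into mKappa (x100 and rounded in the summary)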
That concludes this post on adding the Kappa coefficient metric to MMSegmentation; I hope it helps.