added evaluation script for PPHumanSeg model #130

Merged

merged 9 commits, Feb 22, 2023
12 changes: 12 additions & 0 deletions models/human_segmentation_pphumanseg/README.md
@@ -22,6 +22,18 @@ python demo.py --help

![messi](./examples/messi.jpg)

---
Results of accuracy evaluation with [tools/eval](../../tools/eval).

| Models             | Accuracy |
| ------------------ | -------- |
| PPHumanSeg         | 0.9570   |
| PPHumanSeg quant\* | 0.4557   |

\*: 'quant' stands for 'quantized'.

---
## License

All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
21 changes: 21 additions & 0 deletions tools/eval/README.md
@@ -21,6 +21,7 @@ Supported datasets:
- [LFW](#lfw)
- [ICDAR](#ICDAR2003)
- [IIIT5K](#iiit5k)
- [Mini Supervisely](#mini-supervisely)

## ImageNet

@@ -190,4 +191,24 @@ Run evaluation with the following command:

```shell
python eval.py -m crnn -d iiit5k -dr /path/to/iiit5k
```


## Mini Supervisely

### Prepare data
Please download the Mini Supervisely dataset from [here](https://paddleseg.bj.bcebos.com/humanseg/data/mini_supervisely.zip), which includes the validation set, and unzip it.
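
The dataset loader expects a `val.txt` at the dataset root, where each line pairs an input image path with its annotation path. A minimal sketch of that parsing (the path below is a placeholder):

```python
# sketch of how val.txt is consumed (see tools/eval/datasets/minisupervisely.py)
with open("/path/to/mini_supervisely/val.txt") as f:
    pairs = [line.strip().split() for line in f]  # [[image_path, mask_path], ...]
```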

### Evaluation

Run evaluation with the following command:

```shell
python eval.py -m pphumanseg -d mini_supervisely -dr /path/to/mini_supervisely
```

Run evaluation of the quantized model with the following command:

```shell
python eval.py -m pphumanseg_q -d mini_supervisely -dr /path/to/mini_supervisely
```
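
For reference, the same evaluation can be driven programmatically. A minimal sketch using the dataset class directly (the `model` object is any wrapper exposing `.name` and `.infer()`, as `eval.py` assumes; the path is a placeholder):

```python
from datasets import MiniSupervisely

dataset = MiniSupervisely(root="/path/to/mini_supervisely")
dataset.eval(model)     # `model` must expose .name and .infer(image)
dataset.print_result()  # prints mean IoU and mean accuracy
```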
4 changes: 3 additions & 1 deletion tools/eval/datasets/__init__.py
@@ -3,6 +3,7 @@
from .lfw import LFW
from .icdar import ICDAR
from .iiit5k import IIIT5K
from .minisupervisely import MiniSupervisely

class Registery:
def __init__(self, name):
@@ -20,4 +21,5 @@ def register(self, item):
DATASETS.register(WIDERFace)
DATASETS.register(LFW)
DATASETS.register(ICDAR)
DATASETS.register(IIIT5K)
DATASETS.register(MiniSupervisely)
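
For context, the registry simply maps each dataset class to its name so that `eval.py` can instantiate datasets from the string in its config dict. A rough sketch of the idea (not the repo's exact implementation):

```python
# illustrative registry sketch; the real Registery class may differ in detail
class Registry:
    def __init__(self, name):
        self._name = name
        self._dict = {}

    def register(self, item):
        # key by class name, e.g. "MiniSupervisely"
        self._dict[item.__name__] = item

    def get(self, key):
        return self._dict[key]
```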
206 changes: 206 additions & 0 deletions tools/eval/datasets/minisupervisely.py
@@ -0,0 +1,206 @@
import os
import cv2 as cv
import numpy as np
from tqdm import tqdm


class MiniSupervisely:

    def __init__(self, root):
self.root = root
self.val_path = os.path.join(root, 'val.txt')
self.image_set = self.load_data(self.val_path)
self.num_classes = 2
self.miou = -1
self.class_miou = -1
self.acc = -1
self.class_acc = -1


@property
def name(self):
return self.__class__.__name__


    def load_data(self, val_path):
        """
        Load the validation image set from the val.txt file.
        Args:
            val_path (str): path to the val.txt file
        Returns:
            image_set (list): list of [input image path, expected mask path] pairs
        """

        image_set = []
        with open(val_path, 'r') as f:
            for line in f.readlines():
                image_set.append(line.strip().split())

        return image_set


    def eval(self, model):
        """
        Evaluate the model on the validation set.
        Args:
            model (object): PP_HumanSeg model object
        """

        # per-class accumulators over the whole validation set
        intersect_area_all = np.zeros(self.num_classes)
        pred_area_all = np.zeros(self.num_classes)
        label_area_all = np.zeros(self.num_classes)

        pbar = tqdm(self.image_set)

        for input_image, expected_image in pbar:
            pbar.set_description(
                "Evaluating {} with {} val set".format(model.name, self.name))
Member: These two lines should be placed outside the for loop since the description does not need to be updated every single iteration.

Contributor (Author): This was adapted from the eval script for icdar. I've changed it for this script; should I also change it for icdar?

Member: Not in this pull request.

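The suggested change would look roughly like this (a sketch of the reviewer's point, not the committed code):

```python
# set the description once; tqdm still renders per-iteration progress
pbar = tqdm(self.image_set)
pbar.set_description(
    "Evaluating {} with {} val set".format(model.name, self.name))

for input_image, expected_image in pbar:
    ...  # per-image work
```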

            input_image = cv.imread(os.path.join(self.root, input_image))
            input_image = cv.resize(input_image, (192, 192))

            expected_image = cv.imread(os.path.join(self.root, expected_image), cv.IMREAD_GRAYSCALE)
            expected_image = cv.resize(expected_image, (192, 192))[np.newaxis, :, :]

            output_image = model.infer(input_image)

            intersect_area, pred_area, label_area = self.calculate_area(
                output_image,
                expected_image,
                self.num_classes)
            # accumulate per-class areas element-wise across the validation set
            intersect_area_all = intersect_area_all + np.array(intersect_area)
            pred_area_all = pred_area_all + np.array(pred_area)
            label_area_all = label_area_all + np.array(label_area)

        self.class_miou, self.miou = self.mean_iou(intersect_area_all, pred_area_all,
                                                   label_area_all)
        self.class_acc, self.acc = self.accuracy(intersect_area_all, pred_area_all)


    def get_results(self):
        """
        Get evaluation results.
        Returns:
            miou (float): mean IoU
            class_miou (list): IoU on all classes
            acc (float): mean accuracy
            class_acc (list): accuracy on all classes
        """
return self.miou, self.class_miou, self.acc, self.class_acc


    def print_result(self):
        """
        Print evaluation results.
        """
        print("Mean IoU : ", self.miou)
        print("Mean Accuracy : ", self.acc)


    def one_hot(self, arr, max_size):
        # index rows of the identity matrix: row i is the one-hot vector for label i
        return np.eye(max_size)[arr]


    def calculate_area(self, pred, label, num_classes, ignore_index=255):
        """
        Calculate intersection, prediction and label areas.
        Args:
            pred (Tensor): The prediction by the model.
            label (Tensor): The ground truth of the image.
            num_classes (int): The unique number of target classes.
            ignore_index (int): Specifies a target value that is ignored. Default: 255.
        Returns:
            Tensor: The intersection area of prediction and ground truth for each class.
            Tensor: The prediction area for each class.
            Tensor: The ground truth area for each class.
        """

        # mask out ignore_index pixels: after the +1 shift they become 0 and
        # fall outside the one-hot slice taken below
        mask = label != ignore_index
        pred = pred + 1
        label = label + 1
        pred = pred * mask
        label = label * mask

        pred = self.one_hot(pred, num_classes + 1)
        label = self.one_hot(label, num_classes + 1)

pred = pred[:, :, :, 1:]
label = label[:, :, :, 1:]

pred_area = []
label_area = []
intersect_area = []

        # iterate over all classes and accumulate their respective areas
        for i in range(num_classes):
            pred_i = pred[:, :, :, i]
            label_i = label[:, :, :, i]
            pred_area_i = np.sum(pred_i)
            label_area_i = np.sum(label_i)
            intersect_area_i = np.sum(pred_i * label_i)
            pred_area.append(pred_area_i)
            label_area.append(label_area_i)
            intersect_area.append(intersect_area_i)

return intersect_area, pred_area, label_area

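To see why the `+1` shift is used: pixels equal to `ignore_index` are zeroed by the mask and then fall outside the `[..., 1:]` one-hot slice, so they count toward no class at all. A tiny illustration (toy values, not repo code):

```python
import numpy as np

label = np.array([[0, 255], [1, 1]])   # 255 marks an ignored pixel
mask = label != 255                    # [[True, False], [True, True]]
shifted = (label + 1) * mask           # [[1, 0], [2, 2]]; ignored pixel -> 0
one_hot = np.eye(3)[shifted][..., 1:]  # one channel per real class; 0 maps to neither
```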

    def mean_iou(self, intersect_area, pred_area, label_area):
        """
        Calculate IoU.
        Args:
            intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
            pred_area (Tensor): The prediction area on all classes.
            label_area (Tensor): The ground truth area on all classes.
        Returns:
            np.ndarray: IoU on all classes.
            float: mean IoU of all classes.
        """
intersect_area = np.array(intersect_area)
pred_area = np.array(pred_area)
label_area = np.array(label_area)

union = pred_area + label_area - intersect_area

class_iou = []
for i in range(len(intersect_area)):
if union[i] == 0:
iou = 0
else:
iou = intersect_area[i] / union[i]
class_iou.append(iou)

miou = np.mean(class_iou)

return np.array(class_iou), miou


    def accuracy(self, intersect_area, pred_area):
        """
        Calculate accuracy.
        Args:
            intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
            pred_area (Tensor): The prediction area on all classes.
        Returns:
            np.ndarray: accuracy on all classes.
            float: mean accuracy.
        """

intersect_area = np.array(intersect_area)
pred_area = np.array(pred_area)

class_acc = []
for i in range(len(intersect_area)):
if pred_area[i] == 0:
acc = 0
else:
acc = intersect_area[i] / pred_area[i]
class_acc.append(acc)

macc = np.sum(intersect_area) / np.sum(pred_area)

return np.array(class_acc), macc
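
As a quick sanity check of the helpers above (illustrative only; this snippet is not part of the repo):

```python
import numpy as np

# bypass __init__ since no dataset files are needed for this check
ds = MiniSupervisely.__new__(MiniSupervisely)

pred  = np.array([[[0, 1], [1, 1]]])  # fake prediction, shape (1, 2, 2)
label = np.array([[[0, 1], [0, 1]]])  # fake ground truth, same shape

inter, parea, larea = ds.calculate_area(pred, label, num_classes=2)
class_iou, miou = ds.mean_iou(inter, parea, larea)  # [0.5, 0.667], miou ~0.583
class_acc, macc = ds.accuracy(inter, parea)         # [1.0, 0.667], macc = 0.75
```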
11 changes: 11 additions & 0 deletions tools/eval/eval.py
@@ -77,6 +77,14 @@
name="CRNN",
topic="text_recognition",
modelPath=os.path.join(root_dir, "models/text_recognition_crnn/text_recognition_CRNN_EN_2021sep.onnx")),
pphumanseg=dict(
name="PPHumanSeg",
topic="human_segmentation",
modelPath=os.path.join(root_dir, "models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2021oct.onnx")),
pphumanseg_q=dict(
name="PPHumanSeg",
topic="human_segmentation",
modelPath=os.path.join(root_dir, "models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2021oct-act_int8-wt_int8-quantized.onnx")),
)

datasets = dict(
@@ -97,6 +105,9 @@
iiit5k=dict(
name="IIIT5K",
topic="text_recognition"),
mini_supervisely=dict(
name="MiniSupervisely",
topic="human_segmentation"),
)

def main(args):