From e37b7c6bdaf755d120abc3a5abe280d5f7bbdf2d Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Wed, 19 May 2021 17:40:20 +0800
Subject: [PATCH] Add Panoptic Segmentation.
---
contrib/PanopticDeepLab/README.md | 144 ++++++
.../configs/_base_/cityscapes_panoptic.yml | 55 +++
...32_cityscapes_1025x513_bs8_90k_lr00005.yml | 19 +
...2_cityscapes_2049x1025_bs1_90k_lr00005.yml | 23 +
contrib/PanopticDeepLab/core/__init__.py | 20 +
contrib/PanopticDeepLab/core/infer.py | 351 ++++++++++++++
contrib/PanopticDeepLab/core/predict.py | 189 ++++++++
contrib/PanopticDeepLab/core/train.py | 315 +++++++++++++
contrib/PanopticDeepLab/core/val.py | 181 ++++++++
contrib/PanopticDeepLab/datasets/__init__.py | 15 +
.../datasets/cityscapes_panoptic.py | 196 ++++++++
.../PanopticDeepLab/docs/panoptic_deeplab.jpg | Bin 0 -> 117991 bytes
.../docs/visualization_instance.png | Bin 0 -> 264029 bytes
.../docs/visualization_panoptic.png | Bin 0 -> 286634 bytes
.../docs/visualization_semantic.png | Bin 0 -> 302773 bytes
contrib/PanopticDeepLab/models/__init__.py | 15 +
.../models/panoptic_deeplab.py | 436 ++++++++++++++++++
contrib/PanopticDeepLab/predict.py | 149 ++++++
contrib/PanopticDeepLab/train.py | 178 +++++++
.../PanopticDeepLab/transforms/__init__.py | 15 +
.../transforms/target_transforms.py | 307 ++++++++++++
contrib/PanopticDeepLab/utils/__init__.py | 15 +
.../utils/evaluation/__init__.py | 17 +
.../utils/evaluation/instance.py | 353 ++++++++++++++
.../utils/evaluation/panoptic.py | 210 +++++++++
.../utils/evaluation/semantic.py | 84 ++++
contrib/PanopticDeepLab/utils/visualize.py | 197 ++++++++
contrib/PanopticDeepLab/val.py | 112 +++++
paddleseg/models/losses/__init__.py | 2 +
paddleseg/models/losses/cross_entropy_loss.py | 20 +-
paddleseg/models/losses/l1_loss.py | 76 +++
.../models/losses/mean_square_error_loss.py | 65 +++
32 files changed, 3755 insertions(+), 4 deletions(-)
create mode 100644 contrib/PanopticDeepLab/README.md
create mode 100644 contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml
create mode 100644 contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml
create mode 100644 contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
create mode 100644 contrib/PanopticDeepLab/core/__init__.py
create mode 100644 contrib/PanopticDeepLab/core/infer.py
create mode 100644 contrib/PanopticDeepLab/core/predict.py
create mode 100644 contrib/PanopticDeepLab/core/train.py
create mode 100644 contrib/PanopticDeepLab/core/val.py
create mode 100644 contrib/PanopticDeepLab/datasets/__init__.py
create mode 100644 contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py
create mode 100644 contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg
create mode 100644 contrib/PanopticDeepLab/docs/visualization_instance.png
create mode 100644 contrib/PanopticDeepLab/docs/visualization_panoptic.png
create mode 100644 contrib/PanopticDeepLab/docs/visualization_semantic.png
create mode 100644 contrib/PanopticDeepLab/models/__init__.py
create mode 100644 contrib/PanopticDeepLab/models/panoptic_deeplab.py
create mode 100644 contrib/PanopticDeepLab/predict.py
create mode 100644 contrib/PanopticDeepLab/train.py
create mode 100644 contrib/PanopticDeepLab/transforms/__init__.py
create mode 100644 contrib/PanopticDeepLab/transforms/target_transforms.py
create mode 100644 contrib/PanopticDeepLab/utils/__init__.py
create mode 100644 contrib/PanopticDeepLab/utils/evaluation/__init__.py
create mode 100644 contrib/PanopticDeepLab/utils/evaluation/instance.py
create mode 100644 contrib/PanopticDeepLab/utils/evaluation/panoptic.py
create mode 100644 contrib/PanopticDeepLab/utils/evaluation/semantic.py
create mode 100644 contrib/PanopticDeepLab/utils/visualize.py
create mode 100644 contrib/PanopticDeepLab/val.py
create mode 100644 paddleseg/models/losses/l1_loss.py
create mode 100644 paddleseg/models/losses/mean_square_error_loss.py
diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
new file mode 100644
index 0000000000..d4faa744fa
--- /dev/null
+++ b/contrib/PanopticDeepLab/README.md
@@ -0,0 +1,144 @@
+
+# Panoptic DeepLab
+
+A PaddlePaddle implementation of the [Panoptic DeepLab](https://arxiv.org/abs/1911.10194) panoptic segmentation algorithm.
+
+Panoptic DeepLab was the first bottom-up approach shown to reach state-of-the-art results. It predicts three outputs: semantic segmentation, center prediction, and center regression. Pixels of thing classes are grouped to their nearest instance center to form the instance segmentation result, and the semantic and instance segmentation results are then fused with a majority-vote rule to produce the final panoptic segmentation.
+In other words, segmentation is achieved by assigning every pixel either to a class or to an instance.
+![](./docs/panoptic_deeplab.jpg)
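+
+As a quick illustration of the fused output format (based on the post-processing in `core/infer.py`, where `label_divisor` is 1000 for Cityscapes): a stuff pixel stores just its class id, while a thing pixel stores `class_id * label_divisor + instance_id`. Below is a minimal sketch of decoding such an id; the class ids assume the standard Cityscapes trainId mapping (e.g. 13 = car):
+
+```python
+label_divisor = 1000                        # see datasets/cityscapes_panoptic.py
+panoptic_id = 13 * label_divisor + 2        # the 2nd "car" instance -> 13002
+class_id = panoptic_id // label_divisor     # 13, the semantic class
+instance_id = panoptic_id % label_divisor   # 2, the instance index within that class
+```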
+
+## Model Baselines
+
+### Cityscapes
+| Backbone | Batch Size |Resolution | Training Iters | PQ | SQ | RQ | AP | mIoU | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|ResNet50_OS32| 8 | 2049x1025|90000|58.35%|80.03%|71.52%|25.80%|79.18%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)|
+|ResNet50_OS32| 64 | 1025x513|90000|60.32%|80.56%|73.56%|26.77%|79.67%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)|
+
+## Environment Setup
+
+1. System requirements
+* PaddlePaddle >= 2.0.0
+* Python >= 3.6
+The GPU version of PaddlePaddle is recommended. For detailed installation instructions, please refer to the official [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/windows-pip.html) installation guide.
+
+2. Download the PaddleSeg repo
+```shell
+git clone https://github.com/PaddlePaddle/PaddleSeg
+```
+
+3. Install paddleseg
+```shell
+cd PaddleSeg
+pip install -e .
+```
+
+4. Change to the PaddleSeg/contrib/PanopticDeepLab directory
+```shell
+cd contrib/PanopticDeepLab
+```
+
+## Dataset Preparation
+
+Place the dataset under the `data` directory.
+
+### Cityscapes
+
+Download the dataset from the [Cityscapes website](https://www.cityscapes-dataset.com/) and organize it into the following structure:
+
+```
+cityscapes/
+|--gtFine/
+| |--train/
+| | |--aachen/
+| | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json,
+| | | |--*_labelTrainIds.png
+| | | |--...
+| |--val/
+| |--test/
+| |--cityscapes_panoptic_train_trainId.json
+| |--cityscapes_panoptic_train_trainId/
+| | |-- *_panoptic.png
+| |--cityscapes_panoptic_val_trainId.json
+| |--cityscapes_panoptic_val_trainId/
+| | |-- *_panoptic.png
+|--leftImg8bit/
+| |--train/
+| |--val/
+| |--test/
+
+```
+
+Install cityscapesScripts
+```shell
+pip install git+https://github.com/mcordts/cityscapesScripts.git
+```
+
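+If the `*_labelTrainIds.png` files listed above are missing, they can usually be generated with the `createTrainIdLabelImgs.py` script that ships with cityscapesScripts (the exact path and invocation may vary with the installed version, so treat the command below as a sketch):
+```shell
+export CITYSCAPES_DATASET=data/cityscapes
+python /path/to/cityscapesscripts/preparation/createTrainIdLabelImgs.py
+```
+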
+Generate the `*_panoptic.png` files with the `createPanopticImgs.py` script (locate it inside the installed cityscapesScripts package):
+```shell
+python /path/to/cityscapesscripts/preparation/createPanopticImgs.py \
+ --dataset-folder data/cityscapes/gtFine/ \
+ --output-folder data/cityscapes/gtFine/ \
+ --use-train-id
+```
+
+## Training
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # set according to the number of GPUs actually available
+python -m paddle.distributed.launch train.py \
+ --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+ --do_eval \
+ --use_vdl \
+ --save_interval 5000 \
+ --save_dir output
+```
+
+**Note:** Using --do_eval slows down training and increases GPU memory usage; enable or disable it as needed.
+
+For more information about the parameters, run:
+```shell
+python train.py --help
+```
+
+## Evaluation
+```shell
+python val.py \
+ --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+ --model_path output/iter_90000/model.pdparams
+```
+You can also download the model we provide and evaluate it directly.
+
+For more information about the parameters, run:
+```shell
+python val.py --help
+```
+
+## Prediction and Visualization
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # set according to the number of GPUs actually available
+python -m paddle.distributed.launch predict.py \
+    --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+ --model_path output/iter_90000/model.pdparams \
+ --image_path data/cityscapes/leftImg8bit/val/ \
+ --save_dir ./output/result
+```
+You can also download the model we provide and use it for prediction directly.
+
+For more information about the parameters, run:
+```shell
+python predict.py --help
+```
+Panoptic segmentation results:
+
+![](./docs/visualization_panoptic.png)
+
+Semantic segmentation results:
+
+![](./docs/visualization_semantic.png)
+
+Instance segmentation results:
+
+![](./docs/visualization_instance.png)
diff --git a/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml b/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml
new file mode 100644
index 0000000000..aa9466ac47
--- /dev/null
+++ b/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml
@@ -0,0 +1,55 @@
+train_dataset:
+ type: CityscapesPanoptic
+ dataset_root: data/cityscapes
+ transforms:
+ - type: ResizeStepScaling
+ min_scale_factor: 0.5
+ max_scale_factor: 2.0
+ scale_step_size: 0.25
+ - type: RandomPaddingCrop
+ crop_size: [2049, 1025]
+ label_padding_value: [0, 0, 0]
+ - type: RandomHorizontalFlip
+ - type: RandomDistort
+ brightness_range: 0.4
+ contrast_range: 0.4
+ saturation_range: 0.4
+ - type: Normalize
+ mode: train
+ ignore_stuff_in_offset: True
+ small_instance_area: 4096
+ small_instance_weight: 3
+
+val_dataset:
+ type: CityscapesPanoptic
+ dataset_root: data/cityscapes
+ transforms:
+ - type: Padding
+ target_size: [2049, 1025]
+ label_padding_value: [0, 0, 0]
+ - type: Normalize
+ mode: val
+ ignore_stuff_in_offset: True
+ small_instance_area: 4096
+ small_instance_weight: 3
+
+
+optimizer:
+ type: adam
+
+learning_rate:
+ value: 0.00005
+ decay:
+ type: poly
+ power: 0.9
+ end_lr: 0.0
+
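+# The three losses below are applied, in order, to the semantic, center and offset heads
+# (see loss_computation in core/train.py); coef weights them as 1 : 200 : 0.001.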
+loss:
+ types:
+ - type: CrossEntropyLoss
+ top_k_percent_pixels: 0.2
+ - type: MSELoss
+ reduction: "none"
+ - type: L1Loss
+ reduction: "none"
+ coef: [1, 200, 0.001]
diff --git a/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml
new file mode 100644
index 0000000000..445b11fbdb
--- /dev/null
+++ b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml
@@ -0,0 +1,19 @@
+_base_: ./panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
+
+batch_size: 8
+
+train_dataset:
+ transforms:
+ - type: ResizeStepScaling
+ min_scale_factor: 0.5
+ max_scale_factor: 2.0
+ scale_step_size: 0.25
+ - type: RandomPaddingCrop
+ crop_size: [1025, 513]
+ label_padding_value: [0, 0, 0]
+ - type: RandomHorizontalFlip
+ - type: RandomDistort
+ brightness_range: 0.4
+ contrast_range: 0.4
+ saturation_range: 0.4
+ - type: Normalize
diff --git a/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
new file mode 100644
index 0000000000..d35e90d98c
--- /dev/null
+++ b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
@@ -0,0 +1,23 @@
+_base_: ../_base_/cityscapes_panoptic.yml
+
+batch_size: 1
+iters: 90000
+
+model:
+ type: PanopticDeepLab
+ backbone:
+ type: ResNet50_vd
+ output_stride: 32
+ pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
+ backbone_indices: [2,1,0,3]
+ aspp_ratios: [1, 3, 6, 9]
+ aspp_out_channels: 256
+ decoder_channels: 256
+ low_level_channels_projects: [128, 64, 32]
+ align_corners: True
+ instance_aspp_out_channels: 256
+ instance_decoder_channels: 128
+ instance_low_level_channels_projects: [64, 32, 16]
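+  # the 'center' head predicts 1 channel (heatmap) and the 'offset' head 2 channels (y, x)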
+ instance_num_classes: [1, 2]
+ instance_head_channels: 32
+ instance_class_key: ["center", "offset"]
diff --git a/contrib/PanopticDeepLab/core/__init__.py b/contrib/PanopticDeepLab/core/__init__.py
new file mode 100644
index 0000000000..3358db4d38
--- /dev/null
+++ b/contrib/PanopticDeepLab/core/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .train import train
+from .val import evaluate
+from .predict import predict
+from . import infer
+
+__all__ = ['train', 'evaluate', 'predict']
diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py
new file mode 100644
index 0000000000..8ac1d800fe
--- /dev/null
+++ b/contrib/PanopticDeepLab/core/infer.py
@@ -0,0 +1,351 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections.abc
+from itertools import combinations
+from functools import partial
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def get_reverse_list(ori_shape, transforms):
+ """
+    Get the reverse list of transforms.
+
+ Args:
+ ori_shape (list): Origin shape of image.
+ transforms (list): List of transform.
+
+ Returns:
+        list: List of tuples; there are two formats:
+ ('resize', (h, w)) The image shape before resize,
+ ('padding', (h, w)) The image shape before padding.
+ """
+ reverse_list = []
+ h, w = ori_shape[0], ori_shape[1]
+ for op in transforms:
+ if op.__class__.__name__ in ['Resize']:
+ reverse_list.append(('resize', (h, w)))
+ h, w = op.target_size[0], op.target_size[1]
+ if op.__class__.__name__ in ['ResizeByLong']:
+ reverse_list.append(('resize', (h, w)))
+ long_edge = max(h, w)
+ short_edge = min(h, w)
+ short_edge = int(round(short_edge * op.long_size / long_edge))
+ long_edge = op.long_size
+ if h > w:
+ h = long_edge
+ w = short_edge
+ else:
+ w = long_edge
+ h = short_edge
+ if op.__class__.__name__ in ['Padding']:
+ reverse_list.append(('padding', (h, w)))
+ w, h = op.target_size[0], op.target_size[1]
+ if op.__class__.__name__ in ['LimitLong']:
+ long_edge = max(h, w)
+ short_edge = min(h, w)
+ if ((op.max_long is not None) and (long_edge > op.max_long)):
+ reverse_list.append(('resize', (h, w)))
+ long_edge = op.max_long
+ short_edge = int(round(short_edge * op.max_long / long_edge))
+ elif ((op.min_long is not None) and (long_edge < op.min_long)):
+ reverse_list.append(('resize', (h, w)))
+ long_edge = op.min_long
+ short_edge = int(round(short_edge * op.min_long / long_edge))
+ if h > w:
+ h = long_edge
+ w = short_edge
+ else:
+ w = long_edge
+ h = short_edge
+ return reverse_list
+
+
+def reverse_transform(pred, ori_shape, transforms):
+ """recover pred to origin shape"""
+ reverse_list = get_reverse_list(ori_shape, transforms)
+ for item in reverse_list[::-1]:
+ if item[0] == 'resize':
+ h, w = item[1][0], item[1][1]
+ pred = F.interpolate(pred, (h, w), mode='nearest')
+ elif item[0] == 'padding':
+ h, w = item[1][0], item[1][1]
+ pred = pred[:, :, 0:h, 0:w]
+ else:
+ raise Exception("Unexpected info '{}' in im_info".format(item[0]))
+ return pred
+
+
+def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None):
+ """
+ Find the center points from the center heatmap.
+
+ Args:
+ ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output.
+ threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel (int, optional): NMS max pooling kernel size. Default: 3.
+ top_k (int, optional): An Integer, top k centers to keep. Default: None
+
+ Returns:
+ Tensor: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
+ """
+ # thresholding, setting values below threshold to 0
+ ctr_hmp = F.thresholded_relu(ctr_hmp, threshold)
+
+ #NMS
+ nms_padding = (nms_kernel - 1) // 2
+ ctr_hmp = ctr_hmp.unsqueeze(0)
+ ctr_hmp_max_pooled = F.max_pool2d(
+ ctr_hmp, kernel_size=nms_kernel, stride=1, padding=nms_padding)
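+    # keep only local maxima, i.e. positions whose value equals the max-pooled value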
+ ctr_hmp = ctr_hmp * (ctr_hmp_max_pooled == ctr_hmp)
+
+ ctr_hmp = ctr_hmp.squeeze((0, 1))
+ if len(ctr_hmp.shape) != 2:
+ raise ValueError('Something is wrong with center heatmap dimension.')
+
+ if top_k is None:
+ top_k_score = 0
+ else:
+ top_k_score, _ = paddle.topk(paddle.flatten(ctr_hmp), top_k)
+ top_k_score = top_k_score[-1]
+ # non-zero points are candidate centers
+    ctr_hmp_k = (ctr_hmp > top_k_score).astype('int64')
+ if ctr_hmp_k.sum() == 0:
+ ctr_all = None
+ else:
+ ctr_all = paddle.nonzero(ctr_hmp_k)
+ return ctr_all
+
+
+def group_pixels(ctr, offsets):
+ """
+ Gives each pixel in the image an instance id.
+
+ Args:
+ ctr (Tensor): A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
+        offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output. Only a batch size of N=1 is
+            supported, so the batch dimension has been squeezed. The order of the channel dim is (offset_y, offset_x).
+
+ Returns:
+ Tensor: A Tensor of shape [1, H, W], ins_id is 1, 2, ...
+ """
+ height, width = offsets.shape[-2:]
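+    # build a coordinate map of shape [2, H, W]; the channel order is (y, x)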
+ y_coord = paddle.arange(height, dtype=offsets.dtype).reshape([1, -1, 1])
+ y_coord = paddle.concat([y_coord] * width, axis=2)
+ x_coord = paddle.arange(width, dtype=offsets.dtype).reshape([1, 1, -1])
+ x_coord = paddle.concat([x_coord] * height, axis=1)
+ coord = paddle.concat([y_coord, x_coord], axis=0)
+
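+    # each pixel's predicted absolute center location: its own coordinate plus the predicted offset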
+ ctr_loc = coord + offsets
+ ctr_loc = ctr_loc.reshape((2, height * width)).transpose((1, 0))
+
+ # ctr: [K, 2] -> [K, 1, 2]
+ # ctr_loc = [H*W, 2] -> [1, H*W, 2]
+ ctr = ctr.unsqueeze(1)
+ ctr_loc = ctr_loc.unsqueeze(0)
+
+ # distance: [K, H*W]
+ distance = paddle.norm((ctr - ctr_loc).astype('float32'), axis=-1)
+
+ # finds center with minimum distance at each location, offset by 1, to reserve id=0 for stuff
+ instance_id = paddle.argmin(
+ distance, axis=0).reshape((1, height, width)) + 1
+
+ return instance_id
+
+
+def get_instance_segmentation(semantic,
+ ctr_hmp,
+ offset,
+ thing_list,
+ threshold=0.1,
+ nms_kernel=3,
+ top_k=None):
+ """
+ Post-processing for instance segmentation, gets class agnostic instance id map.
+
+ Args:
+ semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label.
+        ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output. Only a batch size of
+            N=1 is supported, so the batch dimension has been squeezed.
+        offset (Tensor): A Tensor of shape [2, H, W] of raw offset output. Only N=1 is supported. The
+            order of the channel dim is (offset_y, offset_x).
+ thing_list (list): A List of thing class id.
+ threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3.
+ top_k (int, optional): An Integer, top k centers to keep. Default: None.
+
+ Returns:
+ Tensor: Instance segmentation results which shape is [1, H, W].
+ Tensor: A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x).
+ """
+ thing_seg = paddle.zeros_like(semantic)
+ for thing_class in thing_list:
+ thing_seg = thing_seg + (semantic == thing_class).astype('int64')
+ thing_seg = (thing_seg > 0).astype('int64')
+ center = find_instance_center(
+ ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k)
+ if center is None:
+ return paddle.zeros_like(semantic), center
+ ins_seg = group_pixels(center, offset)
+ return thing_seg * ins_seg, center.unsqueeze(0)
+
+
+def merge_semantic_and_instance(semantic, instance, label_divisor, thing_list,
+ stuff_area, ignore_index):
+ """
+ Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic
+ instance segmentation label.
+
+ Args:
+ semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label.
+ instance (Tensor): A Tensor of shape [1, H, W], predicted instance label.
+ label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
+ thing_list (list): A List of thing class id.
+        stuff_area (int): An Integer, remove stuff whose area is less than stuff_area.
+ ignore_index (int): Specifies a value that is ignored.
+
+ Returns:
+        Tensor: A Tensor of shape [1, H, W]. Pixels whose value equals ignore_index are ignored.
+            A stuff class is represented simply as class_id, while a thing class is represented as
+            class_id * label_divisor + ins_id, where ins_id starts from 1.
+ """
+ # In case thing mask does not align with semantic prediction
+ pan_seg = paddle.zeros_like(semantic) + ignore_index
+ thing_seg = instance > 0
+ semantic_thing_seg = paddle.zeros_like(semantic)
+ for thing_class in thing_list:
+ semantic_thing_seg += semantic == thing_class
+
+ # keep track of instance id for each class
+ class_id_tracker = {}
+
+ # paste thing by majority voting
+ ins_ids = paddle.unique(instance)
+ for ins_id in ins_ids:
+ if ins_id == 0:
+ continue
+ # Make sure only do majority voting within semantic_thing_seg
+ thing_mask = paddle.logical_and(instance == ins_id,
+ semantic_thing_seg == 1)
+ if paddle.all(paddle.logical_not(thing_mask)):
+ continue
+ # get class id for instance of ins_id
+ sem_ins_id = paddle.gather(
+ semantic.reshape((-1, )), paddle.nonzero(
+ thing_mask.reshape((-1, )))) # equal to semantic[thing_mask]
+ v, c = paddle.unique(sem_ins_id, return_counts=True)
+ class_id = paddle.gather(v, c.argmax())
+ class_id = class_id.numpy()[0]
+ if class_id in class_id_tracker:
+ new_ins_id = class_id_tracker[class_id]
+ else:
+ class_id_tracker[class_id] = 1
+ new_ins_id = 1
+ class_id_tracker[class_id] += 1
+
+ # pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
+ pan_seg = pan_seg * (paddle.logical_not(thing_mask)) + (
+ class_id * label_divisor + new_ins_id) * thing_mask.astype('int64')
+
+ # paste stuff to unoccupied area
+ class_ids = paddle.unique(semantic)
+ for class_id in class_ids:
+ if class_id.numpy() in thing_list:
+ # thing class
+ continue
+ # calculate stuff area
+ stuff_mask = paddle.logical_and(semantic == class_id,
+ paddle.logical_not(thing_seg))
+ area = paddle.sum(stuff_mask.astype('int64'))
+ if area >= stuff_area:
+ # pan_seg[stuff_mask] = class_id
+ pan_seg = pan_seg * (paddle.logical_not(stuff_mask)
+ ) + stuff_mask.astype('int64') * class_id
+
+ return pan_seg
+
+
+def inference(
+ model,
+ im,
+ transforms,
+ thing_list,
+ label_divisor,
+ stuff_area,
+ ignore_index,
+ threshold=0.1,
+ nms_kernel=3,
+ top_k=None,
+ ori_shape=None,
+):
+ """
+ Inference for image.
+
+ Args:
+ model (paddle.nn.Layer): model to get logits of image.
+ im (Tensor): the input image.
+ transforms (list): Transforms for image.
+ thing_list (list): A List of thing class id.
+ label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
+        stuff_area (int): An Integer, remove stuff whose area is less than stuff_area.
+ ignore_index (int): Specifies a value that is ignored.
+ threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3.
+ top_k (int, optional): An Integer, top k centers to keep. Default: None.
+        ori_shape (list, optional): Original shape of the image. Default: None.
+
+ Returns:
+ list: A list of [semantic, semantic_softmax, instance, panoptic, ctr_hmp].
+            semantic: Semantic segmentation result with shape [1, 1, H, W], whose values are 0, 1, 2, ...
+            semantic_softmax: A Tensor of per-class probabilities with shape [1, num_classes, H, W].
+            instance: Class-agnostic instance segmentation result, whose values are 0, 1, 2, ..., where 0 is stuff.
+            panoptic: Panoptic segmentation result, whose values are ignore_index, stuff_id, or thing_id * label_divisor + ins_id with ins_id >= 1.
+            ctr_hmp: The raw center heatmap output.
+ """
+ logits = model(im)
+ # semantic: [1, c, h, w], center: [1, 1, h, w], offset: [1, 2, h, w]
+ semantic, ctr_hmp, offset = logits
+ semantic = paddle.argmax(semantic, axis=1, keepdim=True)
+ semantic = semantic.squeeze(0) # shape: [1, h, w]
+ semantic_softmax = F.softmax(logits[0], axis=1).squeeze()
+ ctr_hmp = ctr_hmp.squeeze(0) # shape: [1, h, w]
+ offset = offset.squeeze(0) # shape: [2, h, w]
+
+ instance, center = get_instance_segmentation(
+ semantic=semantic,
+ ctr_hmp=ctr_hmp,
+ offset=offset,
+ thing_list=thing_list,
+ threshold=threshold,
+ nms_kernel=nms_kernel,
+ top_k=top_k)
+ panoptic = merge_semantic_and_instance(semantic, instance, label_divisor,
+ thing_list, stuff_area, ignore_index)
+
+ # Recover to origin shape
+ # semantic: 0, 1, 2, 3...
+ # instance: 0, 1, 2, 3, 4, 5... and the 0 is stuff.
+ # panoptic: ignore_index, stuff_id, thing_id * label_divisor + ins_id , ins_id >= 1.
+ results = [semantic, semantic_softmax, instance, panoptic, ctr_hmp]
+ if ori_shape is not None:
+ results = [i.unsqueeze(0) for i in results]
+ results = [
+ reverse_transform(i, ori_shape=ori_shape, transforms=transforms)
+ for i in results
+ ]
+
+ return results
diff --git a/contrib/PanopticDeepLab/core/predict.py b/contrib/PanopticDeepLab/core/predict.py
new file mode 100644
index 0000000000..78b9b54ec2
--- /dev/null
+++ b/contrib/PanopticDeepLab/core/predict.py
@@ -0,0 +1,189 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import math
+
+import cv2
+import numpy as np
+import paddle
+import paddleseg
+from paddleseg.utils import logger, progbar
+
+from core import infer
+import utils
+
+
+def mkdir(path):
+ sub_dir = os.path.dirname(path)
+ if not os.path.exists(sub_dir):
+ os.makedirs(sub_dir)
+
+
+def partition_list(arr, m):
+ """split the list 'arr' into m pieces"""
+ n = int(math.ceil(len(arr) / float(m)))
+ return [arr[i:i + n] for i in range(0, len(arr), n)]
+
+
+def get_save_name(im_path, im_dir):
+ """get the saved name"""
+ if im_dir is not None:
+ im_file = im_path.replace(im_dir, '')
+ else:
+ im_file = os.path.basename(im_path)
+ if im_file[0] == '/':
+ im_file = im_file[1:]
+ return im_file
+
+
+def add_info_to_save_path(save_path, info):
+ """Add more information to save path"""
+ fname, fextension = os.path.splitext(save_path)
+ fname = '_'.join([fname, info])
+ save_path = ''.join([fname, fextension])
+ return save_path
+
+
+def predict(model,
+ model_path,
+ image_list,
+ transforms,
+ thing_list,
+ label_divisor,
+ stuff_area,
+ ignore_index,
+ image_dir=None,
+ save_dir='output',
+ threshold=0.1,
+ nms_kernel=7,
+ top_k=200):
+ """
+ predict and visualize the image_list.
+
+ Args:
+ model (nn.Layer): Used to predict for input image.
+ model_path (str): The path of pretrained model.
+ image_list (list): A list of image path to be predicted.
+ transforms (transform.Compose): Preprocess for input image.
+ thing_list (list): A List of thing class id.
+ label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
+        stuff_area (int): An Integer, remove stuff whose area is less than stuff_area.
+ ignore_index (int): Specifies a value that is ignored.
+ image_dir (str, optional): The root directory of the images predicted. Default: None.
+ save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
+        threshold(float, optional): Threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel(int, optional): NMS max pooling kernel size. Default: 7.
+ top_k(int, optional): Top k centers to keep. Default: 200.
+ """
+ paddleseg.utils.utils.load_entire_model(model, model_path)
+ model.eval()
+ nranks = paddle.distributed.get_world_size()
+ local_rank = paddle.distributed.get_rank()
+ if nranks > 1:
+ img_lists = partition_list(image_list, nranks)
+ else:
+ img_lists = [image_list]
+
+ semantic_save_dir = os.path.join(save_dir, 'semantic')
+ instance_save_dir = os.path.join(save_dir, 'instance')
+ panoptic_save_dir = os.path.join(save_dir, 'panoptic')
+
+ colormap = utils.cityscape_colormap()
+
+ logger.info("Start to predict...")
+ progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1)
+ with paddle.no_grad():
+ for i, im_path in enumerate(img_lists[local_rank]):
+ ori_im = cv2.imread(im_path)
+ ori_shape = ori_im.shape[:2]
+ im, _ = transforms(ori_im)
+ im = im[np.newaxis, ...]
+ im = paddle.to_tensor(im)
+
+ semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
+ model=model,
+ im=im,
+ transforms=transforms.transforms,
+ thing_list=thing_list,
+ label_divisor=label_divisor,
+ stuff_area=stuff_area,
+ ignore_index=ignore_index,
+ threshold=threshold,
+ nms_kernel=nms_kernel,
+ top_k=top_k,
+ ori_shape=ori_shape)
+ semantic = semantic.squeeze().numpy()
+ instance = instance.squeeze().numpy()
+ panoptic = panoptic.squeeze().numpy()
+
+ im_file = get_save_name(im_path, image_dir)
+
+            # visualize semantic segmentation results
+ save_path = os.path.join(semantic_save_dir, im_file)
+ mkdir(save_path)
+ utils.visualize_semantic(
+ semantic, save_path=save_path, colormap=colormap)
+ # Save added image for semantic segmentation results
+ save_path_ = add_info_to_save_path(save_path, 'add')
+ utils.visualize_semantic(
+ semantic, save_path=save_path_, colormap=colormap, image=ori_im)
+ # panoptic to semantic
+ ins_mask = panoptic > label_divisor
+ pan_to_sem = panoptic.copy()
+ pan_to_sem[ins_mask] = pan_to_sem[ins_mask] // label_divisor
+ save_path_ = add_info_to_save_path(save_path,
+ 'panoptic_to_semantic')
+ utils.visualize_semantic(
+ pan_to_sem, save_path=save_path_, colormap=colormap)
+ save_path_ = add_info_to_save_path(save_path,
+ 'panoptic_to_semantic_added')
+ utils.visualize_semantic(
+ pan_to_sem,
+ save_path=save_path_,
+ colormap=colormap,
+ image=ori_im)
+
+            # visualize instance segmentation results
+ pan_to_ins = panoptic.copy()
+ ins_mask = pan_to_ins > label_divisor
+ pan_to_ins[~ins_mask] = 0
+ save_path = os.path.join(instance_save_dir, im_file)
+ mkdir(save_path)
+ utils.visualize_instance(pan_to_ins, save_path=save_path)
+ # Save added image for instance segmentation results
+ save_path_ = add_info_to_save_path(save_path, 'added')
+ utils.visualize_instance(
+ pan_to_ins, save_path=save_path_, image=ori_im)
+
+            # visualize panoptic segmentation results
+ save_path = os.path.join(panoptic_save_dir, im_file)
+ mkdir(save_path)
+ utils.visualize_panoptic(
+ panoptic,
+ save_path=save_path,
+ label_divisor=label_divisor,
+ colormap=colormap,
+ ignore_index=ignore_index)
+ # Save added image for panoptic segmentation results
+ save_path_ = add_info_to_save_path(save_path, 'added')
+ utils.visualize_panoptic(
+ panoptic,
+ save_path=save_path_,
+ label_divisor=label_divisor,
+ colormap=colormap,
+ image=ori_im,
+ ignore_index=ignore_index)
+
+ progbar_pred.update(i + 1)
diff --git a/contrib/PanopticDeepLab/core/train.py b/contrib/PanopticDeepLab/core/train.py
new file mode 100644
index 0000000000..a3bdaf966c
--- /dev/null
+++ b/contrib/PanopticDeepLab/core/train.py
@@ -0,0 +1,315 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+from collections import deque
+import shutil
+
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, resume, logger
+
+from core.val import evaluate
+
+
+def check_logits_losses(logits_list, losses):
+ len_logits = len(logits_list)
+ len_losses = len(losses['types'])
+ if len_logits != len_losses:
+ raise RuntimeError(
+            'The length of logits_list should equal the number of loss types in the loss config: {} != {}.'
+ .format(len_logits, len_losses))
+
+
+def loss_computation(logits_list, semantic, semantic_weights, center,
+ center_weights, offset, offset_weights, losses):
+ # semantic loss
+ semantic_loss = losses['types'][0](logits_list[0], semantic,
+ semantic_weights)
+ semantic_loss = semantic_loss * losses['coef'][0]
+
+ # center loss
+ center_loss = losses['types'][1](logits_list[1], center)
+ center_weights = (center_weights.unsqueeze(1)).expand_as(center_loss)
+ center_loss = center_loss * center_weights
+ if center_loss.sum() > 0:
+ center_loss = center_loss.sum() / center_weights.sum()
+ else:
+ center_loss = center_loss.sum() * 0
+ center_loss = center_loss * losses['coef'][1]
+
+ # offset loss
+ offset_loss = losses['types'][2](logits_list[2], offset)
+ offset_weights = (offset_weights.unsqueeze(1)).expand_as(offset_loss)
+ offset_loss = offset_loss * offset_weights
+ if offset_weights.sum() > 0:
+ offset_loss = offset_loss.sum() / offset_weights.sum()
+ else:
+ offset_loss = offset_loss.sum() * 0
+ offset_loss = offset_loss * losses['coef'][2]
+
+ loss_list = [semantic_loss, center_loss, offset_loss]
+
+ return loss_list
+
+
+def train(model,
+ train_dataset,
+ val_dataset=None,
+ optimizer=None,
+ save_dir='output',
+ iters=10000,
+ batch_size=2,
+ resume_model=None,
+ save_interval=1000,
+ log_iters=10,
+ num_workers=0,
+ use_vdl=False,
+ losses=None,
+ keep_checkpoint_max=5,
+ threshold=0.1,
+ nms_kernel=7,
+ top_k=200):
+ """
+ Launch training.
+
+ Args:
+        model(nn.Layer): A semantic segmentation model.
+ train_dataset (paddle.io.Dataset): Used to read and process training datasets.
+ val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+ optimizer (paddle.optimizer.Optimizer): The optimizer.
+ save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
+        iters (int, optional): How many iterations to train the model. Default: 10000.
+ batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+ resume_model (str, optional): The path of resume model.
+ save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
+ log_iters (int, optional): Display logging information at every log_iters. Default: 10.
+ num_workers (int, optional): Num workers for data loader. Default: 0.
+ use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+        losses (dict): A dict including 'types' and 'coef'. The length of coef should equal 1 or len(losses['types']).
+            The 'types' item is a list of loss objects from paddleseg.models.losses, while the 'coef' item is a list of the corresponding coefficients.
+ keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+ threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 7.
+ top_k (int, optional): An Integer, top k centers to keep. Default: 200.
+ """
+ model.train()
+ nranks = paddle.distributed.ParallelEnv().nranks
+ local_rank = paddle.distributed.ParallelEnv().local_rank
+
+ start_iter = 0
+ if resume_model is not None:
+ start_iter = resume(model, optimizer, resume_model)
+
+ if not os.path.isdir(save_dir):
+ if os.path.exists(save_dir):
+ os.remove(save_dir)
+ os.makedirs(save_dir)
+
+ if nranks > 1:
+ # Initialize parallel environment if not done.
+ if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+ ):
+ paddle.distributed.init_parallel_env()
+ ddp_model = paddle.DataParallel(model)
+ else:
+ ddp_model = paddle.DataParallel(model)
+
+ batch_sampler = paddle.io.DistributedBatchSampler(
+ train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
+
+ loader = paddle.io.DataLoader(
+ train_dataset,
+ batch_sampler=batch_sampler,
+ num_workers=num_workers,
+ return_list=True,
+ )
+
+ if use_vdl:
+ from visualdl import LogWriter
+ log_writer = LogWriter(save_dir)
+
+ avg_loss = 0.0
+ avg_loss_list = []
+ iters_per_epoch = len(batch_sampler)
+ best_pq = -1.0
+ best_model_iter = -1
+ reader_cost_averager = TimeAverager()
+ batch_cost_averager = TimeAverager()
+ save_models = deque()
+ batch_start = time.time()
+
+ iter = start_iter
+ while iter < iters:
+ for data in loader:
+ iter += 1
+ if iter > iters:
+ break
+ reader_cost_averager.record(time.time() - batch_start)
+ images = data[0]
+ semantic = data[1]
+ semantic_weights = data[2]
+ center = data[3]
+ center_weights = data[4]
+ offset = data[5]
+ offset_weights = data[6]
+ foreground = data[7]
+
+ if nranks > 1:
+ logits_list = ddp_model(images)
+ else:
+ logits_list = model(images)
+
+ loss_list = loss_computation(
+ logits_list=logits_list,
+ losses=losses,
+ semantic=semantic,
+ semantic_weights=semantic_weights,
+ center=center,
+ center_weights=center_weights,
+ offset=offset,
+ offset_weights=offset_weights)
+ loss = sum(loss_list)
+ loss.backward()
+
+ optimizer.step()
+ lr = optimizer.get_lr()
+ if isinstance(optimizer._learning_rate,
+ paddle.optimizer.lr.LRScheduler):
+ optimizer._learning_rate.step()
+ model.clear_gradients()
+ avg_loss += loss.numpy()[0]
+ if not avg_loss_list:
+ avg_loss_list = [l.numpy() for l in loss_list]
+ else:
+ for i in range(len(loss_list)):
+ avg_loss_list[i] += loss_list[i].numpy()
+ batch_cost_averager.record(
+ time.time() - batch_start, num_samples=batch_size)
+
+ if (iter) % log_iters == 0 and local_rank == 0:
+ avg_loss /= log_iters
+ avg_loss_list = [l[0] / log_iters for l in avg_loss_list]
+ remain_iters = iters - iter
+ avg_train_batch_cost = batch_cost_averager.get_average()
+ avg_train_reader_cost = reader_cost_averager.get_average()
+ eta = calculate_eta(remain_iters, avg_train_batch_cost)
+ logger.info(
+ "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}"
+ .format((iter - 1) // iters_per_epoch + 1, iter, iters,
+ avg_loss, lr, avg_train_batch_cost,
+ avg_train_reader_cost,
+ batch_cost_averager.get_ips_average(), eta))
+ logger.info(
+ "[LOSS] loss={:.4f}, semantic_loss={:.4f}, center_loss={:.4f}, offset_loss={:.4f}"
+ .format(avg_loss, avg_loss_list[0], avg_loss_list[1],
+ avg_loss_list[2]))
+ if use_vdl:
+ log_writer.add_scalar('Train/loss', avg_loss, iter)
+ # Record all losses if there are more than 2 losses.
+ if len(avg_loss_list) > 1:
+ avg_loss_dict = {}
+ for i, value in enumerate(avg_loss_list):
+ avg_loss_dict['loss_' + str(i)] = value
+ for key, value in avg_loss_dict.items():
+ log_tag = 'Train/' + key
+ log_writer.add_scalar(log_tag, value, iter)
+
+ log_writer.add_scalar('Train/lr', lr, iter)
+ log_writer.add_scalar('Train/batch_cost',
+ avg_train_batch_cost, iter)
+ log_writer.add_scalar('Train/reader_cost',
+ avg_train_reader_cost, iter)
+
+ avg_loss = 0.0
+ avg_loss_list = []
+ reader_cost_averager.reset()
+ batch_cost_averager.reset()
+
+ # save model
+ if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
+ current_save_dir = os.path.join(save_dir,
+ "iter_{}".format(iter))
+ if not os.path.isdir(current_save_dir):
+ os.makedirs(current_save_dir)
+ paddle.save(model.state_dict(),
+ os.path.join(current_save_dir, 'model.pdparams'))
+ paddle.save(optimizer.state_dict(),
+ os.path.join(current_save_dir, 'model.pdopt'))
+ save_models.append(current_save_dir)
+ if len(save_models) > keep_checkpoint_max > 0:
+ model_to_remove = save_models.popleft()
+ shutil.rmtree(model_to_remove)
+
+ # eval model
+ if (iter % save_interval == 0 or iter == iters) and (
+ val_dataset is
+ not None) and local_rank == 0 and iter > iters // 2:
+ num_workers = 1 if num_workers > 0 else 0
+ panoptic_results, semantic_results, instance_results = evaluate(
+ model,
+ val_dataset,
+ threshold=threshold,
+ nms_kernel=nms_kernel,
+ top_k=top_k,
+ num_workers=num_workers,
+ print_detail=False)
+ pq = panoptic_results['pan_seg']['All']['pq']
+ miou = semantic_results['sem_seg']['mIoU']
+ map = instance_results['ins_seg']['mAP']
+ map50 = instance_results['ins_seg']['mAP50']
+ logger.info(
+ "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}"
+ .format(pq, miou, map, map50))
+ model.train()
+
+ # save best model and add evaluate results to vdl
+ if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
+ if val_dataset is not None and iter > iters // 2:
+ if pq > best_pq:
+ best_pq = pq
+ best_model_iter = iter
+ best_model_dir = os.path.join(save_dir, "best_model")
+ paddle.save(
+ model.state_dict(),
+ os.path.join(best_model_dir, 'model.pdparams'))
+ logger.info(
+ '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.'
+ .format(best_pq, best_model_iter))
+
+ if use_vdl:
+ log_writer.add_scalar('Evaluate/PQ', pq, iter)
+ log_writer.add_scalar('Evaluate/mIoU', miou, iter)
+ log_writer.add_scalar('Evaluate/mAP', map, iter)
+ log_writer.add_scalar('Evaluate/mAP50', map50, iter)
+ batch_start = time.time()
+
+ # Calculate flops.
+ if local_rank == 0:
+
+ def count_syncbn(m, x, y):
+ x = x[0]
+ nelements = x.numel()
+ m.total_ops += int(2 * nelements)
+
+ _, c, h, w = images.shape
+ flops = paddle.flops(
+ model, [1, c, h, w],
+ custom_ops={paddle.nn.SyncBatchNorm: count_syncbn})
+
+ # Sleep for half a second to let dataloader release resources.
+ time.sleep(0.5)
+ if use_vdl:
+ log_writer.close()
diff --git a/contrib/PanopticDeepLab/core/val.py b/contrib/PanopticDeepLab/core/val.py
new file mode 100644
index 0000000000..9e0f90b97b
--- /dev/null
+++ b/contrib/PanopticDeepLab/core/val.py
@@ -0,0 +1,181 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from collections import OrderedDict
+
+import numpy as np
+import time
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, logger, progbar
+
+from utils.evaluation import SemanticEvaluator, InstanceEvaluator, PanopticEvaluator
+from core import infer
+
+np.set_printoptions(suppress=True)
+
+
+def evaluate(model,
+ eval_dataset,
+ threshold=0.1,
+ nms_kernel=7,
+ top_k=200,
+ num_workers=0,
+ print_detail=True):
+ """
+ Launch evaluation.
+
+ Args:
+        model(nn.Layer): A semantic segmentation model.
+        eval_dataset (paddle.io.Dataset): Used to read and process validation datasets.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+ nms_kernel (int, optional): NMS max pooling kernel size. Default: 7.
+ top_k (int, optional): Top k centers to keep. Default: 200.
+ num_workers (int, optional): Num workers for data loader. Default: 0.
+ print_detail (bool, optional): Whether to print detailed information about the evaluation process. Default: True.
+
+ Returns:
+ dict: Panoptic evaluation results which includes PQ, RQ, SQ for all, each class, Things and stuff.
+ dict: Semantic evaluation results which includes mIoU, fwIoU, mACC and pACC.
+ dict: Instance evaluation results which includes mAP and mAP50, and also AP and AP50 for each class.
+
+ """
+ model.eval()
+ nranks = paddle.distributed.ParallelEnv().nranks
+ local_rank = paddle.distributed.ParallelEnv().local_rank
+ if nranks > 1:
+ # Initialize parallel environment if not done.
+ if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+ ):
+ paddle.distributed.init_parallel_env()
+ batch_sampler = paddle.io.DistributedBatchSampler(
+ eval_dataset, batch_size=1, shuffle=False, drop_last=False)
+ loader = paddle.io.DataLoader(
+ eval_dataset,
+ batch_sampler=batch_sampler,
+ num_workers=num_workers,
+ return_list=True,
+ )
+
+ total_iters = len(loader)
+ semantic_metric = SemanticEvaluator(
+ eval_dataset.num_classes, ignore_index=eval_dataset.ignore_index)
+ instance_metric_AP50 = InstanceEvaluator(
+ eval_dataset.num_classes,
+ overlaps=0.5,
+ thing_list=eval_dataset.thing_list)
+ instance_metric_AP = InstanceEvaluator(
+ eval_dataset.num_classes,
+ overlaps=list(np.arange(0.5, 1.0, 0.05)),
+ thing_list=eval_dataset.thing_list)
+ panoptic_metric = PanopticEvaluator(
+ num_classes=eval_dataset.num_classes,
+ thing_list=eval_dataset.thing_list,
+ ignore_index=eval_dataset.ignore_index,
+ label_divisor=eval_dataset.label_divisor)
+
+ if print_detail:
+ logger.info(
+ "Start evaluating (total_samples={}, total_iters={})...".format(
+ len(eval_dataset), total_iters))
+ progbar_val = progbar.Progbar(target=total_iters, verbose=1)
+ reader_cost_averager = TimeAverager()
+ batch_cost_averager = TimeAverager()
+ batch_start = time.time()
+ with paddle.no_grad():
+ for iter, data in enumerate(loader):
+ reader_cost_averager.record(time.time() - batch_start)
+ im = data[0]
+ raw_semantic_label = data[1] # raw semantic label.
+ raw_instance_label = data[2]
+ raw_panoptic_label = data[3]
+ ori_shape = raw_semantic_label.shape[-2:]
+
+ semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
+ model=model,
+ im=im,
+ transforms=eval_dataset.transforms.transforms,
+ thing_list=eval_dataset.thing_list,
+ label_divisor=eval_dataset.label_divisor,
+ stuff_area=eval_dataset.stuff_area,
+ ignore_index=eval_dataset.ignore_index,
+ threshold=threshold,
+ nms_kernel=nms_kernel,
+ top_k=top_k,
+ ori_shape=ori_shape)
+ semantic = semantic.squeeze().numpy()
+ semantic_softmax = semantic_softmax.squeeze().numpy()
+ instance = instance.squeeze().numpy()
+ panoptic = panoptic.squeeze().numpy()
+ ctr_hmp = ctr_hmp.squeeze().numpy()
+ raw_semantic_label = raw_semantic_label.squeeze().numpy()
+ raw_instance_label = raw_instance_label.squeeze().numpy()
+ raw_panoptic_label = raw_panoptic_label.squeeze().numpy()
+
+ # update metric for semantic, instance, panoptic
+ semantic_metric.update(semantic, raw_semantic_label)
+
+ gts = instance_metric_AP.convert_gt_map(raw_semantic_label,
+ raw_instance_label)
+ # print([i[0] for i in gts])
+ preds = instance_metric_AP.convert_pred_map(semantic_softmax,
+ panoptic)
+ # print([(i[0], i[1]) for i in preds ])
+ ignore_mask = raw_semantic_label == eval_dataset.ignore_index
+ instance_metric_AP.update(preds, gts, ignore_mask=ignore_mask)
+ instance_metric_AP50.update(preds, gts, ignore_mask=ignore_mask)
+
+ panoptic_metric.update(panoptic, raw_panoptic_label)
+
+ batch_cost_averager.record(
+ time.time() - batch_start, num_samples=len(im))
+ batch_cost = batch_cost_averager.get_average()
+ reader_cost = reader_cost_averager.get_average()
+
+ if local_rank == 0:
+ progbar_val.update(iter + 1, [('batch_cost', batch_cost),
+ ('reader cost', reader_cost)])
+ reader_cost_averager.reset()
+ batch_cost_averager.reset()
+ batch_start = time.time()
+
+ semantic_results = semantic_metric.evaluate()
+ panoptic_results = panoptic_metric.evaluate()
+ instance_results = OrderedDict()
+ ins_ap = instance_metric_AP.evaluate()
+ ins_ap50 = instance_metric_AP50.evaluate()
+ instance_results['ins_seg'] = OrderedDict()
+ instance_results['ins_seg']['mAP'] = ins_ap['ins_seg']['mAP']
+ instance_results['ins_seg']['AP'] = ins_ap['ins_seg']['AP']
+ instance_results['ins_seg']['mAP50'] = ins_ap50['ins_seg']['mAP']
+ instance_results['ins_seg']['AP50'] = ins_ap50['ins_seg']['AP']
+
+ if print_detail:
+ logger.info(panoptic_results)
+ print()
+ logger.info(semantic_results)
+ print()
+ logger.info(instance_results)
+ print()
+
+ pq = panoptic_results['pan_seg']['All']['pq']
+ miou = semantic_results['sem_seg']['mIoU']
+ map = instance_results['ins_seg']['mAP']
+ map50 = instance_results['ins_seg']['mAP50']
+ logger.info(
+ "PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}".format(
+ pq, miou, map, map50))
+
+ return panoptic_results, semantic_results, instance_results
diff --git a/contrib/PanopticDeepLab/datasets/__init__.py b/contrib/PanopticDeepLab/datasets/__init__.py
new file mode 100644
index 0000000000..4f0f3a9500
--- /dev/null
+++ b/contrib/PanopticDeepLab/datasets/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .cityscapes_panoptic import CityscapesPanoptic
diff --git a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py
new file mode 100644
index 0000000000..59141367c0
--- /dev/null
+++ b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py
@@ -0,0 +1,196 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import glob
+
+import numpy as np
+import paddle
+from paddleseg.cvlibs import manager
+from paddleseg.transforms import Compose
+import PIL.Image as Image
+
+from transforms import PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator
+
+
+@manager.DATASETS.add_component
+class CityscapesPanoptic(paddle.io.Dataset):
+ """
+ Cityscapes dataset `https://www.cityscapes-dataset.com/`.
+ The folder structure is as follow:
+
+ cityscapes/
+ |--gtFine/
+ | |--train/
+ | | |--aachen/
+ | | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json,
+ | | | |--*_labelTrainIds.png
+ | | | |--...
+ | |--val/
+ | |--test/
+ | |--cityscapes_panoptic_train_trainId.json
+ | |--cityscapes_panoptic_train_trainId/
+ | | |-- *_panoptic.png
+ | |--cityscapes_panoptic_val_trainId.json
+ | |--cityscapes_panoptic_val_trainId/
+ | | |-- *_panoptic.png
+ |--leftImg8bit/
+ | |--train/
+ | |--val/
+ | |--test/
+
+ Args:
+ transforms (list): Transforms for image.
+ dataset_root (str): Cityscapes dataset directory.
+        mode (str, optional): Which part of the dataset to use. It is one of ('train', 'val'). Default: 'train'.
+ ignore_stuff_in_offset (bool, optional): Whether to ignore stuff region when training the offset branch. Default: False.
+ small_instance_area (int, optional): Instance which area less than given value is considered small. Default: 0.
+ small_instance_weight (int, optional): The loss weight for small instance. Default: 1.
+        stuff_area (int, optional): An Integer, remove stuff whose area is less than stuff_area. Default: 2048.
+ """
+
+ def __init__(self,
+ transforms,
+ dataset_root,
+ mode='train',
+ ignore_stuff_in_offset=False,
+ small_instance_area=0,
+ small_instance_weight=1,
+ stuff_area=2048):
+ self.dataset_root = dataset_root
+ self.transforms = Compose(transforms)
+ self.file_list = list()
+ self.ins_list = []
+ mode = mode.lower()
+ self.mode = mode
+ self.num_classes = 19
+ self.ignore_index = 255
+ self.thing_list = [11, 12, 13, 14, 15, 16, 17, 18]
+ self.label_divisor = 1000
+ self.stuff_area = stuff_area
+
+ if mode not in ['train', 'val']:
+ raise ValueError(
+ "mode should be 'train' or 'val' , but got {}.".format(mode))
+
+ if self.transforms is None:
+ raise ValueError("`transforms` is necessary, but it is None.")
+
+ img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
+ label_dir = os.path.join(self.dataset_root, 'gtFine')
+ if self.dataset_root is None or not os.path.isdir(
+ self.dataset_root) or not os.path.isdir(
+ img_dir) or not os.path.isdir(label_dir):
+ raise ValueError(
+ "The dataset is not Found or the folder structure is nonconfoumance."
+ )
+ json_filename = os.path.join(
+ self.dataset_root, 'gtFine',
+ 'cityscapes_panoptic_{}_trainId.json'.format(mode))
+ dataset = json.load(open(json_filename))
+ img_files = []
+ label_files = []
+ for img in dataset['images']:
+ img_file_name = img['file_name']
+ img_files.append(
+ os.path.join(self.dataset_root, 'leftImg8bit', mode,
+ img_file_name.split('_')[0],
+ img_file_name.replace('_gtFine', '')))
+ for ann in dataset['annotations']:
+ ann_file_name = ann['file_name']
+ label_files.append(
+ os.path.join(self.dataset_root, 'gtFine',
+ 'cityscapes_panoptic_{}_trainId'.format(mode),
+ ann_file_name))
+ self.ins_list.append(ann['segments_info'])
+
+ self.file_list = [[
+ img_path, label_path
+ ] for img_path, label_path in zip(img_files, label_files)]
+
+ self.target_transform = PanopticTargetGenerator(
+ self.ignore_index,
+ self.rgb2id,
+ self.thing_list,
+ sigma=8,
+ ignore_stuff_in_offset=ignore_stuff_in_offset,
+ small_instance_area=small_instance_area,
+ small_instance_weight=small_instance_weight)
+
+ self.raw_semantic_generator = SemanticTargetGenerator(
+ ignore_index=self.ignore_index, rgb2id=self.rgb2id)
+ self.raw_instance_generator = InstanceTargetGenerator(self.rgb2id)
+ self.raw_panoptic_generator = RawPanopticTargetGenerator(
+ ignore_index=self.ignore_index,
+ rgb2id=self.rgb2id,
+ label_divisor=self.label_divisor)
+
+ @staticmethod
+ def rgb2id(color):
+ """Converts the color to panoptic label.
+ Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`.
+
+ Args:
+ color: Ndarray or a tuple, color encoded image.
+
+ Returns:
+ Panoptic label.
+ """
+ if isinstance(color, np.ndarray) and len(color.shape) == 3:
+ if color.dtype == np.uint8:
+ color = color.astype(np.int32)
+ return color[:, :,
+ 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
+ return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
+
+ def __getitem__(self, idx):
+ image_path, label_path = self.file_list[idx]
+ dataset_dict = {}
+ im, label = self.transforms(im=image_path, label=label_path)
+ label_dict = self.target_transform(label, self.ins_list[idx])
+ for key in label_dict.keys():
+ dataset_dict[key] = label_dict[key]
+ dataset_dict['image'] = im
+ if self.mode == 'val':
+ raw_label = np.asarray(Image.open(label_path))
+ dataset_dict['raw_semantic_label'] = self.raw_semantic_generator(
+ raw_label, self.ins_list[idx])['semantic']
+ dataset_dict['raw_instance_label'] = self.raw_instance_generator(
+ raw_label)['instance']
+ dataset_dict['raw_panoptic_label'] = self.raw_panoptic_generator(
+ raw_label, self.ins_list[idx])['panoptic']
+
+ image = np.array(dataset_dict['image'])
+ semantic = np.array(dataset_dict['semantic'])
+ semantic_weights = np.array(dataset_dict['semantic_weights'])
+ center = np.array(dataset_dict['center'])
+ center_weights = np.array(dataset_dict['center_weights'])
+ offset = np.array(dataset_dict['offset'])
+ offset_weights = np.array(dataset_dict['offset_weights'])
+ foreground = np.array(dataset_dict['foreground'])
+ if self.mode == 'train':
+ return image, semantic, semantic_weights, center, center_weights, offset, offset_weights, foreground
+ elif self.mode == 'val':
+ raw_semantic_label = np.array(dataset_dict['raw_semantic_label'])
+ raw_instance_label = np.array(dataset_dict['raw_instance_label'])
+ raw_panoptic_label = np.array(dataset_dict['raw_panoptic_label'])
+ return image, raw_semantic_label, raw_instance_label, raw_panoptic_label
+ else:
+            raise ValueError(
+                '{} is not supported, please set mode to one of ("train", "val").'.
+                format(self.mode))
+
+ def __len__(self):
+ return len(self.file_list)
diff --git a/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg b/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ace44918e4ab3877f23d4767db57c42f857add10
GIT binary patch
literal 117991