From e38c8282b828bbc70ed20288f9f979b2cfc986da Mon Sep 17 00:00:00 2001 From: "Romain F. Laine" Date: Wed, 5 Aug 2020 16:27:31 +0100 Subject: [PATCH] v1.8 Shift of beta notebooks as main release --- .DS_Store | Bin 12292 -> 12292 bytes Colab_notebooks/CARE_2D_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/CARE_3D_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/ChangeLog.txt | 23 ++++++++++++++++++ Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb | 1 + .../Deep-STORM_2D_ZeroCostDL4Mic.ipynb | 1 + .../Noise2VOID_2D_ZeroCostDL4Mic.ipynb | 2 +- .../Noise2VOID_3D_ZeroCostDL4Mic.ipynb | 2 +- .../Stardist_2D_ZeroCostDL4Mic.ipynb | 2 +- .../Stardist_3D_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/Template_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/U-Net_3D_ZeroCostDL4Mic.ipynb | 1 + Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/YOLOv2_ZeroCostDL4Mic.ipynb | 1 + .../ZeroCostDL4Mic_UserManual_v1.2.pdf | Bin 0 -> 2313410 bytes .../ZeroCostDL4Mic_UserManual_v1.pdf | Bin 1607254 -> 0 bytes Colab_notebooks/fnet_ZeroCostDL4Mic.ipynb | 2 +- Colab_notebooks/pix2pix_ZeroCostDL4Mic.ipynb | 1 + .../CycleGAN_ZeroCostDL4Mic.ipynb | 1 - .../DeepSTORM_2D_ZeroCostDL4Mic.ipynb | 1 - .../Stardist_2D_Fiji_ZeroCostDL4Mic.ipynb | 1 - .../U-net_3D_ZeroCostDL4Mic.ipynb | 1 - .../YoloV2_ZeroCostDL4Mic.ipynb | 1 - .../pix2pix_ZeroCostDL4Mic.ipynb | 1 - 24 files changed, 37 insertions(+), 15 deletions(-) create mode 100755 Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb create mode 100755 Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb create mode 100755 Colab_notebooks/U-Net_3D_ZeroCostDL4Mic.ipynb create mode 100755 Colab_notebooks/YOLOv2_ZeroCostDL4Mic.ipynb create mode 100755 Colab_notebooks/ZeroCostDL4Mic_UserManual_v1.2.pdf delete mode 100755 Colab_notebooks/ZeroCostDL4Mic_UserManual_v1.pdf create mode 100755 Colab_notebooks/pix2pix_ZeroCostDL4Mic.ipynb delete mode 100755 Colab_notebooks_Beta/CycleGAN_ZeroCostDL4Mic.ipynb delete mode 100755 Colab_notebooks_Beta/DeepSTORM_2D_ZeroCostDL4Mic.ipynb delete mode 100755 Colab_notebooks_Beta/Stardist_2D_Fiji_ZeroCostDL4Mic.ipynb delete mode 100644 Colab_notebooks_Beta/U-net_3D_ZeroCostDL4Mic.ipynb delete mode 100755 Colab_notebooks_Beta/YoloV2_ZeroCostDL4Mic.ipynb delete mode 100755 Colab_notebooks_Beta/pix2pix_ZeroCostDL4Mic.ipynb diff --git a/.DS_Store b/.DS_Store index 8253394059d428f5f4174c34c63dfd6e9d9b3293..f888e9ef2378063f5be7df09dc952bf935d21bb4 100644 GIT binary patch delta 2160 zcmeHHTWnNS6y2+|^d9H7J;QBh+FPb~%Fq`XN}teD`{30U1W_!30D+dykc`kN^dT={ zd^9mez{pX8XjDRs5sV=zMNmW%BT*!xGR8oJ4`VdZ4~Y7q(Wv|0JEDQ$kAKGbn9Sb$ zo;zo)z1AA(9_c>1B0W=hjCOBFr|J*nsai9vuKFx>vDoknaYWBEtx);Yni+S_N@iu} zNbdwgsHoyi$~<3aF13_6UNdFJ!_*I zWL-qK=W6SOvFw59z+kLDF7HY;8ybbNytluvFB%^j^dg*Us|y+V>Gqw~cNu0P)K*2# zM}jmepfI&iJFTEziqmG=MZ0Ma9iT(>Ivu0abe2A(kLUt@Oqb{~eL+{~Yr00i(r@%T z2m*QV!H*zHP=<0;pawJ0h?!`@Tr9vsEW+JbhBfHHTJ&NAhKsNTTd@s~VlSS?GuVe0 z@e&T=2wuY&POCHdR_hrCm9$AWoAhSQ266*ca)R{cw6>Y%YMaZ2J%dAwH%8;k^C>3O zIZ|Zq%?$p9QZUE>)zDm8NZm9*+Zo_~I>_LT(;M_Iou?!N`-(wbryuA?`Wb2PAPd>Z zLH^%hwV)Mkn4bc*1S_!$s~K1XeTZWtHeoX!!o!Sf1iKKjfjx@@IK;5t!rM5Fckn(w zz&RxG8Lr|RTvw|sOI;34y@TK`MDvV#HDCH@sa|D^(`?V}D7#sl5bBbQrHO#5E-6*_ zdkt>v^cE&WrCB9dLvmcw=g%)NYel^#BSL04rDzZ{1?x$syVGXTsM0gX!cta9H!Gx<$y|qiCUXFT7{*Q{n9xU<(B0UBr&5A>!M$HviZtr9 z>(M7C9L`dKf|1;s`ucFNzG_D}xV^fnhJR15t5#zL^~h0D7OB$WNttHn{7&BjU42rV zr;hq6)f%%weebjKj%52M7MDz!R@c}v$I+6WtNWPeeC5f=%EAfJ74PYbIto;>(3c1l znfk3}w*@+>$TV*{F`?bOl5Lzp3fgFsZf?bpw0)PiNXRi-3M 
zVW+UltgxoZv?M#EuyUHE%W+Qb+GTB>ZpjR0UUQ}mjk+$=oeZ|8%`H3~?09DNWw*eH+TaXf+0E_TX3?C0Eh4$tFdynrY#C7{`BK2K4lBpuhH1IFz;&N-1o>TFPCZunR-kbXj17$#h`PTm{M;Wt-rv zGpJEXRF2ycFH>i{OcO6uUzlvm=7s2+%}ri-ON_?7F=j^J=o=HCb6Uei{{a*GqUrDX z_T)Uz^Zk5(Gka(DzOy^iB<9FncCmZT)u^4~cI~7iWS4Gm6niVGYa3fT7jyG0T(rq{ z?avZlVokHq8ygx^jjCKA7Yf`f^iK{)@0pCo?_K11mfRwzEDlD7;_9~XNOWIRjqvvc zv!QsLvV5DaY&@vacSI13a+yySdnck(Dj!P!v2v{__Krkj5k6r~JzBF~l>0`962s~! zkC;=>h3Z6k*CF-bM07mHd(y?GW>N0Hb9`(}jm0Ods7jaH+GY86{ncBCCgQgqP-E2A zr}Tb(IN2$05`+O=?TkdIEpJB!Gd^YxCa^+Q%Ia7v+s+0OEW!@6S$3SAV5ixeEXh7# zAF)g9GP}yYU|+KD*$?bTc8y(UzatabFhPJ7F1S&Q3RGh~0tli7VRT{xHexGwpcj4E zi92u?qPQC)h~p?8z%e|C$MFoF!}BJZnY?GMf)S$qh&Z7R1p$#kxz|Qp~h&m8Jp3CE!a*C?m|BXFubhE zDcnbm-j7G{1fIeiG-~w~qs=#{(GT%4KEVPm<8xfWH~1FUa2>yBgT=+#SEjWRtrw{s z3pq)vke`_>mjXJ94nontgkr6BmG93GT?C(^^;%03qR-(s6Hy$C`2|X$!{=Bh2t30$ zh&7IYnIPh8Qd&r?7l;;~l~S9;7Bd0GGgE4t7#0W{p1oLDpyYRMaEP2)iEr~V|5v(C z<|qDk{|uXF7s>gr*>~jkPwY2xoRQmkPz<+A;f2pwWDvEeLmS%BL9XkbZ^JFL)B)_q zAoh^+`^otcj3S1^ID#om(_$Z{#XgG1FpJ|jNgOR zDcv`9%Tmvr>m}_oM>sJ{kul6IREk!nQ^&#OR-}KXQ&&pein1!5db!-MNK5I|E9epcXuag_Yd$~2pX>UUNfZ0ZLo;H*uvX=wFQW13AvZIN2jY#M4<>P)j~ zsGFoNv(=U{YvxSwreebw-BQ{)cgvmt8i*`Cs5hgB*xE^9O|hk7Uz)8rCUJ<^(r=X+ zJVb0gNuky7G)~|Jyol3ynZo-P-p0F_hjGc^q8{SpfpQD%!BBmbr#W=A$}=6T3kK+S cuwJ|3sZB2ULs{CdWljI@=U?}uEflo>1sTz`tN;K2 diff --git a/Colab_notebooks/CARE_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/CARE_2D_ZeroCostDL4Mic.ipynb index e8c7bf8a..de3f26f9 100755 --- a/Colab_notebooks/CARE_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/CARE_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"CARE_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1mqcexfPBaIWuvMWWbJZUFtPoZoJJwrEA","timestamp":1589278334507},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **Content-aware image restoration (CARE) 2D**\n","\n","CARE is a neural network capable of image restoration from corrupted bio-images, first published in 2018 by [Weigert *et al.* in Nature Methods](https://www.nature.com/articles/s41592-018-0216-7). The network allows image denoising and resolution improvement in 2D and 3D images, in a supervised training manner. The function of the network is essentially determined by the set of images provided in the training dataset. For instance, if noisy images are provided as input and high signal-to-noise ratio images are provided as targets, the network will perform denoising.\n","\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). 
Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is based on the following paper: \n","\n","**Content-aware image restoration: pushing the limits of fluorescence microscopy**, Nature Methods, Volume 15. pages 1090–1097(2018) by *Martin Weigert, Uwe Schmidt, Tobias Boothe, Andreas Müller, Alexandr Dibrov, Akanksha Jain, Benjamin Wilhelm, Deborah Schmidt, Coleman Broaddus, Siân Culley, Mauricio Rocha-Martins, Fabián Segovia-Miranda, Caren Norden, Ricardo Henriques, Marino Zerial, Michele Solimena, Jochen Rink, Pavel Tomancak, Loic Royer, Florian Jug & Eugene W. Myers* (https://www.nature.com/articles/s41592-018-0216-7)\n","\n","And source code found in: https://github.com/csbdeep/csbdeep\n","\n","For a more in-depth description of the features of the network,please refer to [this guide](http://csbdeep.bioimagecomputing.com/doc/) provided by the original authors of the work.\n","\n","We provide a dataset for the training of this notebook as a way to test its functionalities but the training and test data of the restoration experiments is also available from the authors of the original paper [here](https://publications.mpi-cbg.de/publications-sites/7207/).\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. 
Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For CARE to train, **it needs to have access to a paired training dataset**. This means that the same image needs to be acquired in the two conditions (for instance, low signal-to-noise ratio and high signal-to-noise ratio) and provided with indication of correspondence.\n","\n"," Therefore, the data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Low SNR images\" (Training_source) and \"Training - high SNR images\" (Training_target). Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. 
Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install CARE and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install CARE and dependencies\n","\n","#Libraries contains information of certain topics. \n","#For example the tifffile library contains information on how to handle tif-files.\n","\n","#Here, we install libraries which are not already included in Colab.\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). 
It uses Keras and Tensorflow.\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","#Here, we import and enable Tensorflow 1 instead of Tensorflow 2.\n","%tensorflow_version 1.x\n","import tensorflow\n","import tensorflow.compat.v1 as tf\n","tf.disable_v2_behavior()\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# ------- Variable specific to CARE -------\n","from csbdeep.utils import download_and_extract_zip_file, plot_some, axes_dict, plot_history, Path, download_and_extract_zip_file\n","from csbdeep.data import RawData, create_patches \n","from csbdeep.io import load_training_data, save_tiff_imagej_compatible\n","from csbdeep.models import Config, CARE\n","from csbdeep import data\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"BLmBseWbRvxL","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (Low SNR images) and Training_target (High SNR images or ground truth) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:**Input how many epochs (rounds) the network will be trained. 
Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-300 epochs. Evaluate the performance after training (see 5). **Default value: 50**\n","\n","**`patch_size`:** CARE divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 80**\n","\n","**When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** \n","\n","**`number_of_patches`:** Input the number of the patches per image. Increasing the number of patches allows for larger training datasets. **Default value: 100** \n","\n","**Decreasing the patch size or increasing the number of patches may improve the training but may also increase the training time.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0004**"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","InputFile = Training_source+\"/*.tif\"\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","OutputFile = Training_target+\"/*.tif\"\n","\n","#Define where the patch file will be saved\n","base = \"/content\"\n","\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","number_of_epochs = 50#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 80#@param {type:\"number\"} # in pixels\n","number_of_patches = 100#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","number_of_steps = 400#@param {type:\"number\"}\n","batch_size = 16#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","#Here we define the percentage to use for validation\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n","\n","\n","# Here we disable pre-trained model by default (in case the cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","# The shape of the images.\n","x = imread(InputFile)\n","y = imread(OutputFile)\n","\n","print('Loaded Input images (number, width, length) =', x.shape)\n","print('Loaded Output images (number, width, length) =', y.shape)\n","print(\"Parameters initiated.\")\n","\n","# This will display a randomly chosen dataset input and output\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, norm=simple_norm(x, percent = 99), interpolation='nearest')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, norm=simple_norm(y, percent = 99), interpolation='nearest')\n","plt.title('Training target')\n","plt.axis('off');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"_-CEUqlS8o3M","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"qe9zvEJ9qOH2","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by [Augmentor.](https://github.com/mdbloice/Augmentor)\n","\n","[Augmentor](https://github.com/mdbloice/Augmentor) was described in the following article:\n","\n","Marcus D Bloice, Peter M Roth, Andreas Holzinger, Biomedical image augmentation using Augmentor, Bioinformatics, https://doi.org/10.1093/bioinformatics/btz259\n","\n","**Please also cite this original paper when publishing results obtained using this notebook with augmentation enabled.** "]},{"cell_type":"code","metadata":{"id":"zmtlu9YU266X","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," !pip install Augmentor\n"," import Augmentor\n","\n","\n","#@markdown ####Choose a factor by which you want to multiply your original dataset\n","\n","Multiply_dataset_by = 2 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please choose the probability of the following image manipulations to be used to augment your dataset (1 = always used; 0 = disabled ):\n","\n","#@markdown ####Mirror and rotate images\n","rotate_90_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","rotate_270_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_left_right = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_top_bottom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image Zoom\n","\n","random_zoom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","random_zoom_magnification = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image distortion\n","\n","random_distortion = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","#@markdown ####Image shearing and skewing \n","\n","image_shear = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","max_image_shear = 1 #@param {type:\"slider\", min:1, max:25, step:1}\n","\n","skew_image = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","skew_image_magnitude = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","if Use_Default_Augmentation_Parameters:\n"," rotate_90_degrees = 0.5\n"," rotate_270_degrees = 0.5\n"," flip_left_right = 0.5\n"," flip_top_bottom = 0.5\n","\n"," if not Multiply_dataset_by >5:\n"," random_zoom = 0\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0\n"," image_shear = 0\n"," max_image_shear = 10\n"," skew_image = 0\n"," skew_image_magnitude = 0\n","\n"," if Multiply_dataset_by >5:\n"," random_zoom = 0.1\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0.5\n"," image_shear = 0.2\n"," max_image_shear = 5\n"," skew_image = 0.2\n"," skew_image_magnitude = 0.4\n","\n"," if Multiply_dataset_by >25:\n"," random_zoom = 0.5\n"," random_zoom_magnification = 0.8\n"," random_distortion = 0.5\n"," image_shear = 0.5\n"," max_image_shear = 20\n"," skew_image = 0.5\n"," skew_image_magnitude = 0.6\n","\n","\n","list_files = os.listdir(Training_source)\n","Nb_files = len(list_files)\n","\n","Nb_augmented_files = (Nb_files * 
Multiply_dataset_by)\n","\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","# Here we set the path for the various folder were the augmented images will be loaded\n","\n","# All images are first saved into the augmented folder\n"," #Augmented_folder = \"/content/Augmented_Folder\"\n"," \n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n","\n"," #Training_source_augmented = \"/content/Training_source_augmented\"\n"," Training_source_augmented = Saving_path+\"/Training_source_augmented\"\n","\n"," if os.path.exists(Training_source_augmented):\n"," shutil.rmtree(Training_source_augmented)\n"," os.makedirs(Training_source_augmented)\n","\n"," #Training_target_augmented = \"/content/Training_target_augmented\"\n"," Training_target_augmented = Saving_path+\"/Training_target_augmented\"\n","\n"," if os.path.exists(Training_target_augmented):\n"," shutil.rmtree(Training_target_augmented)\n"," os.makedirs(Training_target_augmented)\n","\n","\n","# Here we generate the augmented images\n","#Load the images\n"," p = Augmentor.Pipeline(Training_source, Augmented_folder)\n","\n","#Define the matching images\n"," p.ground_truth(Training_target)\n","#Define the augmentation possibilities\n"," if not rotate_90_degrees == 0:\n"," p.rotate90(probability=rotate_90_degrees)\n"," \n"," if not rotate_270_degrees == 0:\n"," p.rotate270(probability=rotate_270_degrees)\n","\n"," if not flip_left_right == 0:\n"," p.flip_left_right(probability=flip_left_right)\n","\n"," if not flip_top_bottom == 0:\n"," p.flip_top_bottom(probability=flip_top_bottom)\n","\n"," if not random_zoom == 0:\n"," p.zoom_random(probability=random_zoom, percentage_area=random_zoom_magnification)\n"," \n"," if not random_distortion == 0:\n"," p.random_distortion(probability=random_distortion, grid_width=4, grid_height=4, magnitude=8)\n","\n"," if not image_shear == 0:\n"," p.shear(probability=image_shear,max_shear_left=20,max_shear_right=20)\n"," \n"," if not skew_image == 0:\n"," p.skew(probability=skew_image,magnitude=skew_image_magnitude)\n","\n"," p.sample(int(Nb_augmented_files))\n","\n"," print(int(Nb_augmented_files),\"matching images generated\")\n","\n","# Here we sort through the images and move them back to augmented trainning source and targets folders\n","\n"," augmented_files = os.listdir(Augmented_folder)\n","\n"," for f in augmented_files:\n","\n"," if (f.startswith(\"_groundtruth_(1)_\")):\n"," shortname_noprefix = f[17:]\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_target_augmented+\"/\"+shortname_noprefix) \n"," if not (f.startswith(\"_groundtruth_(1)_\")):\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_source_augmented+\"/\"+f)\n"," \n","\n"," for filename in os.listdir(Training_source_augmented):\n"," os.chdir(Training_source_augmented)\n"," os.rename(filename, filename.replace('_original', ''))\n"," \n"," #Here we clean up the extra files\n"," shutil.rmtree(Augmented_folder)\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\") \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"4kb3xSZMRzxU","colab_type":"text"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CARE 2D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"mlN-VNOgR-nr","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_'+Weights_choice+'.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," 
if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead')\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead')\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"-A4ipz8gs3Ew","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. 
to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"LKYRNhA5Qnis","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","# This object holds the image pairs (GT and low), ensuring that CARE compares corresponding images.\n","# This file is saved in .npz format and later called when loading the trainig data.\n","\n","\n","raw_data = data.RawData.from_folder(\n"," basepath=base,\n"," source_dirs=[Training_source_dir], \n"," target_dir=Training_target_dir, \n"," axes='CYX', \n"," pattern='*.tif*')\n","\n","X, Y, XY_axes = data.create_patches(\n"," raw_data, \n"," patch_filter=None, \n"," patch_size=(patch_size,patch_size), \n"," n_patches_per_image=number_of_patches)\n","\n","print ('Creating 2D training dataset')\n","training_path = model_path+\"/rawdata\"\n","rawdata1 = training_path+\".npz\"\n","np.savez(training_path,X=X, Y=Y, axes=XY_axes)\n","\n","# Load Training Data\n","(X,Y), (X_val,Y_val), axes = load_training_data(rawdata1, validation_split=percentage, verbose=True)\n","c = axes_dict(axes)['C']\n","n_channel_in, n_channel_out = X.shape[c], Y.shape[c]\n","\n","%memit \n","\n","#plot of training patches.\n","plt.figure(figsize=(12,5))\n","plot_some(X[:5],Y[:5])\n","plt.suptitle('5 example training patches (top row: source, bottom row: target)');\n","\n","#plot of validation patches\n","plt.figure(figsize=(12,5))\n","plot_some(X_val[:5],Y_val[:5])\n","plt.suptitle('5 example validation patches (top row: source, bottom row: target)');\n","\n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here we create the configuration file\n","\n","config = Config(axes, n_channel_in, n_channel_out, probabilistic=True, train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, unet_kern_size=5, unet_n_depth=3, train_batch_size=batch_size, train_learning_rate=initial_learning_rate)\n","\n","print(config)\n","vars(config)\n","\n","# Compile the CARE model for network training\n","model_training= CARE(config, model_name, basedir=model_path)\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model_training.load_weights(h5_file_path)\n","# --------------------- ---------------------- 
------------------------\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"biXiR017C4UU","colab_type":"code","cellView":"form","colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start Training\n","\n","# Start Training\n","history = model_training.train(X,Y, validation_data=(X_val,Y_val))\n","\n","print(\"Training, done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.3. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","**Note: Plots of the losses will be shown in a linear and in a log scale. This can help visualise changes in the losses at different magnitudes. However, note that if the losses are negative the plot on the log scale will be empty. This is not an error.**"]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RZOPCVN0qcYb","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"Nh8MlX3sqd_7","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Activate the pretrained model. \n","model_training = CARE(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," predicted = model_training.predict(img, axes='YX')\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(filename, predicted)\n","\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * 
x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," 
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(20,20))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99))\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"9ZmST3JRq-Ho","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = os.path.join(Prediction_model_path, Prediction_model_name)\n","\n","\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","\n","#Activate the pretrained model. \n","model_training = CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path)\n","\n","\n","# creates a loop, creating filenames and saving them\n","for filename in os.listdir(Data_folder):\n"," img = imread(os.path.join(Data_folder,filename))\n"," restored = model_training.predict(img, axes='YX')\n"," os.chdir(Result_folder)\n"," imsave(filename,restored)\n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","plt.figure(figsize=(16,8))\n","\n","plt.subplot(1,2,1)\n","plt.axis('off')\n","plt.imshow(x, norm=simple_norm(x, percent = 99), interpolation='nearest')\n","plt.title('Input')\n","\n","plt.subplot(1,2,2)\n","plt.axis('off')\n","plt.imshow(y, norm=simple_norm(y, percent = 99), interpolation='nearest')\n","plt.title('Predicted output');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. 
Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using CARE 2D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"CARE_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1mqcexfPBaIWuvMWWbJZUFtPoZoJJwrEA","timestamp":1589278334507},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **CARE: Content-aware image restoration (2D)**\n","\n","---\n","\n","CARE is a neural network capable of image restoration from corrupted bio-images, first published in 2018 by [Weigert *et al.* in Nature Methods](https://www.nature.com/articles/s41592-018-0216-7). The CARE network uses a U-Net network architecture and allows image restoration and resolution improvement in 2D and 3D images, in a supervised manner, using noisy images as input and low-noise images as targets for training. The function of the network is essentially determined by the set of images provided in the training dataset. For instance, if noisy images are provided as input and high signal-to-noise ratio images are provided as targets, the network will perform denoising.\n","\n"," **This particular notebook enables restoration of 2D dataset. If you are interested in restoring 3D dataset, you should use the CARE 3D notebook instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). 
Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is based on the following paper: \n","\n","**Content-aware image restoration: pushing the limits of fluorescence microscopy**, by Weigert *et al.* published in Nature Methods in 2018 (https://www.nature.com/articles/s41592-018-0216-7)\n","\n","And source code found in: https://github.com/csbdeep/csbdeep\n","\n","For a more in-depth description of the features of the network, please refer to [this guide](http://csbdeep.bioimagecomputing.com/doc/) provided by the original authors of the work.\n","\n","We provide a dataset for the training of this notebook as a way to test its functionalities, but the training and test data of the restoration experiments are also available from the authors of the original paper [here](https://publications.mpi-cbg.de/publications-sites/7207/).\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. Once execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click an entry to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For CARE to train, **it needs to have access to a paired training dataset**. This means that the same image needs to be acquired in the two conditions (for instance, low signal-to-noise ratio and high signal-to-noise ratio) and provided with indication of correspondence.\n","\n"," Therefore, the data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Low SNR images\" (Training_source) and \"Training - high SNR images\" (Training_target). Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. 
Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste it into the cell and press Enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of the notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in to your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on the \"Files\" tab and refresh it. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install CARE and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install CARE and dependencies\n","\n","#Libraries contain information on certain topics. \n","#For example the tifffile library contains information on how to handle tif-files.\n","\n","#Here, we install libraries which are not already included in Colab.\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microscopy images (Content-aware Image Restoration, CARE). 
It uses Keras and Tensorflow.\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","#Here, we import and enable Tensorflow 1 instead of Tensorflow 2.\n","import tensorflow \n","import tensorflow as tf\n","\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# ------- Variable specific to CARE -------\n","from csbdeep.utils import download_and_extract_zip_file, plot_some, axes_dict, plot_history, Path, download_and_extract_zip_file\n","from csbdeep.data import RawData, create_patches \n","from csbdeep.io import load_training_data, save_tiff_imagej_compatible\n","from csbdeep.models import Config, CARE\n","from csbdeep import data\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"BLmBseWbRvxL","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (Low SNR images) and Training_target (High SNR images or ground truth) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:**Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-300 epochs. 
Evaluate the performance after training (see 5). **Default value: 50**\n","\n","**`patch_size`:** CARE divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 80**\n","\n","**When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** \n","\n","**`number_of_patches`:** Input the number of the patches per image. Increasing the number of patches allows for larger training datasets. **Default value: 100** \n","\n","**Decreasing the patch size or increasing the number of patches may improve the training but may also increase the training time.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0004**"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","InputFile = Training_source+\"/*.tif\"\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","OutputFile = Training_target+\"/*.tif\"\n","\n","#Define where the patch file will be saved\n","base = \"/content\"\n","\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","number_of_epochs = 50#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 80#@param {type:\"number\"} # in pixels\n","number_of_patches = 100#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","batch_size = 16#@param {type:\"number\"}\n","number_of_steps = 400#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","#Here we define the percentage to use for validation\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n","\n","\n","# Here we disable pre-trained model by default (in case the cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","# The shape of the images.\n","x = imread(InputFile)\n","y = imread(OutputFile)\n","\n","print('Loaded Input images (number, width, length) =', x.shape)\n","print('Loaded Output images (number, width, length) =', y.shape)\n","print(\"Parameters initiated.\")\n","\n","# This will display a randomly chosen dataset input and output\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, norm=simple_norm(x, percent = 99), interpolation='nearest')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, norm=simple_norm(y, percent = 99), interpolation='nearest')\n","plt.title('Training target')\n","plt.axis('off');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"_-CEUqlS8o3M","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"qe9zvEJ9qOH2","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by [Augmentor.](https://github.com/mdbloice/Augmentor)\n","\n","[Augmentor](https://github.com/mdbloice/Augmentor) was described in the following article:\n","\n","Marcus D Bloice, Peter M Roth, Andreas Holzinger, Biomedical image augmentation using Augmentor, Bioinformatics, https://doi.org/10.1093/bioinformatics/btz259\n","\n","**Please also cite this original paper when publishing results obtained using this notebook with augmentation enabled.** "]},{"cell_type":"code","metadata":{"id":"zmtlu9YU266X","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," !pip install Augmentor\n"," import Augmentor\n","\n","\n","#@markdown ####Choose a factor by which you want to multiply your original dataset\n","\n","Multiply_dataset_by = 1 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please choose the probability of the following image manipulations to be used to augment your dataset (1 = always used; 0 = disabled ):\n","\n","#@markdown ####Mirror and rotate images\n","rotate_90_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","rotate_270_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_left_right = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_top_bottom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image Zoom\n","\n","random_zoom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","random_zoom_magnification = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image distortion\n","\n","random_distortion = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","#@markdown ####Image shearing and skewing \n","\n","image_shear = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","max_image_shear = 1 #@param {type:\"slider\", min:1, max:25, step:1}\n","\n","skew_image = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","skew_image_magnitude = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","if Use_Default_Augmentation_Parameters:\n"," rotate_90_degrees = 0.5\n"," rotate_270_degrees = 0.5\n"," flip_left_right = 0.5\n"," flip_top_bottom = 0.5\n","\n"," if not Multiply_dataset_by >5:\n"," random_zoom = 0\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0\n"," image_shear = 0\n"," max_image_shear = 10\n"," skew_image = 0\n"," skew_image_magnitude = 0\n","\n"," if Multiply_dataset_by >5:\n"," random_zoom = 0.1\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0.5\n"," image_shear = 0.2\n"," max_image_shear = 5\n"," skew_image = 0.2\n"," skew_image_magnitude = 0.4\n","\n"," if Multiply_dataset_by >25:\n"," random_zoom = 0.5\n"," random_zoom_magnification = 0.8\n"," random_distortion = 0.5\n"," image_shear = 0.5\n"," max_image_shear = 20\n"," skew_image = 0.5\n"," skew_image_magnitude = 0.6\n","\n","\n","list_files = os.listdir(Training_source)\n","Nb_files = len(list_files)\n","\n","Nb_augmented_files = (Nb_files * 
Multiply_dataset_by)\n","\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","# Here we set the path for the various folder were the augmented images will be loaded\n","\n","# All images are first saved into the augmented folder\n"," #Augmented_folder = \"/content/Augmented_Folder\"\n"," \n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n","\n"," #Training_source_augmented = \"/content/Training_source_augmented\"\n"," Training_source_augmented = Saving_path+\"/Training_source_augmented\"\n","\n"," if os.path.exists(Training_source_augmented):\n"," shutil.rmtree(Training_source_augmented)\n"," os.makedirs(Training_source_augmented)\n","\n"," #Training_target_augmented = \"/content/Training_target_augmented\"\n"," Training_target_augmented = Saving_path+\"/Training_target_augmented\"\n","\n"," if os.path.exists(Training_target_augmented):\n"," shutil.rmtree(Training_target_augmented)\n"," os.makedirs(Training_target_augmented)\n","\n","\n","# Here we generate the augmented images\n","#Load the images\n"," p = Augmentor.Pipeline(Training_source, Augmented_folder)\n","\n","#Define the matching images\n"," p.ground_truth(Training_target)\n","#Define the augmentation possibilities\n"," if not rotate_90_degrees == 0:\n"," p.rotate90(probability=rotate_90_degrees)\n"," \n"," if not rotate_270_degrees == 0:\n"," p.rotate270(probability=rotate_270_degrees)\n","\n"," if not flip_left_right == 0:\n"," p.flip_left_right(probability=flip_left_right)\n","\n"," if not flip_top_bottom == 0:\n"," p.flip_top_bottom(probability=flip_top_bottom)\n","\n"," if not random_zoom == 0:\n"," p.zoom_random(probability=random_zoom, percentage_area=random_zoom_magnification)\n"," \n"," if not random_distortion == 0:\n"," p.random_distortion(probability=random_distortion, grid_width=4, grid_height=4, magnitude=8)\n","\n"," if not image_shear == 0:\n"," p.shear(probability=image_shear,max_shear_left=20,max_shear_right=20)\n"," \n"," if not skew_image == 0:\n"," p.skew(probability=skew_image,magnitude=skew_image_magnitude)\n","\n"," p.sample(int(Nb_augmented_files))\n","\n"," print(int(Nb_augmented_files),\"matching images generated\")\n","\n","# Here we sort through the images and move them back to augmented trainning source and targets folders\n","\n"," augmented_files = os.listdir(Augmented_folder)\n","\n"," for f in augmented_files:\n","\n"," if (f.startswith(\"_groundtruth_(1)_\")):\n"," shortname_noprefix = f[17:]\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_target_augmented+\"/\"+shortname_noprefix) \n"," if not (f.startswith(\"_groundtruth_(1)_\")):\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_source_augmented+\"/\"+f)\n"," \n","\n"," for filename in os.listdir(Training_source_augmented):\n"," os.chdir(Training_source_augmented)\n"," os.rename(filename, filename.replace('_original', ''))\n"," \n"," #Here we clean up the extra files\n"," shutil.rmtree(Augmented_folder)\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\") \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"4kb3xSZMRzxU","colab_type":"text"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CARE 2D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"mlN-VNOgR-nr","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_'+Weights_choice+'.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," 
if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead')\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead')\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"-A4ipz8gs3Ew","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. 
to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"LKYRNhA5Qnis","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","# This object holds the image pairs (GT and low), ensuring that CARE compares corresponding images.\n","# This file is saved in .npz format and later called when loading the trainig data.\n","\n","\n","raw_data = data.RawData.from_folder(\n"," basepath=base,\n"," source_dirs=[Training_source_dir], \n"," target_dir=Training_target_dir, \n"," axes='CYX', \n"," pattern='*.tif*')\n","\n","X, Y, XY_axes = data.create_patches(\n"," raw_data, \n"," patch_filter=None, \n"," patch_size=(patch_size,patch_size), \n"," n_patches_per_image=number_of_patches)\n","\n","print ('Creating 2D training dataset')\n","training_path = model_path+\"/rawdata\"\n","rawdata1 = training_path+\".npz\"\n","np.savez(training_path,X=X, Y=Y, axes=XY_axes)\n","\n","# Load Training Data\n","(X,Y), (X_val,Y_val), axes = load_training_data(rawdata1, validation_split=percentage, verbose=True)\n","c = axes_dict(axes)['C']\n","n_channel_in, n_channel_out = X.shape[c], Y.shape[c]\n","\n","%memit \n","\n","#plot of training patches.\n","plt.figure(figsize=(12,5))\n","plot_some(X[:5],Y[:5])\n","plt.suptitle('5 example training patches (top row: source, bottom row: target)');\n","\n","#plot of validation patches\n","plt.figure(figsize=(12,5))\n","plot_some(X_val[:5],Y_val[:5])\n","plt.suptitle('5 example validation patches (top row: source, bottom row: target)');\n","\n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here we create the configuration file\n","\n","config = Config(axes, n_channel_in, n_channel_out, probabilistic=True, train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, unet_kern_size=5, unet_n_depth=3, train_batch_size=batch_size, train_learning_rate=initial_learning_rate)\n","\n","print(config)\n","vars(config)\n","\n","# Compile the CARE model for network training\n","model_training= CARE(config, model_name, basedir=model_path)\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model_training.load_weights(h5_file_path)\n","# --------------------- ---------------------- 
------------------------\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"biXiR017C4UU","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start training\n","\n","start = time.time()\n","\n","# Start Training\n","history = model_training.train(X,Y, validation_data=(X_val,Y_val))\n","\n","print(\"Training, done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.3. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
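As a quick check before running the quality control cells, the training history saved in section 4.2 (`training_evaluation.csv`) can be summarised directly. The snippet below is a minimal sketch, not one of the notebook cells, and it assumes `model_path` and `model_name` are still set as in section 3.1; pandas is already imported in section 2.

```python
# Minimal sketch: summarise the training history written by section 4.2.
# Assumes model_path and model_name are defined as in section 3.1.
import os
import pandas as pd

history_csv = os.path.join(model_path, model_name, 'Quality Control', 'training_evaluation.csv')
history = pd.read_csv(history_csv)  # columns: 'loss', 'val_loss', 'learning rate'

best_epoch = int(history['val_loss'].idxmin()) + 1  # one row is written per epoch
print('Lowest validation loss: %0.4f at epoch %d' % (history['val_loss'].min(), best_epoch))
print('Final learning rate: ' + str(history['learning rate'].iloc[-1]))
```

If the lowest validation loss occurs well before the last epoch, the "best" weights saved during training are usually the ones to evaluate below.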
\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","**Note: Plots of the losses will be shown in a linear and in a log scale. This can help visualise changes in the losses at different magnitudes. However, note that if the losses are negative the plot on the log scale will be empty. This is not an error.**"]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RZOPCVN0qcYb","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
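To make the PSNR description above concrete: the QC cell below calls scikit-image's `peak_signal_noise_ratio` (imported as `psnr` in section 2) with `data_range=1.0`, which corresponds to PSNR = 10·log10(data_range² / MSE). The short sketch below uses hypothetical random arrays in place of real images and is only meant to illustrate that relationship; it is not part of the QC cell itself.

```python
# Illustration only (hypothetical arrays, not notebook data): PSNR computed
# manually as 10*log10(data_range**2 / MSE) matches skimage's value
# up to floating-point precision.
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as psnr

gt = np.random.rand(64, 64).astype(np.float32)                   # stand-in for a normalised ground-truth image
pred = (gt + 0.05 * np.random.randn(64, 64)).astype(np.float32)  # stand-in for a prediction

mse = np.mean((gt - pred) ** 2)
print('PSNR (manual) :', 10 * np.log10(1.0 ** 2 / mse))  # data_range = 1.0 after normalisation
print('PSNR (skimage):', psnr(gt, pred, data_range=1.0))
```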
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"Nh8MlX3sqd_7","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Activate the pretrained model. \n","model_training = CARE(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," predicted = model_training.predict(img, axes='YX')\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(filename, predicted)\n","\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * 
x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," 
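# (Descriptive note, added for clarity.) The NRMSE values above are the square root of
# the mean of the corresponding RSE maps, and the PSNR values are computed with
# data_range=1.0 on the normalised images, so both metrics compare the images on a
# common scale before they are written to the CSV file below.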
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(20,20))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99))\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
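As a minimal sketch of what this means in practice (illustrative only; the folder path is a placeholder), the full model folder path is split into a name and a base directory before being handed to CARE:

```python
# Minimal sketch, assuming a CARE model folder previously saved by this notebook.
import os
from csbdeep.models import CARE

Prediction_model_folder = "/content/gdrive/My Drive/models/my_CARE_model"  # placeholder

name = os.path.basename(Prediction_model_folder)     # "my_CARE_model"
basedir = os.path.dirname(Prediction_model_folder)   # ".../models"

# config=None tells csbdeep to load the configuration and weights stored in basedir/name.
model = CARE(config=None, name=name, basedir=basedir)
```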
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"9ZmST3JRq-Ho","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = os.path.join(Prediction_model_path, Prediction_model_name)\n","\n","\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","\n","#Activate the pretrained model. \n","model_training = CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path)\n","\n","\n","# creates a loop, creating filenames and saving them\n","for filename in os.listdir(Data_folder):\n"," img = imread(os.path.join(Data_folder,filename))\n"," restored = model_training.predict(img, axes='YX')\n"," os.chdir(Result_folder)\n"," imsave(filename,restored)\n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","plt.figure(figsize=(16,8))\n","\n","plt.subplot(1,2,1)\n","plt.axis('off')\n","plt.imshow(x, norm=simple_norm(x, percent = 99), interpolation='nearest')\n","plt.title('Input')\n","\n","plt.subplot(1,2,2)\n","plt.axis('off')\n","plt.imshow(y, norm=simple_norm(y, percent = 99), interpolation='nearest')\n","plt.title('Predicted output');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. 
Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using CARE 2D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/CARE_3D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/CARE_3D_ZeroCostDL4Mic.ipynb index 96f393e4..8686f7e5 100755 --- a/Colab_notebooks/CARE_3D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/CARE_3D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"CARE_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1t9a-44km730bI7F4I08-6Xh7wEZuL98p","timestamp":1591013189418},{"file_id":"11TigzvLl4FSSwFHUNwLzZKI2IAix4Nmu","timestamp":1586415689249},{"file_id":"1_dSnxUg_qtNWjrPc7D6RWDWlCanEL4Ve","timestamp":1585153449937},{"file_id":"1bKo8jYVZPPgXPa_-Gdu1KhDnNN4vYfLx","timestamp":1583200150464}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **Content-aware image restoration (CARE) 3D**\n","\n","CARE is a neural network based architecture for image enhancement, first published in 2018 by Weigert et al. in Nature Methods (see above). The network allows image restoration and resolution improvement in 2D and 3D images, in a supervised manner, using noisy images as input and low-noise images as targets for training.\n","\n","Once the user is familiarised with functions of CARE, it can be instructive to explore in-depth features and functions of CARE in [this guide](http://csbdeep.bioimagecomputing.com/doc/) provided by the authors.\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper: **Content-aware image restoration: pushing the limits of fluorescence microscopy**, Nature Methods, Volume 15. pages 1090–1097(2018) by *Martin Weigert, Uwe Schmidt, Tobias Boothe, Andreas Müller, Alexandr Dibrov, Akanksha Jain, Benjamin Wilhelm, Deborah Schmidt, Coleman Broaddus, Siân Culley, Mauricio Rocha-Martins, Fabián Segovia-Miranda, Caren Norden, Ricardo Henriques, Marino Zerial, Michele Solimena, Jochen Rink, Pavel Tomancak, Loic Royer, Florian Jug & Eugene W. 
Myers* (https://www.nature.com/articles/s41592-018-0216-7)\n","\n","The source code can be found at: https://github.com/csbdeep/csbdeep\n","\n","More documentation on the CARE python package can be found here:\n","http://csbdeep.bioimagecomputing.com/doc/\n","\n","The original training and test data of the restoration experiments: https://publications.mpi-cbg.de/publications-sites/7207/\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cells: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. After execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For CARE to train, **it needs to have access to a paired training dataset**. This means that the same image needs to be acquired in the two conditions (for instance, low signal-to-noise ratio and high signal-to-noise ratio) and provided with an indication of correspondence.\n","\n"," Therefore, the data structure is important. 
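Before training, it can be useful to check that the folder layout described in the next paragraphs is respected, in particular that every source image has a target image with the same file name. Below is a small, hypothetical helper (not part of the notebook; the two folder paths are placeholders):

```python
# Minimal sketch: verify that the source and target folders form matched pairs.
import os

Training_source = "/content/gdrive/My Drive/Training - Low SNR images"   # placeholder
Training_target = "/content/gdrive/My Drive/Training - high SNR images"  # placeholder

source_files = sorted(f for f in os.listdir(Training_source) if f.endswith('.tif'))
target_files = set(f for f in os.listdir(Training_target) if f.endswith('.tif'))

missing = [f for f in source_files if f not in target_files]
if missing:
    print('Source images without a matching target of the same name:', missing)
else:
    print('All', len(source_files), 'source images have a matching target image.')
```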
It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Low SNR images\" (Training_source) and \"Training - high SNR images\" (Training_target). Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. \n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"b4-r1gE7Iamv","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"BDhmUgqCStlm","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. 
To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-oqBTeLaImnU","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install CARE and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install CARE and dependencies\n","\n","\n","#Here, we install libraries which are not already included in Colab.\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). 
It uses Keras and Tensorflow.\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","#Here, we import and enable Tensorflow 1 instead of Tensorflow 2.\n","%tensorflow_version 1.x\n","import tensorflow\n","import tensorflow.compat.v1 as tf\n","tf.disable_v2_behavior()\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# ------- Variable specific to CARE -------\n","from csbdeep.utils import download_and_extract_zip_file, normalize, plot_some, axes_dict, plot_history, Path, download_and_extract_zip_file\n","from csbdeep.data import RawData, create_patches \n","from csbdeep.io import load_training_data, save_tiff_imagej_compatible\n","from csbdeep.models import Config, CARE\n","from csbdeep import data\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"WzYAA-MuaYrT","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (Low SNR images) and Training_target (High SNR images or ground truth) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. 
Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number of epochs`:**Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-300 epochs. Evaluate the performance after training (see 5.). **Default value: 40**\n","\n","**`patch_size`:** CARE divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 80**\n","\n","**`patch_height`:** The value should be smaller than the Z dimensions of the image and divisible by 4. When analysing isotropic stacks patch_size and patch_height should have similar values.\n","\n","**When choosing the patch_size and patch_height, the values should be i) large enough that they will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** \n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappear.**\n","\n","**`number_of_patches`:** Input the number of the patches per image. Increasing the number of patches allows for larger training datasets. **Default value: 200** \n","\n","**Decreasing the patch size or increasing the number of patches may improve the training but may also increase the training time.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0004**"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["\n","#@markdown ###Path to training images:\n","\n","# base folder of GT and low images\n","base = \"/content\"\n","\n","# low SNR images\n","Training_source = \"\" #@param {type:\"string\"}\n","lowfile = Training_source+\"/*.tif\"\n","# Ground truth images\n","Training_target = \"\" #@param {type:\"string\"}\n","GTfile = Training_target+\"/*.tif\"\n","\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","# create the training data file into model_path folder.\n","training_data = model_path+\"/my_training_data.npz\"\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","\n","number_of_epochs = 40#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 80#@param {type:\"number\"} # pixels in\n","patch_height = 8#@param {type:\"number\"}\n","number_of_patches = 200#@param {type:\"number\"}\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","batch_size = 16#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","number_of_steps = 300#@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","\n","#Load one randomly chosen training source file\n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","\n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","\n","#Load one randomly chosen training target file\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","\n","\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], norm=simple_norm(x[mid_plane], percent = 99), interpolation='nearest')\n","plt.axis('off')\n","plt.title('Low SNR image (single Z plane)');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], norm=simple_norm(y[mid_plane], percent = 99), interpolation='nearest')\n","plt.axis('off')\n","plt.title('High SNR image (single Z plane)');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xGcl7WGP4WHt","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---"]},{"cell_type":"markdown","metadata":{"id":"5Lio8hpZ4PJ1","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by rotating the training images in the XY-Plane and flipping them along X-Axis.\n","\n","**The flip option alone will double the size of your dataset, rotation will quadruple and both together will increase the dataset by a factor of 8.**"]},{"cell_type":"code","metadata":{"id":"htqjkJWt5J_8","colab_type":"code","cellView":"form","colab":{}},"source":["Use_Data_augmentation = False #@param{type:\"boolean\"}\n","\n","#@markdown Select this option if you want to use augmentation to increase the size of your dataset\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","#@markdown **Would you like to save your augmented images?**\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," 
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path) \n","\n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","if Use_Data_augmentation:\n","\n"," if os.path.exists(Saving_path+'/augmented_source'):\n"," shutil.rmtree(Saving_path+'/augmented_source')\n"," os.mkdir(Saving_path+'/augmented_source')\n","\n"," if os.path.exists(Saving_path+'/augmented_target'):\n"," shutil.rmtree(Saving_path+'/augmented_target') \n"," os.mkdir(Saving_path+'/augmented_target')\n","\n"," print(\"Data augmentation enabled\")\n"," print(\"Data augmentation in progress....\")\n","\n"," if Rotation == True:\n"," rotation_aug(Training_source,Training_target,flip=Flip)\n"," \n"," elif Rotation == False and Flip == True:\n"," flip(Training_source,Training_target)\n"," print(\"Done\")\n","\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\")\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bQDuybvyadKU","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CARE 3D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pret-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
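As a condensed, illustrative sketch of that idea (the folder path is a placeholder; the file and column names follow the convention used by these notebooks), the learning rate at the end of a previous training can be read back from the Quality Control folder:

```python
# Minimal sketch, assuming a model folder previously saved by a ZeroCostDL4Mic notebook.
import os
import pandas as pd

pretrained_model_path = "/content/gdrive/My Drive/my_previous_CARE_model"  # placeholder

weights_file = os.path.join(pretrained_model_path, "weights_last.h5")      # or "weights_best.h5"
csv_file = os.path.join(pretrained_model_path, "Quality Control", "training_evaluation.csv")

learning_rate = 0.0004  # fall back to the default if no record is found
if os.path.exists(csv_file):
    history = pd.read_csv(csv_file)
    if "learning rate" in history.columns:
        learning_rate = history["learning rate"].iloc[-1]  # rate at the end of training

print("Weights to load        :", weights_file)
print("Learning rate to resume:", learning_rate)
```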
"]},{"cell_type":"code","metadata":{"id":"8vPkzEBNamE4","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," 
bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained nerwork will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"tGW2iaU6X5zi","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"WMJnGJpCMa4y","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","# This object holds the image pairs (GT and low), ensuring that CARE compares corresponding images.\n","# This file is saved in .npz format and later called when loading the trainig data.\n","\n","if Use_Data_augmentation == True:\n"," Training_source = Saving_path+'/augmented_source'\n"," Training_target = Saving_path+'/augmented_target'\n","\n","raw_data = RawData.from_folder (\n"," basepath = base,\n"," source_dirs = [Training_source],\n"," target_dir = Training_target,\n"," axes = 'ZYX',\n"," pattern='*.tif*'\n",")\n","X, Y, XY_axes = create_patches (\n"," raw_data = raw_data,\n"," patch_size = (patch_height,patch_size,patch_size),\n"," n_patches_per_image = number_of_patches, \n"," save_file = training_data,\n",")\n","\n","assert X.shape == Y.shape\n","print(\"shape of X,Y =\", X.shape)\n","print(\"axes of X,Y =\", XY_axes)\n","\n","%memit \n","print ('Creating 3D training dataset')\n","\n","# Load Training Data\n","(X,Y), (X_val,Y_val), axes = load_training_data(training_data, validation_split=percentage, verbose=True)\n","c = axes_dict(axes)['C']\n","n_channel_in, n_channel_out = X.shape[c], Y.shape[c]\n","\n","#Plot example patches\n","\n","#plot of training patches.\n","plt.figure(figsize=(12,5))\n","plot_some(X[:5],Y[:5])\n","plt.suptitle('5 example training patches (top row: source, bottom row: target)');\n","\n","#plot of validation patches\n","plt.figure(figsize=(12,5))\n","plot_some(X_val[:5],Y_val[:5])\n","plt.suptitle('5 example validation patches (top row: source, bottom row: target)');\n","\n","%memit \n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning 
rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here, we create the default Config object which sets the hyperparameters of the network training.\n","\n","config = Config(axes, n_channel_in, n_channel_out, train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, train_batch_size=batch_size, train_learning_rate=initial_learning_rate)\n","print(config)\n","vars(config)\n","\n","# Compile the CARE model for network training\n","\n","model_training= CARE(config, model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model_training.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"id":"j_Qm5JBmlvJg","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start Training\n","\n","start = time.time()\n","\n","# Start Training\n","history = model_training.train(X,Y, validation_data=(X_val,Y_val))\n","\n","print(\"Training, done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"w8Q_uYGgiico","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. 
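One possible way to do this (a sketch, not part of the notebook; the two values below stand in for the model_path and model_name chosen in section 3, and a Google Colab session is assumed) is to zip the model folder and download the archive:

```python
# Minimal sketch, assuming the notebook is running in Google Colab.
import os
import shutil
from google.colab import files

model_path = "/content/gdrive/My Drive/CARE_models"  # placeholder: model_path from section 3
model_name = "my_CARE_model"                         # placeholder: model_name from section 3

archive = shutil.make_archive('/content/' + model_name, 'zip',
                              root_dir=os.path.join(model_path, model_name))
files.download(archive)  # triggers a browser download of the .zip archive
```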
It is, however, wise to download the folder, as all data can be erased at the next training if the same folder is used."]},{"cell_type":"markdown","metadata":{"id":"QYuIOWQ3imuU","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend performing quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"zazOZ3wDx0zQ","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value, but between the model's prediction on a validation image and its target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. 
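As a small illustration of that check (a sketch only; the CSV path is a placeholder standing in for the Quality Control folder of the model under evaluation), the epoch with the lowest validation loss can be compared with the last epoch recorded in training_evaluation.csv:

```python
# Minimal sketch, assuming a training_evaluation.csv written by this notebook.
import pandas as pd

qc_csv = "/content/gdrive/My Drive/CARE_models/my_CARE_model/Quality Control/training_evaluation.csv"  # placeholder

history = pd.read_csv(qc_csv)
best_epoch = int(history['val_loss'].idxmin()) + 1
last_epoch = len(history)

print('Lowest validation loss at epoch', best_epoch, 'of', last_epoch)
if best_epoch < last_epoch:
    print('Validation loss rose after epoch', best_epoch,
          '- if the training loss kept decreasing, this can point to overfitting.')
```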
In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"biT9FI9Ri77_","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
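To make the SSIM map described above concrete, here is a minimal, self-contained sketch (not part of the notebook; the image pair is synthetic) using the same scikit-image call and Gaussian window as the quality-control code below:

```python
# Minimal sketch, assuming a synthetic ground-truth/prediction pair normalised to [0, 1].
import numpy as np
from skimage.metrics import structural_similarity

rng = np.random.default_rng(0)
gt = rng.random((64, 64)).astype(np.float32)                                    # placeholder ground truth
pred = np.clip(gt + rng.normal(0.0, 0.05, gt.shape), 0, 1).astype(np.float32)   # placeholder prediction

# full=True returns the mean SSIM (mSSIM) together with the per-pixel SSIM map;
# gaussian_weights=True with sigma=1.5 corresponds to the 11-pixel Gaussian window described above.
mssim, ssim_map = structural_similarity(gt, pred, data_range=1.0, full=True,
                                        gaussian_weights=True, sigma=1.5,
                                        use_sample_covariance=False)
print('mSSIM         :', round(float(mssim), 3))  # closer to 1 is better
print('SSIM map shape:', ssim_map.shape)          # same shape as the input images
```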
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"nAs4Wni7VYbq","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","path_metrics_save = QC_model_path+'/'+QC_model_name+'/Quality Control/'\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(path_metrics_save+'Prediction'):\n"," shutil.rmtree(path_metrics_save+'Prediction')\n","os.makedirs(path_metrics_save+'Prediction')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = False #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","# Activate the pretrained model. 
\n","model_training = CARE(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," n_slices = img.shape[0]\n"," predicted = model_training.predict(img, axes='ZYX', n_tiles=n_tilesZYX)\n"," os.chdir(path_metrics_save+'Prediction/')\n"," imsave('Predicted_'+filename, predicted)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(path_metrics_save+'QC_metrics_'+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"File name\",\"Slice #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. 
GT PSNR\"]) \n"," \n"," # These lists will be used to collect all the metrics values per slice\n"," file_name_list = []\n"," slice_number_list = []\n"," mSSIM_GvP_list = []\n"," mSSIM_GvS_list = []\n"," NRMSE_GvP_list = []\n"," NRMSE_GvS_list = []\n"," PSNR_GvP_list = []\n"," PSNR_GvS_list = []\n","\n"," # These lists will be used to display the mean metrics for the stacks\n"," mSSIM_GvP_list_mean = []\n"," mSSIM_GvS_list_mean = []\n"," NRMSE_GvP_list_mean = []\n"," NRMSE_GvS_list_mean = []\n"," PSNR_GvP_list_mean = []\n"," PSNR_GvS_list_mean = []\n","\n"," # Let's loop through the provided dataset in the QC folders\n"," for thisFile in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder, thisFile)):\n"," print('Running QC on: '+thisFile)\n","\n"," test_GT_stack = io.imread(os.path.join(Target_QC_folder, thisFile))\n"," test_source_stack = io.imread(os.path.join(Source_QC_folder,thisFile))\n"," test_prediction_stack = io.imread(os.path.join(path_metrics_save+\"Prediction/\",'Predicted_'+thisFile))\n"," n_slices = test_GT_stack.shape[0]\n","\n"," # Calculating the position of the mid-plane slice\n"," z_mid_plane = int(n_slices / 2)+1\n","\n"," img_SSIM_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_SSIM_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n","\n"," for z in range(n_slices): \n"," # -------------------------------- Normalising the dataset --------------------------------\n","\n"," test_GT_norm, test_source_norm = norm_minmse(test_GT_stack[z], test_source_stack[z], normalize_gt=True)\n"," test_GT_norm, test_prediction_norm = norm_minmse(test_GT_stack[z], test_prediction_stack[z], normalize_gt=True)\n","\n"," # -------------------------------- Calculate the SSIM metric and maps --------------------------------\n","\n"," # Calculate the SSIM maps and index\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = structural_similarity(test_GT_norm, test_source_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n"," #Calculate ssim_maps\n"," img_SSIM_GTvsPrediction_stack[z] = img_as_float32(img_SSIM_GTvsPrediction, force_copy=False)\n"," img_SSIM_GTvsSource_stack[z] = img_as_float32(img_SSIM_GTvsSource, force_copy=False)\n"," \n","\n"," # -------------------------------- Calculate the NRMSE metrics --------------------------------\n","\n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Calculate SE maps\n"," img_RSE_GTvsPrediction_stack[z] = img_as_float32(img_RSE_GTvsPrediction, force_copy=False)\n"," img_RSE_GTvsSource_stack[z] = img_as_float32(img_RSE_GTvsSource, force_copy=False)\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n","\n"," # Calculate the PSNR 
between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([thisFile, str(z),str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource), str(PSNR_GTvsPrediction), str(PSNR_GTvsSource)])\n"," \n"," # Collect values to display in dataframe output\n"," slice_number_list.append(z)\n"," mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n"," mSSIM_GvS_list.append(index_SSIM_GTvsSource)\n"," NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n"," NRMSE_GvS_list.append(NRMSE_GTvsSource)\n"," PSNR_GvP_list.append(PSNR_GTvsPrediction)\n"," PSNR_GvS_list.append(PSNR_GTvsSource)\n","\n"," if (z == z_mid_plane): # catch these for display\n"," SSIM_GTvsP_forDisplay = index_SSIM_GTvsPrediction\n"," SSIM_GTvsS_forDisplay = index_SSIM_GTvsSource\n"," NRMSE_GTvsP_forDisplay = NRMSE_GTvsPrediction\n"," NRMSE_GTvsS_forDisplay = NRMSE_GTvsSource\n"," \n"," # If calculating average metrics for dataframe output\n"," file_name_list.append(thisFile)\n"," mSSIM_GvP_list_mean.append(sum(mSSIM_GvP_list)/len(mSSIM_GvP_list))\n"," mSSIM_GvS_list_mean.append(sum(mSSIM_GvS_list)/len(mSSIM_GvS_list))\n"," NRMSE_GvP_list_mean.append(sum(NRMSE_GvP_list)/len(NRMSE_GvP_list))\n"," NRMSE_GvS_list_mean.append(sum(NRMSE_GvS_list)/len(NRMSE_GvS_list))\n"," PSNR_GvP_list_mean.append(sum(PSNR_GvP_list)/len(PSNR_GvP_list))\n"," PSNR_GvS_list_mean.append(sum(PSNR_GvS_list)/len(PSNR_GvS_list))\n","\n"," # ----------- Change the stacks to 32 bit images -----------\n","\n"," img_SSIM_GTvsSource_stack_32 = img_as_float32(img_SSIM_GTvsSource_stack, force_copy=False)\n"," img_SSIM_GTvsPrediction_stack_32 = img_as_float32(img_SSIM_GTvsPrediction_stack, force_copy=False)\n"," img_RSE_GTvsSource_stack_32 = img_as_float32(img_RSE_GTvsSource_stack, force_copy=False)\n"," img_RSE_GTvsPrediction_stack_32 = img_as_float32(img_RSE_GTvsPrediction_stack, force_copy=False)\n","\n"," # ----------- Saving the error map stacks -----------\n"," io.imsave(path_metrics_save+'SSIM_GTvsSource_'+thisFile,img_SSIM_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'SSIM_GTvsPrediction_'+thisFile,img_SSIM_GTvsPrediction_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsSource_'+thisFile,img_RSE_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsPrediction_'+thisFile,img_RSE_GTvsPrediction_stack_32)\n","\n","#Averages of the metrics per stack as dataframe output\n","pdResults = pd.DataFrame(file_name_list, columns = [\"File name\"])\n","pdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list_mean\n","pdResults[\"Input v. GT mSSIM\"] = mSSIM_GvS_list_mean\n","pdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list_mean\n","pdResults[\"Input v. GT NRMSE\"] = NRMSE_GvS_list_mean\n","pdResults[\"Prediction v. GT PSNR\"] = PSNR_GvP_list_mean\n","pdResults[\"Input v. 
GT PSNR\"] = PSNR_GvS_list_mean\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same way\n","\n","plt.figure(figsize=(20,20))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","\n","# Calculating the position of the mid-plane slice\n","z_mid_plane = int(img_GT.shape[0] / 2)+1\n","\n","plt.imshow(img_GT[z_mid_plane], norm=simple_norm(img_GT[z_mid_plane], percent = 99))\n","plt.title('Target (slice #'+str(z_mid_plane)+')')\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source[z_mid_plane], norm=simple_norm(img_Source[z_mid_plane], percent = 99))\n","plt.title('Source (slice #'+str(z_mid_plane)+')')\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(path_metrics_save+'Prediction/', 'Predicted_'+Test_FileList[-1]))\n","plt.imshow(img_Prediction[z_mid_plane], norm=simple_norm(img_Prediction[z_mid_plane], percent = 99))\n","plt.title('Prediction (slice #'+str(z_mid_plane)+')')\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_SSIM_GTvsSource = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsSource_'+Test_FileList[-1]))\n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsS_forDisplay,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_SSIM_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsPrediction_'+Test_FileList[-1]))\n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsP_forDisplay,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_RSE_GTvsSource = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsSource_'+Test_FileList[-1]))\n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax = 1) \n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsS_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_RSE_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsPrediction_'+Test_FileList[-1]))\n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsP_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)\n","\n","print('-----------------------------------')\n","print('Here are the average scores for the stacks you tested in Quality control. To see values for all slices, open the .csv file saved in the Quality Control folder.')\n","pdResults.head()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"69aJVFfsqXbY","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"tcPNRq1TrMPB","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
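In essence, the prediction cell below does the following for each stack; this is a minimal sketch in which the paths, the model name and the (1, 2, 2) tiling are placeholder values:

```python
# Minimal sketch of a tiled CARE prediction on a single stack (placeholder paths).
from csbdeep.models import CARE
from csbdeep.io import save_tiff_imagej_compatible
from tifffile import imread

model = CARE(config=None, name="my_model", basedir="/content/gdrive/My Drive/models")

stack = imread("/content/gdrive/My Drive/to_predict/example.tif")   # (Z, Y, X) stack

# Tiles are processed independently and re-assembled; the notebook's
# "Automatic_number_of_tiles" option corresponds to passing n_tiles=None.
restored = model.predict(stack, axes='ZYX', n_tiles=(1, 2, 2))

save_tiff_imagej_compatible("/content/gdrive/My Drive/results/example.tif",
                            restored, axes='ZYX')
```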
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"Am2JSmpC0frj","colab_type":"code","cellView":"form","colab":{}},"source":["\n","#@markdown ##Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n"," \n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = False #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","#Activate the pretrained model. 
\n","model=CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path)\n","\n","print(\"Restoring images...\")\n","\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","suffix = '.tif'\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='ZYX', n_tiles=n_tilesZYX)\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='ZYX') \n","\n","print(\"Images saved into the result folder:\", Result_folder)\n","\n","#Display an example\n","\n","random_choice=random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","z_mid_plane = int(x.shape[0] / 2)+1\n","\n","@interact\n","def show_results(file=os.listdir(Result_folder), z_plane=widgets.IntSlider(min=0, max=(x.shape[0]-1), step=1, value=z_mid_plane)):\n"," x = imread(Data_folder+\"/\"+file)\n"," y = imread(Result_folder+\"/\"+file)\n","\n"," f=plt.figure(figsize=(16,8))\n"," plt.subplot(1,2,1)\n"," plt.imshow(x[z_plane], norm=simple_norm(x[z_plane], percent = 99), interpolation='nearest')\n"," plt.axis('off')\n"," plt.title('Noisy Input (single Z plane)');\n"," plt.subplot(1,2,2)\n"," plt.imshow(y[z_plane], norm=simple_norm(y[z_plane], percent = 99), interpolation='nearest')\n"," plt.axis('off')\n"," plt.title('Prediction (single Z plane)');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"u4pcBe8Z3T2J","colab_type":"text"},"source":["#**Thank you for using CARE 3D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"CARE_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1t9a-44km730bI7F4I08-6Xh7wEZuL98p","timestamp":1591013189418},{"file_id":"11TigzvLl4FSSwFHUNwLzZKI2IAix4Nmu","timestamp":1586415689249},{"file_id":"1_dSnxUg_qtNWjrPc7D6RWDWlCanEL4Ve","timestamp":1585153449937},{"file_id":"1bKo8jYVZPPgXPa_-Gdu1KhDnNN4vYfLx","timestamp":1583200150464}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **CARE: Content-aware image restoration (3D)**\n","\n","---\n","\n","CARE is a neural network capable of image restoration from corrupted bio-images, first published in 2018 by [Weigert *et al.* in Nature Methods](https://www.nature.com/articles/s41592-018-0216-7). 
The CARE network uses a U-Net network architecture and allows image restoration and resolution improvement in 2D and 3D images, in a supervised manner, using noisy images as input and low-noise images as targets for training. The function of the network is essentially determined by the set of images provided in the training dataset. For instance, if noisy images are provided as input and high signal-to-noise ratio images are provided as targets, the network will perform denoising.\n","\n"," **This particular notebook enables restoration of 3D dataset. If you are interested in restoring 2D dataset, you should use the CARE 2D notebook instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the following paper: \n","\n","**Content-aware image restoration: pushing the limits of fluorescence microscopy**, by Weigert *et al.* published in Nature Methods in 2018 (https://www.nature.com/articles/s41592-018-0216-7)\n","\n","And source code found in: https://github.com/csbdeep/csbdeep\n","\n","For a more in-depth description of the features of the network,please refer to [this guide](http://csbdeep.bioimagecomputing.com/doc/) provided by the original authors of the work.\n","\n","We provide a dataset for the training of this notebook as a way to test its functionalities but the training and test data of the restoration experiments is also available from the authors of the original paper [here](https://publications.mpi-cbg.de/publications-sites/7207/).\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. 
After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For CARE to train, **it needs to have access to a paired training dataset**. This means that the same image needs to be acquired in the two conditions (for instance, low signal-to-noise ratio and high signal-to-noise ratio) and provided with indication of correspondence.\n","\n"," Therefore, the data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Low SNR images\" (Training_source) and \"Training - high SNR images\" (Training_target). Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. 
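Because corresponding input and target files must share the same name, a quick check along these lines (a minimal sketch; the two folder paths are placeholders) can catch unpaired .tif files before training starts:

```python
# Minimal sketch: verify that source and target folders contain matching .tif files.
import os

Training_source = "/content/gdrive/My Drive/Training - Low SNR images"
Training_target = "/content/gdrive/My Drive/Training - high SNR images"

source_files = {f for f in os.listdir(Training_source) if f.lower().endswith(".tif")}
target_files = {f for f in os.listdir(Training_target) if f.lower().endswith(".tif")}

missing_targets = sorted(source_files - target_files)
missing_sources = sorted(target_files - source_files)

if not missing_targets and not missing_sources:
    print(f"Found {len(source_files)} matched image pairs.")
else:
    print("Unpaired files detected:")
    print("  no target for:", missing_targets)
    print("  no source for:", missing_sources)
```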
\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"b4-r1gE7Iamv","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"BDhmUgqCStlm","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-oqBTeLaImnU","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. 
\n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install CARE and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install CARE and dependencies\n","\n","\n","#Here, we install libraries which are not already included in Colab.\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). It uses Keras and Tensorflow.\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","#Here, we import and enable Tensorflow 1 instead of Tensorflow 2.\n","\n","import tensorflow\n","import tensorflow as tf\n","\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# ------- Variable specific to CARE -------\n","from csbdeep.utils import download_and_extract_zip_file, normalize, plot_some, axes_dict, plot_history, Path, download_and_extract_zip_file\n","from csbdeep.data import RawData, create_patches \n","from csbdeep.io import load_training_data, save_tiff_imagej_compatible\n","from csbdeep.models import Config, CARE\n","from csbdeep import data\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"WzYAA-MuaYrT","colab_type":"text"},"source":["## **3.1. 
Setting main training parameters**\n","---\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (Low SNR images) and Training_target (High SNR images or ground truth) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number of epochs`:**Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-300 epochs. Evaluate the performance after training (see 5.). **Default value: 40**\n","\n","**`patch_size`:** CARE divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 80**\n","\n","**`patch_height`:** The value should be smaller than the Z dimensions of the image and divisible by 4. When analysing isotropic stacks patch_size and patch_height should have similar values.\n","\n","**When choosing the patch_size and patch_height, the values should be i) large enough that they will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** \n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappear.**\n","\n","**`number_of_patches`:** Input the number of the patches per image. Increasing the number of patches allows for larger training datasets. **Default value: 200** \n","\n","**Decreasing the patch size or increasing the number of patches may improve the training but may also increase the training time.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0004**"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["\n","#@markdown ###Path to training images:\n","\n","# base folder of GT and low images\n","base = \"/content\"\n","\n","# low SNR images\n","Training_source = \"\" #@param {type:\"string\"}\n","lowfile = Training_source+\"/*.tif\"\n","# Ground truth images\n","Training_target = \"\" #@param {type:\"string\"}\n","GTfile = Training_target+\"/*.tif\"\n","\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","# create the training data file into model_path folder.\n","training_data = model_path+\"/my_training_data.npz\"\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","\n","number_of_epochs = 40#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 80#@param {type:\"number\"} # pixels in\n","patch_height = 8#@param {type:\"number\"}\n","number_of_patches = 200#@param {type:\"number\"}\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","batch_size = 16#@param {type:\"number\"}\n","number_of_steps = 300#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","\n","#Load one randomly chosen training source file\n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","\n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","\n","#Load one randomly chosen training target file\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","\n","\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], norm=simple_norm(x[mid_plane], percent = 99), interpolation='nearest')\n","plt.axis('off')\n","plt.title('Low SNR image (single Z plane)');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], norm=simple_norm(y[mid_plane], percent = 99), interpolation='nearest')\n","plt.axis('off')\n","plt.title('High SNR image (single Z plane)');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xGcl7WGP4WHt","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---"]},{"cell_type":"markdown","metadata":{"id":"5Lio8hpZ4PJ1","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by rotating the training images in the XY-Plane and flipping them along X-Axis.\n","\n","**The flip option alone will double the size of your dataset, rotation will quadruple and both together will increase the dataset by a factor of 8.**"]},{"cell_type":"code","metadata":{"id":"htqjkJWt5J_8","colab_type":"code","cellView":"form","colab":{}},"source":["Use_Data_augmentation = False #@param{type:\"boolean\"}\n","\n","#@markdown Select this option if you want to use augmentation to increase the size of your dataset\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","#@markdown **Would you like to save your augmented images?**\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," 
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path) \n","\n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","if Use_Data_augmentation:\n","\n"," if os.path.exists(Saving_path+'/augmented_source'):\n"," shutil.rmtree(Saving_path+'/augmented_source')\n"," os.mkdir(Saving_path+'/augmented_source')\n","\n"," if os.path.exists(Saving_path+'/augmented_target'):\n"," shutil.rmtree(Saving_path+'/augmented_target') \n"," os.mkdir(Saving_path+'/augmented_target')\n","\n"," print(\"Data augmentation enabled\")\n"," print(\"Data augmentation in progress....\")\n","\n"," if Rotation == True:\n"," rotation_aug(Training_source,Training_target,flip=Flip)\n"," \n"," elif Rotation == False and Flip == True:\n"," flip(Training_source,Training_target)\n"," print(\"Done\")\n","\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\")\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bQDuybvyadKU","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CARE 3D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pret-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
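Stripped of the Colab form widgets, the logic of the cell below amounts to the following (a minimal sketch; the pre-trained model folder path is a placeholder):

```python
# Minimal sketch: locate pre-trained weights and recover the learning rate
# saved by a previous ZeroCostDL4Mic run (placeholder model folder path).
import os
import pandas as pd

pretrained_model_path = "/content/gdrive/My Drive/models/my_previous_model"
Weights_choice = "last"                 # "last" or "best"
initial_learning_rate = 0.0004          # fallback when no record is found

h5_file_path = os.path.join(pretrained_model_path, "weights_" + Weights_choice + ".h5")
csv_path = os.path.join(pretrained_model_path, "Quality Control", "training_evaluation.csv")

learning_rate = initial_learning_rate
if os.path.exists(csv_path):
    history = pd.read_csv(csv_path)
    if "learning rate" in history.columns:
        if Weights_choice == "last":
            learning_rate = history["learning rate"].iloc[-1]
        else:
            # learning rate at the epoch with the lowest validation loss
            learning_rate = history.loc[history["val_loss"].idxmin(), "learning rate"]

print("Weights file:", h5_file_path)
print("Learning rate to resume with:", learning_rate)
```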
"]},{"cell_type":"code","metadata":{"id":"8vPkzEBNamE4","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," 
bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained nerwork will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"tGW2iaU6X5zi","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"WMJnGJpCMa4y","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","# This object holds the image pairs (GT and low), ensuring that CARE compares corresponding images.\n","# This file is saved in .npz format and later called when loading the trainig data.\n","\n","if Use_Data_augmentation == True:\n"," Training_source = Saving_path+'/augmented_source'\n"," Training_target = Saving_path+'/augmented_target'\n","\n","raw_data = RawData.from_folder (\n"," basepath = base,\n"," source_dirs = [Training_source],\n"," target_dir = Training_target,\n"," axes = 'ZYX',\n"," pattern='*.tif*'\n",")\n","X, Y, XY_axes = create_patches (\n"," raw_data = raw_data,\n"," patch_size = (patch_height,patch_size,patch_size),\n"," n_patches_per_image = number_of_patches, \n"," save_file = training_data,\n",")\n","\n","assert X.shape == Y.shape\n","print(\"shape of X,Y =\", X.shape)\n","print(\"axes of X,Y =\", XY_axes)\n","\n","%memit \n","print ('Creating 3D training dataset')\n","\n","# Load Training Data\n","(X,Y), (X_val,Y_val), axes = load_training_data(training_data, validation_split=percentage, verbose=True)\n","c = axes_dict(axes)['C']\n","n_channel_in, n_channel_out = X.shape[c], Y.shape[c]\n","\n","#Plot example patches\n","\n","#plot of training patches.\n","plt.figure(figsize=(12,5))\n","plot_some(X[:5],Y[:5])\n","plt.suptitle('5 example training patches (top row: source, bottom row: target)');\n","\n","#plot of validation patches\n","plt.figure(figsize=(12,5))\n","plot_some(X_val[:5],Y_val[:5])\n","plt.suptitle('5 example validation patches (top row: source, bottom row: target)');\n","\n","%memit \n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning 
rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here, we create the default Config object which sets the hyperparameters of the network training.\n","\n","config = Config(axes, n_channel_in, n_channel_out, train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, train_batch_size=batch_size, train_learning_rate=initial_learning_rate)\n","print(config)\n","vars(config)\n","\n","# Compile the CARE model for network training\n","\n","model_training= CARE(config, model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model_training.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"id":"j_Qm5JBmlvJg","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start Training\n","\n","start = time.time()\n","\n","# Start Training\n","history = model_training.train(X,Y, validation_data=(X_val,Y_val))\n","\n","print(\"Training, done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"w8Q_uYGgiico","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. 
It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"QYuIOWQ3imuU","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"zazOZ3wDx0zQ","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. 
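(The epoch at which the validation loss starts to rise is therefore a good guide for when to stop training, or for preferring the 'best', lowest-validation-loss, weights over the 'last' weights.) 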
In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"biT9FI9Ri77_","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
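For reference, PSNR is commonly computed as 20*log10(MAX) - 10*log10(MSE) and expressed in dB, where MAX is the maximum possible pixel value of the normalised images (a data range of 1.0 is used in the QC code below). 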
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"nAs4Wni7VYbq","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","path_metrics_save = QC_model_path+'/'+QC_model_name+'/Quality Control/'\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(path_metrics_save+'Prediction'):\n"," shutil.rmtree(path_metrics_save+'Prediction')\n","os.makedirs(path_metrics_save+'Prediction')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = False #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","# Activate the pretrained model. 
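\n","# Note: CARE(config=None, name=QC_model_name, basedir=QC_model_path) reloads the configuration and weights previously saved in that model folder.\n","# The n_tilesZYX tuple defined above is then passed to predict() so each stack is processed in tiles, e.g. predict(img, axes='ZYX', n_tiles=(1, 2, 2)), to fit in GPU memory.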
\n","model_training = CARE(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," n_slices = img.shape[0]\n"," predicted = model_training.predict(img, axes='ZYX', n_tiles=n_tilesZYX)\n"," os.chdir(path_metrics_save+'Prediction/')\n"," imsave('Predicted_'+filename, predicted)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(path_metrics_save+'QC_metrics_'+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"File name\",\"Slice #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. 
GT PSNR\"]) \n"," \n"," # These lists will be used to collect all the metrics values per slice\n"," file_name_list = []\n"," slice_number_list = []\n"," mSSIM_GvP_list = []\n"," mSSIM_GvS_list = []\n"," NRMSE_GvP_list = []\n"," NRMSE_GvS_list = []\n"," PSNR_GvP_list = []\n"," PSNR_GvS_list = []\n","\n"," # These lists will be used to display the mean metrics for the stacks\n"," mSSIM_GvP_list_mean = []\n"," mSSIM_GvS_list_mean = []\n"," NRMSE_GvP_list_mean = []\n"," NRMSE_GvS_list_mean = []\n"," PSNR_GvP_list_mean = []\n"," PSNR_GvS_list_mean = []\n","\n"," # Let's loop through the provided dataset in the QC folders\n"," for thisFile in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder, thisFile)):\n"," print('Running QC on: '+thisFile)\n","\n"," test_GT_stack = io.imread(os.path.join(Target_QC_folder, thisFile))\n"," test_source_stack = io.imread(os.path.join(Source_QC_folder,thisFile))\n"," test_prediction_stack = io.imread(os.path.join(path_metrics_save+\"Prediction/\",'Predicted_'+thisFile))\n"," n_slices = test_GT_stack.shape[0]\n","\n"," # Calculating the position of the mid-plane slice\n"," z_mid_plane = int(n_slices / 2)+1\n","\n"," img_SSIM_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_SSIM_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n","\n"," for z in range(n_slices): \n"," # -------------------------------- Normalising the dataset --------------------------------\n","\n"," test_GT_norm, test_source_norm = norm_minmse(test_GT_stack[z], test_source_stack[z], normalize_gt=True)\n"," test_GT_norm, test_prediction_norm = norm_minmse(test_GT_stack[z], test_prediction_stack[z], normalize_gt=True)\n","\n"," # -------------------------------- Calculate the SSIM metric and maps --------------------------------\n","\n"," # Calculate the SSIM maps and index\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = structural_similarity(test_GT_norm, test_source_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n"," #Calculate ssim_maps\n"," img_SSIM_GTvsPrediction_stack[z] = img_as_float32(img_SSIM_GTvsPrediction, force_copy=False)\n"," img_SSIM_GTvsSource_stack[z] = img_as_float32(img_SSIM_GTvsSource, force_copy=False)\n"," \n","\n"," # -------------------------------- Calculate the NRMSE metrics --------------------------------\n","\n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Calculate SE maps\n"," img_RSE_GTvsPrediction_stack[z] = img_as_float32(img_RSE_GTvsPrediction, force_copy=False)\n"," img_RSE_GTvsSource_stack[z] = img_as_float32(img_RSE_GTvsSource, force_copy=False)\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n","\n"," # Calculate the PSNR 
between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([thisFile, str(z),str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource), str(PSNR_GTvsPrediction), str(PSNR_GTvsSource)])\n"," \n"," # Collect values to display in dataframe output\n"," slice_number_list.append(z)\n"," mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n"," mSSIM_GvS_list.append(index_SSIM_GTvsSource)\n"," NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n"," NRMSE_GvS_list.append(NRMSE_GTvsSource)\n"," PSNR_GvP_list.append(PSNR_GTvsPrediction)\n"," PSNR_GvS_list.append(PSNR_GTvsSource)\n","\n"," if (z == z_mid_plane): # catch these for display\n"," SSIM_GTvsP_forDisplay = index_SSIM_GTvsPrediction\n"," SSIM_GTvsS_forDisplay = index_SSIM_GTvsSource\n"," NRMSE_GTvsP_forDisplay = NRMSE_GTvsPrediction\n"," NRMSE_GTvsS_forDisplay = NRMSE_GTvsSource\n"," \n"," # If calculating average metrics for dataframe output\n"," file_name_list.append(thisFile)\n"," mSSIM_GvP_list_mean.append(sum(mSSIM_GvP_list)/len(mSSIM_GvP_list))\n"," mSSIM_GvS_list_mean.append(sum(mSSIM_GvS_list)/len(mSSIM_GvS_list))\n"," NRMSE_GvP_list_mean.append(sum(NRMSE_GvP_list)/len(NRMSE_GvP_list))\n"," NRMSE_GvS_list_mean.append(sum(NRMSE_GvS_list)/len(NRMSE_GvS_list))\n"," PSNR_GvP_list_mean.append(sum(PSNR_GvP_list)/len(PSNR_GvP_list))\n"," PSNR_GvS_list_mean.append(sum(PSNR_GvS_list)/len(PSNR_GvS_list))\n","\n"," # ----------- Change the stacks to 32 bit images -----------\n","\n"," img_SSIM_GTvsSource_stack_32 = img_as_float32(img_SSIM_GTvsSource_stack, force_copy=False)\n"," img_SSIM_GTvsPrediction_stack_32 = img_as_float32(img_SSIM_GTvsPrediction_stack, force_copy=False)\n"," img_RSE_GTvsSource_stack_32 = img_as_float32(img_RSE_GTvsSource_stack, force_copy=False)\n"," img_RSE_GTvsPrediction_stack_32 = img_as_float32(img_RSE_GTvsPrediction_stack, force_copy=False)\n","\n"," # ----------- Saving the error map stacks -----------\n"," io.imsave(path_metrics_save+'SSIM_GTvsSource_'+thisFile,img_SSIM_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'SSIM_GTvsPrediction_'+thisFile,img_SSIM_GTvsPrediction_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsSource_'+thisFile,img_RSE_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsPrediction_'+thisFile,img_RSE_GTvsPrediction_stack_32)\n","\n","#Averages of the metrics per stack as dataframe output\n","pdResults = pd.DataFrame(file_name_list, columns = [\"File name\"])\n","pdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list_mean\n","pdResults[\"Input v. GT mSSIM\"] = mSSIM_GvS_list_mean\n","pdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list_mean\n","pdResults[\"Input v. GT NRMSE\"] = NRMSE_GvS_list_mean\n","pdResults[\"Prediction v. GT PSNR\"] = PSNR_GvP_list_mean\n","pdResults[\"Input v. 
GT PSNR\"] = PSNR_GvS_list_mean\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same way\n","\n","plt.figure(figsize=(20,20))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","\n","# Calculating the position of the mid-plane slice\n","z_mid_plane = int(img_GT.shape[0] / 2)+1\n","\n","plt.imshow(img_GT[z_mid_plane], norm=simple_norm(img_GT[z_mid_plane], percent = 99))\n","plt.title('Target (slice #'+str(z_mid_plane)+')')\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source[z_mid_plane], norm=simple_norm(img_Source[z_mid_plane], percent = 99))\n","plt.title('Source (slice #'+str(z_mid_plane)+')')\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(path_metrics_save+'Prediction/', 'Predicted_'+Test_FileList[-1]))\n","plt.imshow(img_Prediction[z_mid_plane], norm=simple_norm(img_Prediction[z_mid_plane], percent = 99))\n","plt.title('Prediction (slice #'+str(z_mid_plane)+')')\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_SSIM_GTvsSource = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsSource_'+Test_FileList[-1]))\n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsS_forDisplay,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_SSIM_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsPrediction_'+Test_FileList[-1]))\n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsP_forDisplay,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_RSE_GTvsSource = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsSource_'+Test_FileList[-1]))\n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax = 1) \n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsS_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_RSE_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsPrediction_'+Test_FileList[-1]))\n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsP_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)\n","\n","print('-----------------------------------')\n","print('Here are the average scores for the stacks you tested in Quality control. To see values for all slices, open the .csv file saved in the Quality Control folder.')\n","pdResults.head()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"69aJVFfsqXbY","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"tcPNRq1TrMPB","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"Am2JSmpC0frj","colab_type":"code","cellView":"form","colab":{}},"source":["\n","#@markdown ##Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n"," \n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = False #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","#Activate the pretrained model. 
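\n","# Note: CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path) reloads the trained model from disk.\n","# The loops below first list the .tif files found in Data_folder, then restore each stack and save it under the same file name in Result_folder as an ImageJ-compatible TIFF.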
\n","model=CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path)\n","\n","print(\"Restoring images...\")\n","\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","suffix = '.tif'\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='ZYX', n_tiles=n_tilesZYX)\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='ZYX') \n","\n","print(\"Images saved into the result folder:\", Result_folder)\n","\n","#Display an example\n","\n","random_choice=random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","z_mid_plane = int(x.shape[0] / 2)+1\n","\n","@interact\n","def show_results(file=os.listdir(Result_folder), z_plane=widgets.IntSlider(min=0, max=(x.shape[0]-1), step=1, value=z_mid_plane)):\n"," x = imread(Data_folder+\"/\"+file)\n"," y = imread(Result_folder+\"/\"+file)\n","\n"," f=plt.figure(figsize=(16,8))\n"," plt.subplot(1,2,1)\n"," plt.imshow(x[z_plane], norm=simple_norm(x[z_plane], percent = 99), interpolation='nearest')\n"," plt.axis('off')\n"," plt.title('Noisy Input (single Z plane)');\n"," plt.subplot(1,2,2)\n"," plt.imshow(y[z_plane], norm=simple_norm(y[z_plane], percent = 99), interpolation='nearest')\n"," plt.axis('off')\n"," plt.title('Prediction (single Z plane)');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"u4pcBe8Z3T2J","colab_type":"text"},"source":["#**Thank you for using CARE 3D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/ChangeLog.txt b/Colab_notebooks/ChangeLog.txt index 43017477..22c66371 100755 --- a/Colab_notebooks/ChangeLog.txt +++ b/Colab_notebooks/ChangeLog.txt @@ -6,6 +6,29 @@ https://www.biorxiv.org/content/10.1101/2020.03.20.000133v1 Latest releases available here: https://github.com/HenriquesLab/ZeroCostDL4Mic/releases + +————————————————————————————————————————————————————————— +ZeroCostDL4Mic v1.8 + +Major changes: + +- Beta Notebooks have now been move to main notebooks +- The StarDist 2D Fiji notebook is now obselete as the model export works in the main notebook. 
+
+Main notebooks:
+——————————————
+
+- StarDist 2D, StarDist 3D, CARE 2D and CARE 3D notebooks now use TensorFlow 2.2 (instead of TF 1.15)
+- YOLOv2 notebook: QC section now uses the same mAP function as the training, for better consistency of results; ground-truth labels and predicted labels in the QC section are exported to the QC folder as CSV files which hold the bounding box coordinates and class labels; display of prediction results is now more consistent with the display of GT labels; updated Augmentation section, now allowing 2-8 times augmentation of the dataset; an additional CSV file with predicted bounding box coordinates, in a format suitable for use in ImageJ as a Results Table, is now exported to the user's results folder in the Prediction section; added 'training_times' as a hyperparameter for improved tuning of model training; tracking of mAP during training implemented; after training, the model with the best validation performance, the model with the best mAP score and the last model weights are saved to allow easier performance comparison by the user; updated explanation of parameters and QC section.
+
+- 3D U-Net:
+ 1. Added ability to train the network on non-binary targets
+ 2. Added ability to choose loss, metrics and optimizer
+ 3. Fixed a data generator bug leading to erroneous generator length when choosing random_crop
+
++ minor modifications and bug fixes
+
+
 —————————————————————————————————————————————————————————
 ZeroCostDL4Mic v1.7
 
diff --git a/Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb b/Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb
new file mode 100755
index 00000000..1e90d044
--- /dev/null
+++ b/Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb
@@ -0,0 +1 @@
+{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"CycleGAN_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1mqcexfPBaIWuvMWWbJZUFtPoZoJJwrEA","timestamp":1589278334507},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **CycleGAN**\n","\n","---\n","\n","CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa.\n","\n"," **This particular notebook enables unpaired image-to-image translation. 
If your dataset is paired, you should also consider using the pix2pix notebook.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is based on the following paper: \n","\n"," **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.* published in arXiv in 2018 (https://arxiv.org/abs/1703.10593)\n","\n","The source code of the CycleGAN PyTorch implementation can be found in: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"N3azwKB9O0oW","colab_type":"text"},"source":["# **License**\n","\n","---"]},{"cell_type":"code","metadata":{"id":"ByW6Vqdn9sYV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Double click to see the license information\n","\n","#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------\n","#This ZeroCostDL4Mic notebook is distributed under the MIT licence\n","\n","\n","\n","#------------------------- LICENSE FOR CycleGAN ------------------------------------\n","\n","#Copyright (c) 2017, Jun-Yan Zhu and Taesung Park\n","#All rights reserved.\n","\n","#Redistribution and use in source and binary forms, with or without\n","#modification, are permitted provided that the following conditions are met:\n","\n","#* Redistributions of source code must retain the above copyright notice, this\n","# list of conditions and the following disclaimer.\n","\n","#* Redistributions in binary form must reproduce the above copyright notice,\n","# this list of conditions and the following disclaimer in the documentation\n","# and/or other materials provided with the distribution.\n","\n","#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n","#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n","#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n","#DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n","#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n","#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n","#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n","#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n","#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n","#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n","\n","\n","#--------------------------- LICENSE FOR pix2pix --------------------------------\n","#BSD License\n","\n","#For pix2pix software\n","#Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu\n","#All rights reserved.\n","\n","#Redistribution and use in source and binary forms, with or without\n","#modification, are permitted provided that the following conditions are met:\n","\n","#* Redistributions of source code must retain the above copyright notice, this\n","# list of conditions and the following disclaimer.\n","\n","#* Redistributions in binary form must reproduce the above copyright notice,\n","# this list of conditions and the following disclaimer in the documentation\n","# and/or other materials provided with the distribution.\n","\n","#----------------------------- LICENSE FOR DCGAN --------------------------------\n","#BSD License\n","\n","#For dcgan.torch software\n","\n","#Copyright (c) 2015, Facebook, Inc. All rights reserved.\n","\n","#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n","\n","#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n","\n","#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n","\n","#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n","\n","#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modified by selecting the cell. To execute the cell, move your cursor over the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. Once execution is done, the play button animation stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples of how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired.\n","\n","While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. 
This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n","\n"," Please note that you currently can **only use .png files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset (non-matching images) **\n"," - Training_source\n"," - img_1.png, img_2.png, ...\n"," - Training_target\n"," - img_1.png, img_2.png, ...\n"," - **Quality control dataset (matching images)**\n"," - Training_source\n"," - img_1.png, img_2.png\n"," - Training_target\n"," - img_1.png, img_2.png\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. 
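The mounted drive is typically accessible under the path /content/gdrive/My Drive/. 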
\n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install CycleGAN and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install CycleGAN and dependencies\n","\n","\n","#------- Code from the cycleGAN demo notebook starts here -------\n","\n","#Here, we install libraries which are not already included in Colab.\n","\n","\n","\n","!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix\n","\n","import os\n","os.chdir('pytorch-CycleGAN-and-pix2pix/')\n","!pip install -r requirements.txt\n","\n","\n","import imageio\n","from skimage import data\n","from skimage import exposure\n","from skimage.exposure import match_histograms\n","\n","from skimage.util import img_as_int\n","\n","\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"BLmBseWbRvxL","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data respecively. 
To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:**Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see 5). **Default value: 200**\n","\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512**\n","\n","**When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** \n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0002**"]},{"cell_type":"code","metadata":{"id":"pIrTwJjzwV-D","colab_type":"code","cellView":"form","colab":{}},"source":["\n","\n","#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","InputFile = Training_source+\"/*.png\"\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","OutputFile = Training_target+\"/*.png\"\n","\n","\n","#Define where the patch file will be saved\n","base = \"/content\"\n","\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","number_of_epochs = 200#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","patch_size = 512#@param {type:\"number\"} # in pixels\n","batch_size = 1#@param {type:\"number\"}\n","initial_learning_rate = 0.0002 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 1\n"," patch_size = 512\n"," initial_learning_rate = 0.0002\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: \"+model_name+\" already exists and will be deleted in the following cell !!\")\n"," print(bcolors.WARNING +\"To continue training \"+model_name+\", choose a new model_name here, and load \"+model_name+\" in section 3.3\")\n"," \n","\n","\n","#To use Cyclegan we need to organise the data in a way the model can understand\n","\n","Saving_path= \"/content/\"+model_name\n","#Saving_path= model_path+\"/\"+model_name\n","\n","if os.path.exists(Saving_path):\n"," shutil.rmtree(Saving_path)\n","os.makedirs(Saving_path)\n","\n","TrainA_Folder = Saving_path+\"/trainA\"\n","if os.path.exists(TrainA_Folder):\n"," shutil.rmtree(TrainA_Folder)\n","os.makedirs(TrainA_Folder)\n"," \n","TrainB_Folder = Saving_path+\"/trainB\"\n","if os.path.exists(TrainB_Folder):\n"," shutil.rmtree(TrainB_Folder)\n","os.makedirs(TrainB_Folder)\n","\n","# Here we disable pre-trained model by default (in case the cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = True\n","\n","\n","# This will display a randomly chosen dataset input and output\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imageio.imread(Training_source+\"/\"+random_choice)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","Image_min_dim = min(Image_Y, Image_X)\n","\n","\n","\n","#Hyperparameters failsafes\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 4\n","if not patch_size % 4 == 0:\n"," patch_size = ((int(patch_size / 4)-1) * 4)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","random_choice_2 = random.choice(os.listdir(Training_target))\n","y = imageio.imread(Training_target+\"/\"+random_choice_2)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Training target')\n","plt.axis('off');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"FX6uxFvI-CsQ","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CwMaFU1T-GtN","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by flipping the patches. 
\n","\n"," By default data augmentation is enabled."]},{"cell_type":"code","metadata":{"id":"kLtHIATT-0un","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","#@markdown ##Play this cell to enable or disable data augmentation: \n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"v-leE8pEWRkn","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"CbOcS3wiWV9w","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n"," h5_file_path_A = os.path.join(pretrained_model_path, \"latest_net_G_A.pth\")\n"," h5_file_path_B = os.path.join(pretrained_model_path, \"latest_net_G_B.pth\")\n","\n","# --------------------- Check the model exist ------------------------\n","\n"," if not os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):\n"," print(bcolors.WARNING+'WARNING: Pretrained model does not exist')\n"," Use_pretrained_model = False\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"," if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):\n"," print(\"Pretrained model \"+os.path.basename(pretrained_model_path)+\" was found and will be loaded prior to training.\")\n"," \n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"-A4ipz8gs3Ew","colab_type":"text"},"source":["## **4.1. Prepare the training data for training**\n","---\n","Here, we use the information from 3. 
to prepare the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"_V2ujGB60gDv","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Prepare the data for training\n","\n","print(\"Data preparation in progress\")\n","\n","if os.path.exists(model_path+'/'+model_name):\n"," shutil.rmtree(model_path+'/'+model_name)\n","os.makedirs(model_path+'/'+model_name)\n","\n","#--------------- Here we move the files to trainA and train B ---------\n","\n","\n","for f in os.listdir(Training_source):\n"," shutil.copyfile(Training_source+\"/\"+f, TrainA_Folder+\"/\"+f)\n","\n","for files in os.listdir(Training_target):\n"," shutil.copyfile(Training_target+\"/\"+files, TrainB_Folder+\"/\"+files)\n","\n","#---------------------------------------------------------------------\n","\n","# CycleGAN use number of EPOCH withouth lr decay and number of EPOCH with lr decay\n","\n","\n","number_of_epochs_lr_stable = int(number_of_epochs/2)\n","number_of_epochs_lr_decay = int(number_of_epochs/2)\n","\n","if Use_pretrained_model :\n"," for f in os.listdir(pretrained_model_path):\n"," if (f.startswith(\"latest_net_\")): \n"," shutil.copyfile(pretrained_model_path+\"/\"+f, model_path+'/'+model_name+\"/\"+f)\n","\n","print(\"Data ready for training\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches or continue the training in a second Colab session."]},{"cell_type":"code","metadata":{"id":"eBD50tAgv5qf","colab_type":"code","cellView":"form","colab":{}},"source":["\n","#@markdown ##Start training\n","\n","start = time.time()\n","\n","os.chdir(\"/content\")\n","\n","#--------------------------------- Command line inputs to change CycleGAN paramaters------------\n","\n"," # basic parameters\n"," #('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n"," #('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n"," #('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n"," #('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n"," \n"," # model parameters\n"," #('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')\n"," #('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')\n"," #('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')\n"," #('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n"," #('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n"," #('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n"," #('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')\n"," #('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')\n"," #('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')\n"," #('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')\n"," #('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n"," #('--no_dropout', action='store_true', help='no dropout for the generator')\n"," \n"," # dataset parameters\n"," #('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')\n"," #('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n"," #('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n"," #('--num_threads', default=4, type=int, help='# threads for loading data')\n"," #('--batch_size', type=int, default=1, help='input batch size')\n"," #('--load_size', type=int, default=286, help='scale images to this size')\n"," #('--crop_size', type=int, default=256, help='then crop to this size')\n"," #('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n"," #('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n"," #('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n"," #('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')\n"," \n"," # additional parameters\n"," #('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n"," #('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n"," #('--verbose', action='store_true', help='if specified, print more debugging information')\n"," #('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n"," \n"," # visdom and HTML visualization parameters\n"," #('--display_freq', type=int, default=400, help='frequency of showing training results on screen')\n"," #('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')\n"," #('--display_id', type=int, default=1, help='window id of the web display')\n"," #('--display_server', type=str, default=\"http://localhost\", help='visdom server of the web display')\n"," #('--display_env', type=str, default='main', help='visdom display environment name (default is \"main\")')\n"," #('--display_port', type=int, default=8097, help='visdom port of the web display')\n"," #('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')\n"," #('--print_freq', type=int, default=100, help='frequency of showing training results on console')\n"," #('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')\n"," \n"," # network saving and loading parameters\n"," #('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')\n"," #('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')\n"," #('--save_by_iter', action='store_true', help='whether saves model by iteration')\n"," #('--continue_train', action='store_true', help='continue training: load the latest model')\n"," #('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...')\n"," #('--phase', type=str, default='train', help='train, val, test, etc')\n"," \n"," # training parameters\n"," #('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')\n"," #('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')\n"," #('--beta1', type=float, default=0.5, help='momentum term of adam')\n"," #('--lr', type=float, default=0.0002, help='initial learning rate for adam')\n"," #('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')\n"," #('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')\n"," #('--lr_policy', type=str, default='linear', help='learning rate policy. 
[linear | step | plateau | cosine]')\n"," #('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations'\n","\n","#---------------------------------------------------------\n","\n","#----- Start the training ------------------------------------\n","if not Use_pretrained_model:\n"," if Use_Data_augmentation:\n"," !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot \"$Saving_path\" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir \"$model_path\" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5\n"," if not Use_Data_augmentation:\n"," !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot \"$Saving_path\" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir \"$model_path\" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip\n","\n","if Use_pretrained_model:\n"," if Use_Data_augmentation:\n"," !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot \"$Saving_path\" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir \"$model_path\" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train\n"," \n"," if not Use_Data_augmentation:\n"," !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot \"$Saving_path\" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir \"$model_path\" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip\n","\n","#---------------------------------------------------------\n","\n","print(\"Training, done.\")\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.3. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n","Unfortunately loss functions curve are not very informative for GAN network. 
Therefore we perform the QC here using a test dataset.\n","\n","\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"PhcOwcgH3JAD","colab_type":"text"},"source":["## **5.1. Choose the model you want to assess**"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"E4Yp7ogh3NGD","colab_type":"text"},"source":["## **5.2. Identify the best checkpoint to use to make predictions**"]},{"cell_type":"markdown","metadata":{"id":"1yauWCc78HKD","colab_type":"text"},"source":[" CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN training, the last checkpoint is not always the best one to use. As a consequence, it can be challenging to choose the most suitable checkpoint for making predictions.\n","\n","This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground-truth images. The metrics used include:\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels with a Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value averaged across the entire image.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a map of the square root of the squared difference between the normalized prediction and the target, or between the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. 
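For intuition, these scores can be reproduced on a toy image pair with scikit-image; the sketch below is a hypothetical, self-contained example rather than the QC cell itself (which additionally normalises and histogram-matches the real images):

import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

rng = np.random.default_rng(0)
gt = rng.random((128, 128)).astype(np.float32)  # stand-in ground-truth image, values in [0, 1]
pred = np.clip(gt + 0.05 * rng.standard_normal((128, 128)).astype(np.float32), 0, 1)  # noisy 'prediction'

# mSSIM (scalar) and per-pixel SSIM map, using the 11-pixel Gaussian window described above
mSSIM, ssim_map = structural_similarity(gt, pred, data_range=1.0, full=True,
                                        gaussian_weights=True, sigma=1.5,
                                        use_sample_covariance=False)

rse_map = np.sqrt((gt - pred) ** 2)   # root squared error map (bright = large error)
nrmse = np.sqrt(np.mean(rse_map))     # mirrors the NRMSE computation used in the QC cell below
psnr = peak_signal_noise_ratio(gt, pred, data_range=1.0)

print(f"mSSIM={mSSIM:.3f}  NRMSE={nrmse:.3f}  PSNR={psnr:.1f} dB")

A perfect prediction would give an SSIM map of ones, an RSE map of zeros, an NRMSE of 0 and an infinite PSNR.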
Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n"]},{"cell_type":"code","metadata":{"id":"2nBPucJdK3KS","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","Image_type = \"Grayscale\" #@param [\"Grayscale\", \"RGB\"]\n","\n","\n","\n","# average function\n","def Average(lst): \n"," return sum(lst) / len(lst) \n","\n","\n","# Create a quality control folder\n","\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","# List images in Source_QC_folder\n","# This will find the image dimension of a randomly choosen image in Source_QC_folder \n","random_choice = random.choice(os.listdir(Source_QC_folder))\n","x = imageio.imread(Source_QC_folder+\"/\"+random_choice)\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","Image_min_dim = min(Image_Y, Image_X)\n","\n","\n","# Here we need to move the data to be analysed so that cycleGAN can find them\n","\n","Saving_path_QC= \"/content/\"+QC_model_name\n","\n","if os.path.exists(Saving_path_QC):\n"," shutil.rmtree(Saving_path_QC)\n","os.makedirs(Saving_path_QC)\n","\n","Saving_path_QC_folder = Saving_path_QC+\"_images\"\n","\n","if os.path.exists(Saving_path_QC_folder):\n"," shutil.rmtree(Saving_path_QC_folder)\n","os.makedirs(Saving_path_QC_folder)\n","\n","\n","#Here we copy and rename the all the checkpoint to be analysed\n","\n","for f in os.listdir(full_QC_model_path):\n"," shortname = f[:-6]\n"," shortname = shortname + \".pth\"\n"," if f.endswith(\"net_G_A.pth\"):\n"," shutil.copyfile(full_QC_model_path+f, Saving_path_QC+\"/\"+shortname)\n","\n","\n","for files in os.listdir(Source_QC_folder):\n"," shutil.copyfile(Source_QC_folder+\"/\"+files, Saving_path_QC_folder+\"/\"+files)\n"," \n","\n","# This will find the image dimension of a randomly choosen image in Source_QC_folder \n","random_choice = random.choice(os.listdir(Source_QC_folder))\n","x = imageio.imread(Source_QC_folder+\"/\"+random_choice)\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","Image_min_dim = int(min(Image_Y, Image_X))\n","\n","Nb_Checkpoint = len(os.listdir(Saving_path_QC))\n","\n","print(Nb_Checkpoint)\n","\n","\n","\n","## Initiate list\n","\n","Checkpoint_list = []\n","Average_ssim_score_list = []\n","\n","\n","for j in range(1, len(os.listdir(Saving_path_QC))+1):\n"," checkpoints = j*5\n","\n"," if checkpoints == Nb_Checkpoint*5:\n"," checkpoints = \"latest\"\n","\n","\n"," print(\"The checkpoint currently analysed is =\"+str(checkpoints))\n","\n"," Checkpoint_list.append(checkpoints)\n","\n","\n"," # Create a quality control/Prediction Folder\n","\n"," QC_prediction_results = QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints)\n","\n"," if os.path.exists(QC_prediction_results):\n"," shutil.rmtree(QC_prediction_results)\n","\n"," 
os.makedirs(QC_prediction_results)\n","\n","\n","\n","#---------------------------- Predictions are performed here ----------------------\n","\n"," os.chdir(\"/content\")\n","\n"," !python pytorch-CycleGAN-and-pix2pix/test.py --dataroot \"$Saving_path_QC_folder\" --name \"$QC_model_name\" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir \"$QC_prediction_results\" --checkpoints_dir \"/content/\"\n","\n","#-----------------------------------------------------------------------------------\n","\n","#Here we need to move the data again and remove all the unnecessary folders\n","\n"," Checkpoint_name = \"test_\"+str(checkpoints)\n","\n"," QC_results_images = QC_prediction_results+\"/\"+QC_model_name+\"/\"+Checkpoint_name+\"/images\"\n","\n"," QC_results_images_files = os.listdir(QC_results_images)\n","\n"," for f in QC_results_images_files: \n"," shutil.copyfile(QC_results_images+\"/\"+f, QC_prediction_results+\"/\"+f)\n","\n"," os.chdir(\"/content\") \n","\n"," #Here we clean up the extra files\n"," shutil.rmtree(QC_prediction_results+\"/\"+QC_model_name)\n","\n","\n","#-------------------------------- QC for RGB ------------------------------------\n"," if Image_type == \"RGB\":\n","# List images in Source_QC_folder\n","# This will find the image dimension of a randomly choosen image in Source_QC_folder \n"," random_choice = random.choice(os.listdir(Source_QC_folder))\n"," x = imageio.imread(Source_QC_folder+\"/\"+random_choice)\n","\n"," def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)\n","\n","# Open and create the csv file that will contain all the QC metrics\n"," with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/\"+\"QC_metrics_\"+QC_model_name+str(checkpoints)+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. 
GT mSSIM\"])\n"," \n"," \n"," # Initiate list\n"," ssim_score_list = [] \n","\n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n","\n"," shortname_no_PNG = i[:-4]\n"," \n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode=\"RGB\")\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints),shortname_no_PNG+\"_real.png\"))\n"," \n"," \n"," # -------------------------------- Prediction --------------------------------\n"," \n"," test_prediction = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints),shortname_no_PNG+\"_fake.png\"))\n"," \n"," #--------------------------- Here we normalise using histograms matching--------------------------------\n"," test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)\n"," test_source_matched = match_histograms(test_source, test_GT, multichannel=True)\n"," \n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)\n","\n"," ssim_score_list.append(index_SSIM_GTvsPrediction)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/SSIM_GTvsPrediction_\"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)\n"," img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/SSIM_GTvsSource_\"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)\n"," \n"," \n"," writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])\n","\n"," #Here we calculate the ssim average for each image in each checkpoints\n","\n"," Average_SSIM_checkpoint = Average(ssim_score_list)\n"," Average_ssim_score_list.append(Average_SSIM_checkpoint)\n","\n","\n","\n","\n","#------------------------------------------- QC for Grayscale ----------------------------------------------\n","\n"," if Image_type == \"Grayscale\":\n"," def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n"," def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n","\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n"," def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import 
numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n"," def norm_minmse(gt, x, normalize_gt=True):\n"," \n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","# Open and create the csv file that will contain all the QC metrics\n"," with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/\"+\"QC_metrics_\"+QC_model_name+str(checkpoints)+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," \n"," \n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n","\n"," ssim_score_list = []\n"," shortname_no_PNG = i[:-4]\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode=\"RGB\")\n"," \n"," test_GT = test_GT_raw[:,:,2]\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source_raw = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints),shortname_no_PNG+\"_real.png\"))\n"," \n"," test_source = test_source_raw[:,:,2]\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction_raw = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints),shortname_no_PNG+\"_fake.png\"))\n"," \n"," test_prediction = test_prediction_raw[:,:,2]\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," ssim_score_list.append(index_SSIM_GTvsPrediction)\n","\n"," #Save ssim_maps\n"," \n"," img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/SSIM_GTvsPrediction_\"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)\n"," img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality 
Control/\"+str(checkpoints)+\"/SSIM_GTvsSource_\"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/RSE_GTvsPrediction_\"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)\n"," img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype(\"uint8\")\n"," io.imsave(QC_model_path+'/'+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/RSE_GTvsSource_\"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n"," #Here we calculate the ssim average for each image in each checkpoints\n","\n"," Average_SSIM_checkpoint = Average(ssim_score_list)\n"," Average_ssim_score_list.append(Average_SSIM_checkpoint)\n","\n","\n","# All data is now processed saved\n"," \n","\n","# -------------------------------- Display --------------------------------\n","\n","# Display the IoV vs Threshold plot\n","plt.figure(figsize=(20,5))\n","plt.plot(Checkpoint_list, Average_ssim_score_list, label=\"SSIM\")\n","plt.title('Checkpoints vs. SSIM')\n","plt.ylabel('SSIM')\n","plt.xlabel('Checkpoints')\n","plt.legend()\n","plt.show()\n","\n","\n","\n","# -------------------------------- Display RGB --------------------------------\n","\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","\n","\n","if Image_type == \"RGB\":\n"," random_choice_shortname_no_PNG = shortname_no_PNG\n","\n"," @interact\n"," def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):\n","\n"," random_choice_shortname_no_PNG = file[:-4]\n","\n"," df1 = pd.read_csv(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/\"+\"QC_metrics_\"+QC_model_name+str(checkpoints)+\".csv\", header=0)\n"," df2 = df1.set_index(\"image #\", drop = False)\n"," index_SSIM_GTvsPrediction = df2.loc[file, \"Prediction v. GT mSSIM\"]\n"," index_SSIM_GTvsSource = df2.loc[file, \"Input v. 
GT mSSIM\"]\n","\n","#Setting up colours\n"," \n"," cmap = None\n","\n"," plt.figure(figsize=(10,10))\n","\n","# Target (Ground-truth)\n"," plt.subplot(3,3,1)\n"," plt.axis('off')\n"," img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode=\"RGB\")\n"," plt.imshow(img_GT, cmap = cmap)\n"," plt.title('Target',fontsize=15)\n","\n","# Source\n"," plt.subplot(3,3,2)\n"," plt.axis('off')\n"," img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode=\"RGB\")\n"," plt.imshow(img_Source, cmap = cmap)\n"," plt.title('Source',fontsize=15)\n","\n","#Prediction\n"," plt.subplot(3,3,3)\n"," plt.axis('off')\n","\n"," img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), random_choice_shortname_no_PNG+\"_fake.png\"))\n","\n"," plt.imshow(img_Prediction, cmap = cmap)\n"," plt.title('Prediction',fontsize=15)\n","\n","\n","#SSIM between GT and Source\n"," plt.subplot(3,3,5)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","\n"," img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"SSIM_GTvsSource_\"+random_choice_shortname_no_PNG+\".tif\"))\n","\n"," imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","#plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. Source',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n"," plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n"," plt.subplot(3,3,6)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","\n"," img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"SSIM_GTvsPrediction_\"+random_choice_shortname_no_PNG+\".tif\"))\n","\n"," imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","#plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. 
Prediction',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","\n","# -------------------------------- Display Grayscale --------------------------------\n","\n","if Image_type == \"Grayscale\":\n"," random_choice_shortname_no_PNG = shortname_no_PNG\n","\n"," @interact\n"," def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):\n","\n"," random_choice_shortname_no_PNG = file[:-4]\n","\n"," df1 = pd.read_csv(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints)+\"/\"+\"QC_metrics_\"+QC_model_name+str(checkpoints)+\".csv\", header=0)\n"," df2 = df1.set_index(\"image #\", drop = False)\n"," index_SSIM_GTvsPrediction = df2.loc[file, \"Prediction v. GT mSSIM\"]\n"," index_SSIM_GTvsSource = df2.loc[file, \"Input v. GT mSSIM\"]\n","\n"," NRMSE_GTvsPrediction = df2.loc[file, \"Prediction v. GT NRMSE\"]\n"," NRMSE_GTvsSource = df2.loc[file, \"Input v. GT NRMSE\"]\n"," PSNR_GTvsSource = df2.loc[file, \"Input v. GT PSNR\"]\n"," PSNR_GTvsPrediction = df2.loc[file, \"Prediction v. GT PSNR\"]\n"," \n","\n"," plt.figure(figsize=(15,15))\n","\n"," cmap = None\n"," \n"," # Target (Ground-truth)\n"," plt.subplot(3,3,1)\n"," plt.axis('off')\n"," img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode=\"RGB\")\n","\n"," plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')\n"," plt.title('Target',fontsize=15)\n","\n","# Source\n"," plt.subplot(3,3,2)\n"," plt.axis('off')\n"," img_Source = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), random_choice_shortname_no_PNG+\"_real.png\"))\n"," plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))\n"," plt.title('Source',fontsize=15)\n","\n","#Prediction\n"," plt.subplot(3,3,3)\n"," plt.axis('off')\n"," img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), random_choice_shortname_no_PNG+\"_fake.png\"))\n"," plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))\n"," plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n"," cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n"," plt.subplot(3,3,5)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","\n"," img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"SSIM_GTvsSource_\"+random_choice_shortname_no_PNG+\".tif\"))\n"," img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255\n"," imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","\n"," \n"," plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. 
Source',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n"," plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n"," plt.subplot(3,3,6)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n"," \n"," \n"," img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"SSIM_GTvsPrediction_\"+random_choice_shortname_no_PNG+\".tif\"))\n"," img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255\n"," imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","\n"," \n"," plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. Prediction',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n"," plt.subplot(3,3,8)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","\n"," img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"RSE_GTvsSource_\"+random_choice_shortname_no_PNG+\".tif\"))\n"," img_RSE_GTvsSource = img_RSE_GTvsSource / 255\n"," \n","\n"," imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n"," plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n"," plt.title('Target vs. Source',fontsize=15)\n"," plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n"," plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n"," plt.subplot(3,3,9)\n","#plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","\n"," img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/\"+str(checkpoints), \"RSE_GTvsPrediction_\"+random_choice_shortname_no_PNG+\".tif\"))\n","\n"," img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255\n","\n"," imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n"," plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n"," plt.title('Target vs. 
Prediction',fontsize=15)\n"," plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images.\n","\n","**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the \"latest\" checkpoint, input \"latest\"."]},{"cell_type":"code","metadata":{"id":"yb3suNkfpNA9","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","import glob\n","import os.path\n","\n","\n","latest = \"latest\"\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###What model checkpoint would you like to use?\n","\n","checkpoint = latest#@param {type:\"raw\"}\n","\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","#here we check if we use the newly trained network or not\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","#here we check if the model exists\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","# Here we check that checkpoint exist, if not the closest one will be chosen \n","\n","Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))\n","print(Nb_Checkpoint)\n","\n","\n","if not checkpoint == \"latest\":\n","\n"," if checkpoint < 10:\n"," checkpoint = 5\n","\n"," if not checkpoint % 5 == 0:\n"," checkpoint = ((int(checkpoint / 5)-1) * 5)\n"," print (bcolors.WARNING + \" Your chosen checkpoints is not divisible by 5; therefore the checkpoints chosen is now:\",checkpoints)\n"," \n"," if checkpoint > Nb_Checkpoint*5:\n"," checkpoint = \"latest\"\n","\n"," if checkpoint == Nb_Checkpoint*5:\n"," checkpoint = \"latest\"\n","\n","\n","\n","\n","# Here we need to move the data to be analysed so that cycleGAN can find them\n","\n","Saving_path_prediction= \"/content/\"+Prediction_model_name\n","\n","if os.path.exists(Saving_path_prediction):\n"," shutil.rmtree(Saving_path_prediction)\n","os.makedirs(Saving_path_prediction)\n","\n","Saving_path_Data_folder = Saving_path_prediction+\"/testA\"\n","\n","if os.path.exists(Saving_path_Data_folder):\n"," shutil.rmtree(Saving_path_Data_folder)\n","os.makedirs(Saving_path_Data_folder)\n","\n","for files in os.listdir(Data_folder):\n"," shutil.copyfile(Data_folder+\"/\"+files, Saving_path_Data_folder+\"/\"+files)\n","\n","\n","Nb_files_Data_folder = len(os.listdir(Data_folder)) +10\n","\n","\n","\n","#Here we copy and rename the checkpoint to be used\n","\n","shutil.copyfile(full_Prediction_model_path+\"/\"+str(checkpoint)+\"_net_G_A.pth\", full_Prediction_model_path+\"/\"+str(checkpoint)+\"_net_G.pth\")\n","\n","\n","# This will find the image dimension of a randomly choosen image in Data_folder \n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imageio.imread(Data_folder+\"/\"+random_choice)\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","Image_min_dim = min(Image_Y, Image_X)\n","\n","print(Image_min_dim)\n","\n","\n","\n","#-------------------------------- Perform predictions -----------------------------\n","\n","#-------------------------------- Options that can be used to perform predictions -----------------------------\n","\n","# basic parameters\n"," #('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n"," #('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n"," #('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n"," #('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n","\n","# model parameters\n"," #('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')\n"," #('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')\n"," #('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')\n"," #('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n"," #('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n"," #('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n"," #('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')\n"," #('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')\n"," #('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')\n"," #('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')\n"," #('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n"," #('--no_dropout', action='store_true', help='no dropout for the generator')\n"," \n","# dataset parameters\n"," #('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')\n"," #('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n"," #('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n"," #('--num_threads', default=4, type=int, help='# threads for loading data')\n"," #('--batch_size', type=int, default=1, help='input batch size')\n"," #('--load_size', type=int, default=286, help='scale images to this size')\n"," #('--crop_size', type=int, default=256, help='then crop to this size')\n"," #('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n"," #('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n"," #('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n"," #('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')\n"," \n","# additional parameters\n"," #('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n"," #('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n"," #('--verbose', action='store_true', help='if specified, print more debugging information')\n"," #('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n"," \n","\n"," #('--ntest', type=int, default=float(\"inf\"), help='# of test examples.')\n"," #('--results_dir', type=str, default='./results/', help='saves results here.')\n"," #('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')\n"," #('--phase', type=str, default='test', help='train, val, test, etc')\n","\n","# Dropout and Batchnorm has different behavioir during training and test.\n"," #('--eval', action='store_true', help='use eval mode during test time.')\n"," #('--num_test', type=int, default=50, help='how many test images to run')\n"," # rewrite devalue values\n"," \n","# To avoid cropping, the load_size should be the same as crop_size\n"," #parser.set_defaults(load_size=parser.get_default('crop_size'))\n","\n","#------------------------------------------------------------------------\n","\n","\n","#---------------------------- Predictions are performed here ----------------------\n","\n","os.chdir(\"/content\")\n","\n","!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot \"$Saving_path_Data_folder\" --name \"$Prediction_model_name\" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir \"$Result_folder\" --checkpoints_dir \"$Prediction_model_path\" --num_test $Nb_files_Data_folder --epoch $checkpoint\n","\n","#-----------------------------------------------------------------------------------\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","import os\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","\n","\n","random_choice_no_extension = os.path.splitext(random_choice)\n","\n","\n","x = imageio.imread(Result_folder+\"/\"+Prediction_model_name+\"/test_\"+str(checkpoint)+\"/images/\"+random_choice_no_extension[0]+\"_real.png\")\n","\n","\n","y = imageio.imread(Result_folder+\"/\"+Prediction_model_name+\"/test_\"+str(checkpoint)+\"/images/\"+random_choice_no_extension[0]+\"_fake.png\")\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Prediction')\n","plt.axis('off');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
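A convenient way to do this directly from the Colab session (a hypothetical snippet, not part of the notebook; the folder path is a placeholder you would replace with your own Result_folder) is to zip the results and trigger a browser download:

import shutil
from google.colab import files  # only available inside a Colab session

result_folder = "/content/gdrive/My Drive/CycleGAN_results"  # placeholder: your Result_folder from section 6.1

# Pack the whole result folder into a single zip archive and download it through the browser
archive_path = shutil.make_archive("/content/CycleGAN_results", "zip", root_dir=result_folder)
files.download(archive_path)

Downloading a single archive is usually faster and more reliable than downloading many small PNG files individually.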
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using CycleGAN!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb new file mode 100755 index 00000000..d480b27b --- /dev/null +++ b/Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb @@ -0,0 +1 @@ +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Deep-STORM_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"169qcwQo-yw15PwoGatXAdBvjs4wt_foD","timestamp":1592147948265},{"file_id":"1gjRCgDORKi_GNBu4QnVCBkSWrfPtqL-E","timestamp":1588525976305},{"file_id":"1DFy6aCi1XAVdjA5KLRZirB2aMZkMFdv-","timestamp":1587998755430},{"file_id":"1NpzigQoXGy3GFdxh4_jvG1PnBfyrcpBs","timestamp":1587569988032},{"file_id":"1jdI540qAfMSQwjnMhoAFkGJH9EbHwNSf","timestamp":1587486196143}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"FpCtYevLHfl4","colab_type":"text"},"source":["# **Deep-STORM (2D)**\n","\n","---\n","\n","Deep-STORM is a neural network capable of image reconstruction from high-density single-molecule localization microscopy (SMLM), first published in 2018 by [Nehme *et al.* in Optica](https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458). The architecture used here is a U-Net based network without skip connections. This network allows image reconstruction of 2D super-resolution images, in a supervised training manner. The network is trained using simulated high-density SMLM data for which the ground-truth is available. These simulations are obtained from random distribution of single molecules in a field-of-view and therefore do not imprint structural priors during training. The network output a super-resolution image with increased pixel density (typically upsampling factor of 8 in each dimension).\n","\n","Deep-STORM has **two key advantages**:\n","- SMLM reconstruction at high density of emitters\n","- fast prediction (reconstruction) once the model is trained appropriately, compared to more common multi-emitter fitting processes.\n","\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is based on the following paper: \n","\n","**Deep-STORM: super-resolution single-molecule microscopy by deep learning**, Optica (2018) by *Elias Nehme, Lucien E. 
Weiss, Tomer Michaeli, and Yoav Shechtman* (https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458)\n","\n","And source code found in: https://github.com/EliasNehme/Deep-STORM\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"wyzTn3IcHq6Y","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"bEy4EBXHHyAX","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," Deep-STORM is able to train on simulated dataset of SMLM data (see https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458 for more info). Here, we provide a simulator that will generate training dataset (section 3.1.b). A few parameters will allow you to match the simulation to your experimental data. 
Similarly to what is described in the paper, simulations obtained from ThunderSTORM can also be loaded here (section 3.1.a).\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"E04mOlG_H5Tz","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---"]},{"cell_type":"markdown","metadata":{"id":"F_tjlGzsH-Dn","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"gn-LaaNNICqL","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","# %tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.__version__ != '2.2.0':\n"," !pip install tensorflow==2.2.0\n","\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime settings are correct then Google did not allocate GPU to your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n","\n","# from tensorflow.python.client import device_lib \n","# device_lib.list_local_devices()\n","\n","# print the tensorflow version\n","print('Tensorflow version is ' + str(tf.__version__))\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"tnP7wM79IKW-","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"1R-7Fo34_gOd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. 
\n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"jRnQZWSZhArJ","colab_type":"text"},"source":["# **2. Install Deep-STORM and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"kSrZMo3X_NhO","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Deep-STORM and dependencies\n","\n","# %% Model definition + helper functions\n","\n","# Import keras modules and libraries\n","from tensorflow import keras\n","from tensorflow.keras.models import Model\n","from tensorflow.keras.layers import Input, Activation, UpSampling2D, Convolution2D, MaxPooling2D, BatchNormalization, Layer\n","from tensorflow.keras.callbacks import Callback\n","from tensorflow.keras import backend as K\n","from tensorflow.keras import optimizers, losses\n","\n","from tensorflow.keras.preprocessing.image import ImageDataGenerator\n","from tensorflow.keras.callbacks import ModelCheckpoint\n","from tensorflow.keras.callbacks import ReduceLROnPlateau\n","from skimage.transform import warp\n","from skimage.transform import SimilarityTransform\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from scipy.signal import fftconvolve\n","\n","# Import common libraries\n","import tensorflow as tf\n","import numpy as np\n","import pandas as pd\n","import matplotlib.pyplot as plt\n","import h5py\n","import scipy.io as sio\n","from os.path import abspath\n","from sklearn.model_selection import train_test_split\n","from skimage import io\n","import time\n","import os\n","import shutil\n","import csv\n","from PIL import Image \n","from PIL.TiffTags import TAGS\n","from scipy.ndimage import gaussian_filter\n","import math\n","from astropy.visualization import simple_norm\n","from sys import getsizeof\n","\n","# For sliders and dropdown menu, progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","from tqdm import tqdm\n","\n","# For Multi-threading in simulation\n","from numba import njit, prange\n","\n","\n","# define a function that projects and rescales an image to the range [0,1]\n","def project_01(im):\n"," im = np.squeeze(im)\n"," min_val = im.min()\n"," max_val = im.max()\n"," return (im - min_val)/(max_val - min_val)\n","\n","# normalize image given mean and std\n","def normalize_im(im, dmean, dstd):\n"," im = np.squeeze(im)\n"," im_norm = np.zeros(im.shape,dtype=np.float32)\n"," im_norm = (im - dmean)/dstd\n"," return im_norm\n","\n","# Define the loss history recorder\n","class LossHistory(Callback):\n"," def on_train_begin(self, logs={}):\n"," self.losses = []\n","\n"," def on_batch_end(self, batch, logs={}):\n"," self.losses.append(logs.get('loss'))\n"," \n","# Define a matlab like gaussian 2D filter\n","def matlab_style_gauss2D(shape=(7,7),sigma=1):\n"," \"\"\" \n"," 2D gaussian filter - should give the same result as:\n"," MATLAB's fspecial('gaussian',[shape],[sigma]) \n"," \"\"\"\n"," m,n = [(ss-1.)/2. 
for ss in shape]\n"," y,x = np.ogrid[-m:m+1,-n:n+1]\n"," h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n"," h.astype(dtype=K.floatx())\n"," h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n"," sumh = h.sum()\n"," if sumh != 0:\n"," h /= sumh\n"," h = h*2.0\n"," h = h.astype('float32')\n"," return h\n","\n","# Expand the filter dimensions\n","psf_heatmap = matlab_style_gauss2D(shape = (7,7),sigma=1)\n","gfilter = tf.reshape(psf_heatmap, [7, 7, 1, 1])\n","\n","# Combined MSE + L1 loss\n","def L1L2loss(input_shape):\n"," def bump_mse(heatmap_true, spikes_pred):\n","\n"," # generate the heatmap corresponding to the predicted spikes\n"," heatmap_pred = K.conv2d(spikes_pred, gfilter, strides=(1, 1), padding='same')\n","\n"," # heatmaps MSE\n"," loss_heatmaps = losses.mean_squared_error(heatmap_true,heatmap_pred)\n","\n"," # l1 on the predicted spikes\n"," loss_spikes = losses.mean_absolute_error(spikes_pred,tf.zeros(input_shape))\n"," return loss_heatmaps + loss_spikes\n"," return bump_mse\n","\n","# Define the concatenated conv2, batch normalization, and relu block\n","def conv_bn_relu(nb_filter, rk, ck, name):\n"," def f(input):\n"," conv = Convolution2D(nb_filter, kernel_size=(rk, ck), strides=(1,1),\\\n"," padding=\"same\", use_bias=False,\\\n"," kernel_initializer=\"Orthogonal\",name='conv-'+name)(input)\n"," conv_norm = BatchNormalization(name='BN-'+name)(conv)\n"," conv_norm_relu = Activation(activation = \"relu\",name='Relu-'+name)(conv_norm)\n"," return conv_norm_relu\n"," return f\n","\n","# Define the model architechture\n","def CNN(input,names):\n"," Features1 = conv_bn_relu(32,3,3,names+'F1')(input)\n"," pool1 = MaxPooling2D(pool_size=(2,2),name=names+'Pool1')(Features1)\n"," Features2 = conv_bn_relu(64,3,3,names+'F2')(pool1)\n"," pool2 = MaxPooling2D(pool_size=(2, 2),name=names+'Pool2')(Features2)\n"," Features3 = conv_bn_relu(128,3,3,names+'F3')(pool2)\n"," pool3 = MaxPooling2D(pool_size=(2, 2),name=names+'Pool3')(Features3)\n"," Features4 = conv_bn_relu(512,3,3,names+'F4')(pool3)\n"," up5 = UpSampling2D(size=(2, 2),name=names+'Upsample1')(Features4)\n"," Features5 = conv_bn_relu(128,3,3,names+'F5')(up5)\n"," up6 = UpSampling2D(size=(2, 2),name=names+'Upsample2')(Features5)\n"," Features6 = conv_bn_relu(64,3,3,names+'F6')(up6)\n"," up7 = UpSampling2D(size=(2, 2),name=names+'Upsample3')(Features6)\n"," Features7 = conv_bn_relu(32,3,3,names+'F7')(up7)\n"," return Features7\n","\n","# Define the Model building for an arbitrary input size\n","def buildModel(input_dim, initial_learning_rate = 0.001):\n"," input_ = Input (shape = (input_dim))\n"," act_ = CNN (input_,'CNN')\n"," density_pred = Convolution2D(1, kernel_size=(1, 1), strides=(1, 1), padding=\"same\",\\\n"," activation=\"linear\", use_bias = False,\\\n"," kernel_initializer=\"Orthogonal\",name='Prediction')(act_)\n"," model = Model (inputs= input_, outputs=density_pred)\n"," opt = optimizers.Adam(lr = initial_learning_rate)\n"," model.compile(optimizer=opt, loss = L1L2loss(input_dim))\n"," return model\n","\n","\n","# define a function that trains a model for a given data SNR and density\n","def train_model(patches, heatmaps, modelPath, epochs, steps_per_epoch, batch_size, upsampling_factor=8, validation_split = 0.3, initial_learning_rate = 0.001, pretrained_model_path = '', L2_weighting_factor = 100):\n"," \n"," \"\"\"\n"," This function trains a CNN model on the desired training set, given the \n"," upsampled training images and labels generated in MATLAB.\n"," \n"," # Inputs\n"," # TO UPDATE ----------\n","\n"," # Outputs\n"," 
function saves the weights of the trained model to a hdf5, and the \n"," normalization factors to a mat file. These will be loaded later for testing \n"," the model in test_model. \n"," \"\"\"\n"," \n"," # for reproducibility\n"," np.random.seed(123)\n","\n"," X_train, X_test, y_train, y_test = train_test_split(patches, heatmaps, test_size = validation_split, random_state=42)\n"," print('Number of training examples: %d' % X_train.shape[0])\n"," print('Number of validation examples: %d' % X_test.shape[0])\n"," \n"," # Setting type\n"," X_train = X_train.astype('float32')\n"," X_test = X_test.astype('float32')\n"," y_train = y_train.astype('float32')\n"," y_test = y_test.astype('float32')\n","\n"," \n"," #===================== Training set normalization ==========================\n"," # normalize training images to be in the range [0,1] and calculate the \n"," # training set mean and std\n"," mean_train = np.zeros(X_train.shape[0],dtype=np.float32)\n"," std_train = np.zeros(X_train.shape[0], dtype=np.float32)\n"," for i in range(X_train.shape[0]):\n"," X_train[i, :, :] = project_01(X_train[i, :, :])\n"," mean_train[i] = X_train[i, :, :].mean()\n"," std_train[i] = X_train[i, :, :].std()\n","\n"," # resulting normalized training images\n"," mean_val_train = mean_train.mean()\n"," std_val_train = std_train.mean()\n"," X_train_norm = np.zeros(X_train.shape, dtype=np.float32)\n"," for i in range(X_train.shape[0]):\n"," X_train_norm[i, :, :] = normalize_im(X_train[i, :, :], mean_val_train, std_val_train)\n"," \n"," # patch size\n"," psize = X_train_norm.shape[1]\n","\n"," # Reshaping\n"," X_train_norm = X_train_norm.reshape(X_train.shape[0], psize, psize, 1)\n","\n"," # ===================== Test set normalization ==========================\n"," # normalize test images to be in the range [0,1] and calculate the test set \n"," # mean and std\n"," mean_test = np.zeros(X_test.shape[0],dtype=np.float32)\n"," std_test = np.zeros(X_test.shape[0], dtype=np.float32)\n"," for i in range(X_test.shape[0]):\n"," X_test[i, :, :] = project_01(X_test[i, :, :])\n"," mean_test[i] = X_test[i, :, :].mean()\n"," std_test[i] = X_test[i, :, :].std()\n","\n"," # resulting normalized test images\n"," mean_val_test = mean_test.mean()\n"," std_val_test = std_test.mean()\n"," X_test_norm = np.zeros(X_test.shape, dtype=np.float32)\n"," for i in range(X_test.shape[0]):\n"," X_test_norm[i, :, :] = normalize_im(X_test[i, :, :], mean_val_test, std_val_test)\n"," \n"," # Reshaping\n"," X_test_norm = X_test_norm.reshape(X_test.shape[0], psize, psize, 1)\n","\n"," # Reshaping labels\n"," Y_train = y_train.reshape(y_train.shape[0], psize, psize, 1)\n"," Y_test = y_test.reshape(y_test.shape[0], psize, psize, 1)\n","\n"," # Save datasets to a matfile to open later in matlab\n"," mdict = {\"mean_test\": mean_val_test, \"std_test\": std_val_test, \"upsampling_factor\": upsampling_factor, \"Normalization factor\": L2_weighting_factor}\n"," sio.savemat(os.path.join(modelPath,\"model_metadata.mat\"), mdict)\n","\n","\n"," # Set the dimensions ordering according to tensorflow consensous\n"," # K.set_image_dim_ordering('tf')\n"," K.set_image_data_format('channels_last')\n","\n"," # Save the model weights after each epoch if the validation loss decreased\n"," checkpointer = ModelCheckpoint(filepath=os.path.join(modelPath,\"weights_best.hdf5\"), verbose=1,\n"," save_best_only=True)\n","\n"," # Change learning when loss reaches a plataeu\n"," change_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=0.00005)\n"," \n"," # 
Model building and complitation\n"," model = buildModel((psize, psize, 1), initial_learning_rate = initial_learning_rate)\n"," model.summary()\n","\n"," # Load pretrained model\n"," if not pretrained_model_path:\n"," print('Using random initial model weights.')\n"," else:\n"," print('Loading model weights from '+pretrained_model_path)\n"," model.load_weights(pretrained_model_path)\n"," \n"," # Create an image data generator for real time data augmentation\n"," datagen = ImageDataGenerator(\n"," featurewise_center=False, # set input mean to 0 over the dataset\n"," samplewise_center=False, # set each sample mean to 0\n"," featurewise_std_normalization=False, # divide inputs by std of the dataset\n"," samplewise_std_normalization=False, # divide each input by its std\n"," zca_whitening=False, # apply ZCA whitening\n"," rotation_range=0., # randomly rotate images in the range (degrees, 0 to 180)\n"," width_shift_range=0., # randomly shift images horizontally (fraction of total width)\n"," height_shift_range=0., # randomly shift images vertically (fraction of total height)\n"," zoom_range=0.,\n"," shear_range=0.,\n"," horizontal_flip=False, # randomly flip images\n"," vertical_flip=False, # randomly flip images\n"," fill_mode='constant',\n"," data_format=K.image_data_format())\n","\n"," # Fit the image generator on the training data\n"," datagen.fit(X_train_norm)\n"," \n"," # loss history recorder\n"," history = LossHistory()\n","\n"," # Inform user training begun\n"," print('-------------------------------')\n"," print('Training model...')\n","\n"," # Fit model on the batches generated by datagen.flow()\n"," train_history = model.fit_generator(datagen.flow(X_train_norm, Y_train, batch_size=batch_size), \n"," steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=1, \n"," validation_data=(X_test_norm, Y_test), \n"," callbacks=[history, checkpointer, change_lr]) \n","\n"," # Inform user training ended\n"," print('-------------------------------')\n"," print('Training Complete!')\n"," \n"," # Save the last model\n"," model.save(os.path.join(modelPath, 'weights_last.hdf5'))\n","\n"," # convert the history.history dict to a pandas DataFrame: \n"," lossData = pd.DataFrame(train_history.history) \n","\n"," if os.path.exists(os.path.join(modelPath,\"Quality Control\")):\n"," shutil.rmtree(os.path.join(modelPath,\"Quality Control\"))\n","\n"," os.makedirs(os.path.join(modelPath,\"Quality Control\"))\n","\n"," # The training evaluation.csv is saved (overwrites the Files if needed). 
\n"," lossDataCSVpath = os.path.join(modelPath,\"Quality Control/training_evaluation.csv\")\n"," with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss','learning rate'])\n"," for i in range(len(train_history.history['loss'])):\n"," writer.writerow([train_history.history['loss'][i], train_history.history['val_loss'][i], train_history.history['lr'][i]])\n","\n"," return\n","\n","\n","# Normalization functions from Martin Weigert used in CARE\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","\n","# Multi-threaded Erf-based image construction\n","@njit(parallel=True)\n","def FromLoc2Image_Erf(xc_array, yc_array, photon_array, sigma_array, image_size = (64,64), pixel_size = 100):\n"," w = image_size[0]\n"," h = image_size[1]\n"," erfImage = np.zeros((w, h))\n"," for ij in prange(w*h):\n"," j = int(ij/w)\n"," i = ij - j*w\n"," for (xc, yc, photon, sigma) in zip(xc_array, yc_array, photon_array, sigma_array):\n"," # Don't bother if the emitter has photons <= 0 or if Sigma <= 0\n"," if (sigma > 0) and (photon > 0):\n"," S = sigma*math.sqrt(2)\n"," x = i*pixel_size - xc\n"," y = j*pixel_size - yc\n"," # Don't bother if the emitter is further than 4 sigma from the centre of the pixel\n"," if (x+pixel_size/2)**2 + (y+pixel_size/2)**2 < 16*sigma**2:\n"," ErfX = math.erf((x+pixel_size)/S) - math.erf(x/S)\n"," ErfY = math.erf((y+pixel_size)/S) - math.erf(y/S)\n"," erfImage[j][i] += 0.25*photon*ErfX*ErfY\n"," return erfImage\n","\n","\n","@njit(parallel=True)\n","def FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = (64,64), pixel_size = 100):\n"," w = image_size[0]\n"," h = image_size[1]\n"," locImage = 
np.zeros((image_size[0],image_size[1]) )\n"," n_locs = len(xc_array)\n","\n"," for e in prange(n_locs):\n"," locImage[int(max(min(round(yc_array[e]/pixel_size),w-1),0))][int(max(min(round(xc_array[e]/pixel_size),h-1),0))] += 1\n","\n"," return locImage\n","\n","\n","\n","def getPixelSizeTIFFmetadata(TIFFpath, display=False):\n"," with Image.open(TIFFpath) as img:\n"," meta_dict = {TAGS[key] : img.tag[key] for key in img.tag.keys()}\n","\n","\n"," # TIFF tags\n"," # https://www.loc.gov/preservation/digital/formats/content/tiff_tags.shtml\n"," # https://www.awaresystems.be/imaging/tiff/tifftags/resolutionunit.html\n"," ResolutionUnit = meta_dict['ResolutionUnit'][0] # unit of resolution\n"," width = meta_dict['ImageWidth'][0]\n"," height = meta_dict['ImageLength'][0]\n","\n"," xResolution = meta_dict['XResolution'][0] # number of pixels / ResolutionUnit\n","\n"," if len(xResolution) == 1:\n"," xResolution = xResolution[0]\n"," elif len(xResolution) == 2:\n"," xResolution = xResolution[0]/xResolution[1]\n"," else:\n"," print('Image resolution not defined.')\n"," xResolution = 1\n","\n"," if ResolutionUnit == 2:\n"," # Units given are in inches\n"," pixel_size = 0.025*1e9/xResolution\n"," elif ResolutionUnit == 3:\n"," # Units given are in cm\n"," pixel_size = 0.01*1e9/xResolution\n"," else: \n"," # ResolutionUnit is therefore 1\n"," print('Resolution unit not defined. Assuming: um')\n"," pixel_size = 1e3/xResolution\n","\n"," if display:\n"," print('Pixel size obtained from metadata: '+str(pixel_size)+' nm')\n"," print('Image size: '+str(width)+'x'+str(height))\n"," \n"," return (pixel_size, width, height)\n","\n","\n","def saveAsTIF(path, filename, array, pixel_size):\n"," \"\"\"\n"," Image saving using PIL to save as .tif format\n"," # Input \n"," path - path where it will be saved\n"," filename - name of the file to save (no extension)\n"," array - numpy array conatining the data at the required format\n"," pixel_size - physical size of pixels in nanometers (identical for x and y)\n"," \"\"\"\n","\n"," # print('Data type: '+str(array.dtype))\n"," if (array.dtype == np.uint16):\n"," mode = 'I;16'\n"," elif (array.dtype == np.uint32):\n"," mode = 'I'\n"," else:\n"," mode = 'F'\n","\n"," # Rounding the pixel size to the nearest number that divides exactly 1cm.\n"," # Resolution needs to be a rational number --> see TIFF format\n"," # pixel_size = 10000/(round(10000/pixel_size))\n","\n"," if len(array.shape) == 2:\n"," im = Image.fromarray(array)\n"," im.save(os.path.join(path, filename+'.tif'),\n"," mode = mode, \n"," resolution_unit = 3,\n"," resolution = 0.01*1e9/pixel_size)\n","\n","\n"," elif len(array.shape) == 3:\n"," imlist = []\n"," for frame in array:\n"," imlist.append(Image.fromarray(frame))\n","\n"," imlist[0].save(os.path.join(path, filename+'.tif'), save_all=True,\n"," append_images=imlist[1:],\n"," mode = mode, \n"," resolution_unit = 3,\n"," resolution = 0.01*1e9/pixel_size)\n","\n"," return\n","\n","\n","\n","\n","class Maximafinder(Layer):\n"," def __init__(self, thresh, neighborhood_size, use_local_avg, **kwargs):\n"," super(Maximafinder, self).__init__(**kwargs)\n"," self.thresh = tf.constant(thresh, dtype=tf.float32)\n"," self.nhood = neighborhood_size\n"," self.use_local_avg = use_local_avg\n","\n"," def build(self, input_shape):\n"," if self.use_local_avg is True:\n"," self.kernel_x = tf.reshape(tf.constant([[-1,0,1],[-1,0,1],[-1,0,1]], dtype=tf.float32), [3, 3, 1, 1])\n"," self.kernel_y = tf.reshape(tf.constant([[-1,-1,-1],[0,0,0],[1,1,1]], dtype=tf.float32), [3, 3, 1, 
1])\n"," self.kernel_sum = tf.reshape(tf.constant([[1,1,1],[1,1,1],[1,1,1]], dtype=tf.float32), [3, 3, 1, 1])\n","\n"," def call(self, inputs):\n","\n"," # local maxima positions\n"," max_pool_image = MaxPooling2D(pool_size=(self.nhood,self.nhood), strides=(1,1), padding='same')(inputs)\n"," cond = tf.math.greater(max_pool_image, self.thresh) & tf.math.equal(max_pool_image, inputs)\n"," indices = tf.where(cond)\n"," bind, xind, yind = indices[:, 0], indices[:, 2], indices[:, 1]\n"," confidence = tf.gather_nd(inputs, indices)\n","\n"," # local CoG estimator\n"," if self.use_local_avg:\n"," x_image = K.conv2d(inputs, self.kernel_x, padding='same')\n"," y_image = K.conv2d(inputs, self.kernel_y, padding='same')\n"," sum_image = K.conv2d(inputs, self.kernel_sum, padding='same')\n"," confidence = tf.cast(tf.gather_nd(sum_image, indices), dtype=tf.float32)\n"," x_local = tf.math.divide(tf.gather_nd(x_image, indices),tf.gather_nd(sum_image, indices))\n"," y_local = tf.math.divide(tf.gather_nd(y_image, indices),tf.gather_nd(sum_image, indices))\n"," xind = tf.cast(xind, dtype=tf.float32) + tf.cast(x_local, dtype=tf.float32)\n"," yind = tf.cast(yind, dtype=tf.float32) + tf.cast(y_local, dtype=tf.float32)\n"," else:\n"," xind = tf.cast(xind, dtype=tf.float32)\n"," yind = tf.cast(yind, dtype=tf.float32)\n"," \n"," return bind, xind, yind, confidence\n","\n"," def get_config(self):\n","\n"," # Implement get_config to enable serialization. This is optional.\n"," base_config = super(Maximafinder, self).get_config()\n"," config = {}\n"," return dict(list(base_config.items()) + list(config.items()))\n","\n","\n","\n","# ------------------------------- Prediction with postprocessing function-------------------------------\n","def batchFramePredictionLocalization(dataPath, filename, modelPath, savePath, batch_size=1, thresh=0.1, neighborhood_size=3, use_local_avg = False, pixel_size = None):\n"," \"\"\"\n"," This function tests a trained model on the desired test set, given the \n"," tiff stack of test images, learned weights, and normalization factors.\n"," \n"," # Inputs\n"," dataPath - the path to the folder containing the tiff stack(s) to run prediction on \n"," filename - the name of the file to process\n"," modelPath - the path to the folder containing the weights file and the mean and standard deviation file generated in train_model\n"," savePath - the path to the folder where to save the prediction\n"," batch_size. 
- the number of frames to predict on for each iteration\n"," thresh - threshoold percentage from the maximum of the gaussian scaling\n"," neighborhood_size - the size of the neighborhood for local maxima finding\n"," use_local_average - Boolean whether to perform local averaging or not\n"," \"\"\"\n"," \n"," # load mean and std\n"," matfile = sio.loadmat(os.path.join(modelPath,'model_metadata.mat'))\n"," test_mean = np.array(matfile['mean_test'])\n"," test_std = np.array(matfile['std_test']) \n"," upsampling_factor = np.array(matfile['upsampling_factor'])\n"," upsampling_factor = upsampling_factor.item() # convert to scalar\n"," L2_weighting_factor = np.array(matfile['Normalization factor'])\n"," L2_weighting_factor = L2_weighting_factor.item() # convert to scalar\n","\n"," # Read in the raw file\n"," Images = io.imread(os.path.join(dataPath, filename))\n"," if pixel_size == None:\n"," pixel_size, _, _ = getPixelSizeTIFFmetadata(os.path.join(dataPath, filename), display=True)\n"," pixel_size_hr = pixel_size/upsampling_factor\n","\n"," # get dataset dimensions\n"," (nFrames, M, N) = Images.shape\n"," print('Input image is '+str(N)+'x'+str(M)+' with '+str(nFrames)+' frames.')\n","\n"," # Build the model for a bigger image\n"," model = buildModel((upsampling_factor*M, upsampling_factor*N, 1))\n","\n"," # Load the trained weights\n"," model.load_weights(os.path.join(modelPath,'weights_best.hdf5'))\n","\n"," # add a post-processing module\n"," max_layer = Maximafinder(thresh*L2_weighting_factor, neighborhood_size, use_local_avg)\n","\n"," # Initialise the results: lists will be used to collect all the localizations\n"," frame_number_list, x_nm_list, y_nm_list, confidence_au_list = [], [], [], []\n","\n"," # Initialise the results\n"," Prediction = np.zeros((M*upsampling_factor, N*upsampling_factor), dtype=np.float32)\n"," Widefield = np.zeros((M, N), dtype=np.float32)\n","\n"," # run model in batches\n"," n_batches = math.ceil(nFrames/batch_size)\n"," for b in tqdm(range(n_batches)):\n","\n"," nF = min(batch_size, nFrames - b*batch_size)\n"," Images_norm = np.zeros((nF, M, N),dtype=np.float32)\n"," Images_upsampled = np.zeros((nF, M*upsampling_factor, N*upsampling_factor), dtype=np.float32)\n","\n"," # Upsampling using a simple nearest neighbor interp and calculating - MULTI-THREAD this?\n"," for f in range(nF):\n"," Images_norm[f,:,:] = project_01(Images[b*batch_size+f,:,:])\n"," Images_norm[f,:,:] = normalize_im(Images_norm[f,:,:], test_mean, test_std)\n"," Images_upsampled[f,:,:] = np.kron(Images_norm[f,:,:], np.ones((upsampling_factor,upsampling_factor)))\n"," Widefield += Images[b*batch_size+f,:,:]\n","\n"," # Reshaping\n"," Images_upsampled = np.expand_dims(Images_upsampled,axis=3)\n","\n"," # Run prediction and local amxima finding\n"," predicted_density = model.predict_on_batch(Images_upsampled)\n"," predicted_density[predicted_density < 0] = 0\n"," Prediction += predicted_density.sum(axis = 3).sum(axis = 0)\n","\n"," bind, xind, yind, confidence = max_layer(predicted_density)\n"," \n"," # normalizing the confidence by the L2_weighting_factor\n"," confidence /= L2_weighting_factor \n","\n"," # turn indices to nms and append to the results\n"," xind, yind = xind*pixel_size_hr, yind*pixel_size_hr\n"," frmind = (bind.numpy() + b*batch_size + 1).tolist()\n"," xind = xind.numpy().tolist()\n"," yind = yind.numpy().tolist()\n"," confidence = confidence.numpy().tolist()\n"," frame_number_list += frmind\n"," x_nm_list += xind\n"," y_nm_list += yind\n"," confidence_au_list += confidence\n","\n"," 
# Open and create the csv file that will contain all the localizations\n"," if use_local_avg:\n"," ext = '_avg'\n"," else:\n"," ext = '_max'\n"," with open(os.path.join(savePath, 'Localizations_' + os.path.splitext(filename)[0] + ext + '.csv'), \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow(['frame', 'x [nm]', 'y [nm]', 'confidence [a.u]'])\n"," locs = list(zip(frame_number_list, x_nm_list, y_nm_list, confidence_au_list))\n"," writer.writerows(locs)\n","\n"," # Save the prediction and widefield image\n"," Widefield = np.kron(Widefield, np.ones((upsampling_factor,upsampling_factor)))\n"," Widefield = np.float32(Widefield)\n","\n"," # io.imsave(os.path.join(savePath, 'Predicted_'+os.path.splitext(filename)[0]+'.tif'), Prediction)\n"," # io.imsave(os.path.join(savePath, 'Widefield_'+os.path.splitext(filename)[0]+'.tif'), Widefield)\n","\n"," saveAsTIF(savePath, 'Predicted_'+os.path.splitext(filename)[0], Prediction, pixel_size_hr)\n"," saveAsTIF(savePath, 'Widefield_'+os.path.splitext(filename)[0], Widefield, pixel_size_hr)\n","\n","\n"," return\n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n"," NORMAL = '\\033[0m' # white (normal)\n","\n","\n","\n","def list_files(directory, extension):\n"," return (f for f in os.listdir(directory) if f.endswith('.' + extension))\n","\n","\n","# @njit(parallel=True)\n","def subPixelMaxLocalization(array, method = 'CoM', patch_size = 3):\n"," xMaxInd, yMaxInd = np.unravel_index(array.argmax(), array.shape, order='C')\n"," centralPatch = XC[(xMaxInd-patch_size):(xMaxInd+patch_size+1),(yMaxInd-patch_size):(yMaxInd+patch_size+1)]\n","\n"," if (method == 'MAX'):\n"," x0 = xMaxInd\n"," y0 = yMaxInd\n","\n"," elif (method == 'CoM'):\n"," x0 = 0\n"," y0 = 0\n"," S = 0\n"," for xy in range(patch_size*patch_size):\n"," y = math.floor(xy/patch_size)\n"," x = xy - y*patch_size\n"," x0 += x*array[x,y]\n"," y0 += y*array[x,y]\n"," S = array[x,y]\n"," \n"," x0 = x0/S - patch_size/2 + xMaxInd\n"," y0 = y0/S - patch_size/2 + yMaxInd\n"," \n"," elif (method == 'Radiality'):\n"," # Not implemented yet\n"," x0 = xMaxInd\n"," y0 = yMaxInd\n"," \n"," return (x0, y0)\n","\n","\n","@njit(parallel=True)\n","def correctDriftLocalization(xc_array, yc_array, frames, xDrift, yDrift):\n"," n_locs = xc_array.shape[0]\n"," xc_array_Corr = np.empty(n_locs)\n"," yc_array_Corr = np.empty(n_locs)\n"," \n"," for loc in prange(n_locs):\n"," xc_array_Corr[loc] = xc_array[loc] - xDrift[frames[loc]]\n"," yc_array_Corr[loc] = yc_array[loc] - yDrift[frames[loc]]\n","\n"," return (xc_array_Corr, yc_array_Corr)\n","\n","\n","print('--------------------------------')\n","print('DeepSTORM installation complete.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"vu8f5NGJkJos","colab_type":"text"},"source":["\n","# **3. Generate patches for training**\n","---\n","\n","For Deep-STORM the training data can be obtained in two ways:\n","* Simulated using ThunderSTORM or other simulation tool and loaded here (**using Section 3.1.a**)\n","* Directly simulated in this notebook (**using Section 3.1.b**)\n"]},{"cell_type":"markdown","metadata":{"id":"WSV8xnlynp0l","colab_type":"text"},"source":["## **3.1.a Load training data**\n","---\n","\n","Here you can load your simulated data along with its corresponding localization file.\n","* The `pixel_size` is defined in nanometer (nm). 
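[Editor's note, hedged sketch: the localization file loaded in section 3.1.a is expected to follow the ThunderSTORM export convention used throughout this notebook, i.e. an index column followed by at least 'frame', 'x [nm]' and 'y [nm]', which are the columns consumed by the patch generation in section 3.2. As a minimal illustration only, not part of the notebook and using a hypothetical file path, the loaded table can be sanity-checked before generating patches:

import pandas as pd

# Hypothetical path to a ThunderSTORM-style localization export (CSV)
LocalizationData_path = "/content/gdrive/My Drive/SimulatedDataset.csv"

# The notebook reads the table with the first column as index (same convention as ThunderSTORM)
LocData = pd.read_csv(LocalizationData_path, index_col=0)

# Columns used later when building the high-resolution target patches (section 3.2)
required_columns = ["frame", "x [nm]", "y [nm]"]
missing = [c for c in required_columns if c not in LocData.columns]
if missing:
    print("WARNING: missing column(s): " + ", ".join(missing))
else:
    print(str(len(LocData.index)) + " localizations over "
          + str(int(LocData["frame"].max())) + " frames, coordinates in nm.")

End of editor's note.]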
"]},{"cell_type":"code","metadata":{"id":"CT6SNcfNg6j0","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Load raw data\n","\n","# Get user input\n","ImageData_path = \"\" #@param {type:\"string\"}\n","LocalizationData_path = \"\" #@param {type: \"string\"}\n","#@markdown Get pixel size from file?\n","get_pixel_size_from_file = True #@param {type:\"boolean\"}\n","#@markdown Otherwise, use this value:\n","pixel_size = 100 #@param {type:\"number\"}\n","\n","if get_pixel_size_from_file:\n"," pixel_size,_,_ = getPixelSizeTIFFmetadata(ImageData_path, True)\n","\n","# load the tiff data\n","Images = io.imread(ImageData_path)\n","# get dataset dimensions\n","if len(Images.shape) == 3:\n"," (number_of_frames, M, N) = Images.shape\n","elif len(Images.shape) == 2:\n"," (M, N) = Images.shape\n"," number_of_frames = 1\n","print('Loaded images: '+str(M)+'x'+str(N)+' with '+str(number_of_frames)+' frames')\n","\n","# Interactive display of the stack\n","def scroll_in_time(frame):\n"," f=plt.figure(figsize=(6,6))\n"," plt.imshow(Images[frame-1], interpolation='nearest', cmap = 'gray')\n"," plt.title('Training source at frame = ' + str(frame))\n"," plt.axis('off');\n","\n","if number_of_frames > 1:\n"," interact(scroll_in_time, frame=widgets.IntSlider(min=1, max=Images.shape[0], step=1, value=0, continuous_update=False));\n","else:\n"," f=plt.figure(figsize=(6,6))\n"," plt.imshow(Images, interpolation='nearest', cmap = 'gray')\n"," plt.title('Training source')\n"," plt.axis('off');\n","\n","# Load the localization file and display the first\n","LocData = pd.read_csv(LocalizationData_path, index_col=0)\n","LocData.tail()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"K9xE5GeYiks9","colab_type":"text"},"source":["## **3.1.b Simulate training data**\n","---\n","This simulation tool allows you to generate SMLM data of randomly distrubuted emitters in a field-of-view. \n","The assumptions are as follows:\n","\n","* Gaussian Point Spread Function (PSF) with standard deviation defined by `Sigma`. The nominal value of `sigma` can be evaluated using `sigma = 0.21 x Lambda / NA`. \n","* Each emitter will emit `n_photons` per frame, and generate their equivalent Poisson noise.\n","* The camera will contribute Gaussian noise to the signal with a standard deviation defined by `ReadOutNoise_ADC` in ADC\n","* The `emitter_density` is defined as the number of emitters / um^2 on any given frame. Variability in the emitter density can be applied by adjusting `emitter_density_std`. The latter parameter represents the standard deviation of the normal distribution that the density is drawn from for each individual frame. `emitter_density` **is defined in number of emitters / um^2**.\n","* The `n_photons` and `sigma` can additionally include some Gaussian variability by setting `n_photons_std` and `sigma_std`.\n","\n","Important note:\n","- All dimensions are in nanometer (e.g. 
`FOV_size` = 6400 represents a field of view of 6.4 um x 6.4 um).\n","\n"]},{"cell_type":"code","metadata":{"id":"sQyLXpEhitsg","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# ---------------------------- User input ----------------------------\n","#@markdown Run the simulation\n","#@markdown --- \n","#@markdown Camera settings: \n","FOV_size = 6400#@param {type:\"number\"}\n","pixel_size = 100#@param {type:\"number\"}\n","ADC_per_photon_conversion = 1 #@param {type:\"number\"}\n","ReadOutNoise_ADC = 4.5#@param {type:\"number\"}\n","ADC_offset = 50#@param {type:\"number\"}\n","\n","#@markdown Acquisition settings: \n","emitter_density = 6#@param {type:\"number\"}\n","emitter_density_std = 0#@param {type:\"number\"}\n","\n","number_of_frames = 20#@param {type:\"integer\"}\n","\n","sigma = 110 #@param {type:\"number\"}\n","sigma_std = 5 #@param {type:\"number\"}\n","# NA = 1.1 #@param {type:\"number\"}\n","# wavelength = 800#@param {type:\"number\"}\n","# wavelength_std = 150#@param {type:\"number\"}\n","n_photons = 2250#@param {type:\"number\"}\n","n_photons_std = 250#@param {type:\"number\"}\n","\n","\n","# ---------------------------- Variable initialisation ----------------------------\n","# Start the clock to measure how long it takes\n","start = time.time()\n","\n","print('-----------------------------------------------------------')\n","n_molecules = emitter_density*FOV_size*FOV_size/10**6\n","n_molecules_std = emitter_density_std*FOV_size*FOV_size/10**6\n","print('Number of molecules / FOV: '+str(round(n_molecules,2))+' +/- '+str((round(n_molecules_std,2))))\n","\n","# sigma = 0.21*wavelength/NA\n","# sigma_std = 0.21*wavelength_std/NA\n","# print('Gaussian PSF sigma: '+str(round(sigma,2))+' +/- '+str(round(sigma_std,2))+' nm')\n","\n","M = N = round(FOV_size/pixel_size)\n","FOV_size = M*pixel_size\n","print('Final image size: '+str(M)+'x'+str(M)+' ('+str(round(FOV_size/1000, 3))+'um x'+str(round(FOV_size/1000,3))+' um)')\n","\n","np.random.seed(1)\n","display_upsampling = 8 # used to display the loc map here\n","NoiseFreeImages = np.zeros((number_of_frames, M, M))\n","locImage = np.zeros((number_of_frames, display_upsampling*M, display_upsampling*N))\n","\n","frames = []\n","all_xloc = []\n","all_yloc = []\n","all_photons = []\n","all_sigmas = []\n","\n","# ---------------------------- Main simulation loop ----------------------------\n","print('-----------------------------------------------------------')\n","for f in tqdm(range(number_of_frames)):\n"," \n"," # Define the coordinates of emitters by randomly distributing them across the FOV\n"," n_mol = int(max(round(np.random.normal(n_molecules, n_molecules_std, size=1)[0]), 0))\n"," x_c = np.random.uniform(low=0.0, high=FOV_size, size=n_mol)\n"," y_c = np.random.uniform(low=0.0, high=FOV_size, size=n_mol)\n"," photon_array = np.random.normal(n_photons, n_photons_std, size=n_mol)\n"," sigma_array = np.random.normal(sigma, sigma_std, size=n_mol)\n"," # x_c = np.linspace(0,3000,5)\n"," # y_c = np.linspace(0,3000,5)\n","\n"," all_xloc += x_c.tolist()\n"," all_yloc += y_c.tolist()\n"," frames += ((f+1)*np.ones(x_c.shape[0])).tolist()\n"," all_photons += photon_array.tolist()\n"," all_sigmas += sigma_array.tolist()\n","\n"," locImage[f] = FromLoc2Image_SimpleHistogram(x_c, y_c, image_size = (N*display_upsampling, M*display_upsampling), pixel_size = pixel_size/display_upsampling)\n","\n"," # # Get the approximated locations according to the grid pixel size\n"," # Chr_emitters = 
[int(max(min(round(display_upsampling*x_c[i]/pixel_size),N*display_upsampling-1),0)) for i in range(len(x_c))]\n"," # Rhr_emitters = [int(max(min(round(display_upsampling*y_c[i]/pixel_size),M*display_upsampling-1),0)) for i in range(len(y_c))]\n","\n"," # # Build Localization image\n"," # for (r,c) in zip(Rhr_emitters, Chr_emitters):\n"," # locImage[f][r][c] += 1\n","\n"," NoiseFreeImages[f] = FromLoc2Image_Erf(x_c, y_c, photon_array, sigma_array, image_size = (M,M), pixel_size = pixel_size)\n","\n","\n","# ---------------------------- Create DataFrame fof localization file ----------------------------\n","# Table with localization info as dataframe output\n","LocData = pd.DataFrame()\n","LocData[\"frame\"] = frames\n","LocData[\"x [nm]\"] = all_xloc\n","LocData[\"y [nm]\"] = all_yloc\n","LocData[\"Photon #\"] = all_photons\n","LocData[\"Sigma [nm]\"] = all_sigmas\n","LocData.index += 1 # set indices to start at 1 and not 0 (same as ThunderSTORM)\n","\n","\n","# ---------------------------- Estimation of SNR ----------------------------\n","n_frames_for_SNR = 100\n","M_SNR = 10\n","x_c = np.random.uniform(low=0.0, high=pixel_size*M_SNR, size=n_frames_for_SNR)\n","y_c = np.random.uniform(low=0.0, high=pixel_size*M_SNR, size=n_frames_for_SNR)\n","photon_array = np.random.normal(n_photons, n_photons_std, size=n_frames_for_SNR)\n","sigma_array = np.random.normal(sigma, sigma_std, size=n_frames_for_SNR)\n","\n","SNR = np.zeros(n_frames_for_SNR)\n","for i in range(n_frames_for_SNR):\n"," SingleEmitterImage = FromLoc2Image_Erf(np.array([x_c[i]]), np.array([x_c[i]]), np.array([photon_array[i]]), np.array([sigma_array[i]]), (M_SNR, M_SNR), pixel_size)\n"," Signal_photon = np.max(SingleEmitterImage)\n"," Noise_photon = math.sqrt((ReadOutNoise_ADC/ADC_per_photon_conversion)**2 + Signal_photon)\n"," SNR[i] = Signal_photon/Noise_photon\n","\n","print('SNR: '+str(round(np.mean(SNR),2))+' +/- '+str(round(np.std(SNR),2)))\n","# ---------------------------- ----------------------------\n","\n","\n","# Table with info\n","simParameters = pd.DataFrame()\n","simParameters[\"FOV size (nm)\"] = [FOV_size]\n","simParameters[\"Pixel size (nm)\"] = [pixel_size]\n","simParameters[\"ADC/photon\"] = [ADC_per_photon_conversion]\n","simParameters[\"Read-out noise (ADC)\"] = [ReadOutNoise_ADC]\n","simParameters[\"Constant offset (ADC)\"] = [ADC_offset]\n","\n","simParameters[\"Emitter density (emitters/um^2)\"] = [emitter_density]\n","simParameters[\"STD of emitter density (emitters/um^2)\"] = [emitter_density_std]\n","simParameters[\"Number of frames\"] = [number_of_frames]\n","# simParameters[\"NA\"] = [NA]\n","# simParameters[\"Wavelength (nm)\"] = [wavelength]\n","# simParameters[\"STD of wavelength (nm)\"] = [wavelength_std]\n","simParameters[\"Sigma (nm))\"] = [sigma]\n","simParameters[\"STD of Sigma (nm))\"] = [sigma_std]\n","simParameters[\"Number of photons\"] = [n_photons]\n","simParameters[\"STD of number of photons\"] = [n_photons_std]\n","simParameters[\"SNR\"] = [np.mean(SNR)]\n","simParameters[\"STD of SNR\"] = [np.std(SNR)]\n","\n","\n","# ---------------------------- Finish simulation ----------------------------\n","# Calculating the noisy image\n","Images = ADC_per_photon_conversion * np.random.poisson(NoiseFreeImages) + ReadOutNoise_ADC * np.random.normal(size = (number_of_frames, M, N)) + ADC_offset\n","Images[Images <= 0] = 0\n","\n","# Convert to 16-bit or 32-bits integers\n","if Images.max() < (2**16-1):\n"," Images = Images.astype(np.uint16)\n","else:\n"," Images = 
Images.astype(np.uint32)\n","\n","\n","# ---------------------------- Display ----------------------------\n","# Displaying the time elapsed for simulation\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds,1),\"sec(s)\")\n","\n","\n","# Interactively display the results using Widgets\n","def scroll_in_time(frame):\n"," f = plt.figure(figsize=(18,6))\n"," plt.subplot(1,3,1)\n"," plt.imshow(locImage[frame-1], interpolation='bilinear', vmin = 0, vmax=0.1)\n"," plt.title('Localization image')\n"," plt.axis('off');\n","\n"," plt.subplot(1,3,2)\n"," plt.imshow(NoiseFreeImages[frame-1], interpolation='nearest', cmap='gray')\n"," plt.title('Noise-free simulation')\n"," plt.axis('off');\n","\n"," plt.subplot(1,3,3)\n"," plt.imshow(Images[frame-1], interpolation='nearest', cmap='gray')\n"," plt.title('Noisy simulation')\n"," plt.axis('off');\n","\n","interact(scroll_in_time, frame=widgets.IntSlider(min=1, max=Images.shape[0], step=1, value=0, continuous_update=False));\n","\n","# Display the head of the dataframe with localizations\n","LocData.tail()\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"Pz7RfSuoeJeq","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ---\n","# @markdown #Play this cell to save the simulated stack\n","# @markdown ####Please select a path to the folder where to save the simulated data. It is not necesary to save the data to run the training, but keeping the simulated for your own record can be useful to check its validity.\n","Save_path = \"\" #@param {type:\"string\"}\n","\n","if not os.path.exists(Save_path):\n"," os.makedirs(Save_path)\n"," print('Folder created.')\n","else:\n"," print('Training data already exists in folder: Data overwritten.')\n","\n","saveAsTIF(Save_path, 'SimulatedDataset', Images, pixel_size)\n","# io.imsave(os.path.join(Save_path, 'SimulatedDataset.tif'),Images)\n","LocData.to_csv(os.path.join(Save_path, 'SimulatedDataset.csv'))\n","simParameters.to_csv(os.path.join(Save_path, 'SimulatedParameters.csv'))\n","print('Training dataset saved.')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"K_8e3kE-JhVY","colab_type":"text"},"source":["## **3.2. Generate training patches**\n","---\n","\n","Training patches need to be created from the training data generated above. \n","* The `patch_size` needs to give sufficient contextual information and for most cases a `patch_size` of 26 (corresponding to patches of 26x26 pixels) works fine. **DEFAULT: 26**\n","* The `upsampling_factor` defines the effective magnification of the final super-resolved image compared to the input image (this is called magnification in ThunderSTORM). This is used to generate the super-resolved patches as target dataset. Using an `upsampling_factor` of 16 will require the use of more memory and it may be necessary to decreae the `patch_size` to 16 for example. **DEFAULT: 8**\n","* The `num_patches_per_frame` defines the number of patches extracted from each frame generated in section 3.1. **DEFAULT: 500**\n","* The `min_number_of_emitters_per_patch` defines the minimum number of emitters that need to be present in the patch to be a valid patch. An empty patch does not contain useful information for the network to learn from. **DEFAULT: 7**\n","* The `max_num_patches` defines the maximum number of patches to generate. 
Fewer may be generated depending on how many pacthes are rejected and how many frames are available. **DEFAULT: 10000**\n","* The `gaussian_sigma` defines the Gaussian standard deviation (in magnified pixels) applied to generate the super-resolved target image. **DEFAULT: 1**\n","* The `L2_weighting_factor` is a normalization factor used in the loss function. It helps balancing the loss from the L2 norm. When using higher densities, this factor should be decreased and vice-versa. This factor can be autimatically calculated using an empiraical formula. **DEFAULT: 100**\n","\n"]},{"cell_type":"code","metadata":{"id":"AsNx5KzcFNvC","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ## **Provide patch parameters**\n","\n","\n","# -------------------- User input --------------------\n","patch_size = 26 #@param {type:\"integer\"}\n","upsampling_factor = 8 #@param [\"4\", \"8\", \"16\"] {type:\"raw\"}\n","num_patches_per_frame = 500#@param {type:\"integer\"}\n","min_number_of_emitters_per_patch = 7#@param {type:\"integer\"}\n","max_num_patches = 10000#@param {type:\"integer\"}\n","gaussian_sigma = 1#@param {type:\"integer\"}\n","\n","#@markdown Estimate the optimal normalization factor automatically?\n","Automatic_normalization = True #@param {type:\"boolean\"}\n","#@markdown Otherwise, it will use the following value:\n","L2_weighting_factor = 100 #@param {type:\"number\"}\n","\n","\n","# -------------------- Prepare variables --------------------\n","# Start the clock to measure how long it takes\n","start = time.time()\n","\n","# Initialize some parameters\n","pixel_size_hr = pixel_size/upsampling_factor # in nm\n","n_patches = min(number_of_frames*num_patches_per_frame, max_num_patches)\n","patch_size = patch_size*upsampling_factor\n","\n","# Dimensions of the high-res grid\n","Mhr = upsampling_factor*M # in pixels\n","Nhr = upsampling_factor*N # in pixels\n","\n","# Initialize the training patches and labels\n","patches = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\n","spikes = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\n","heatmaps = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\n","\n","# Run over all frames and construct the training examples\n","k = 1 # current patch count\n","skip_counter = 0 # number of dataset skipped due to low density\n","id_start = 0 # id position in LocData for current frame\n","print('Generating '+str(n_patches)+' patches of '+str(patch_size)+'x'+str(patch_size))\n","\n","n_locs = len(LocData.index)\n","print('Total number of localizations: '+str(n_locs))\n","density = n_locs/(M*N*number_of_frames*(0.001*pixel_size)**2)\n","print('Density: '+str(round(density,2))+' locs/um^2')\n","n_locs_per_patch = patch_size**2*density\n","\n","if Automatic_normalization:\n"," # This empirical formulae attempts to balance the loss L2 function between the background and the bright spikes\n"," # A value of 100 was originally chosen to balance L2 for a patch size of 2.6x2.6^2 0.1um pixel size and density of 3 (hence the 20.28), at upsampling_factor = 8\n"," L2_weighting_factor = 100/math.sqrt(min(n_locs_per_patch, min_number_of_emitters_per_patch)*8**2/(upsampling_factor**2*20.28))\n"," print('Normalization factor: '+str(round(L2_weighting_factor,2)))\n","\n","# -------------------- Patch generation loop --------------------\n","\n","print('-----------------------------------------------------------')\n","for (f, thisFrame) in enumerate(tqdm(Images)):\n","\n"," # Upsample the frame\n"," 
upsampledFrame = np.kron(thisFrame, np.ones((upsampling_factor,upsampling_factor)))\n"," # Read all the provided high-resolution locations for current frame\n"," DataFrame = LocData[LocData['frame'] == f+1].copy()\n","\n"," # Get the approximated locations according to the high-res grid pixel size\n"," Chr_emitters = [int(max(min(round(DataFrame['x [nm]'][i]/pixel_size_hr),Nhr-1),0)) for i in range(id_start+1,id_start+1+len(DataFrame.index))]\n"," Rhr_emitters = [int(max(min(round(DataFrame['y [nm]'][i]/pixel_size_hr),Mhr-1),0)) for i in range(id_start+1,id_start+1+len(DataFrame.index))]\n"," id_start += len(DataFrame.index)\n","\n"," # Build Localization image\n"," LocImage = np.zeros((Mhr,Nhr))\n"," LocImage[(Rhr_emitters, Chr_emitters)] = 1\n","\n"," # Here, there's a choice between the original Gaussian (classification approach) and using the erf function\n"," HeatMapImage = L2_weighting_factor*gaussian_filter(LocImage, gaussian_sigma) \n"," # HeatMapImage = L2_weighting_factor*FromLoc2Image_MultiThreaded(np.array(list(DataFrame['x [nm]'])), np.array(list(DataFrame['y [nm]'])), \n"," # np.ones(len(DataFrame.index)), pixel_size_hr*gaussian_sigma*np.ones(len(DataFrame.index)), \n"," # Mhr, pixel_size_hr)\n"," \n","\n"," # Generate random position for the top left corner of the patch\n"," xc = np.random.randint(0, Mhr-patch_size, size=num_patches_per_frame)\n"," yc = np.random.randint(0, Nhr-patch_size, size=num_patches_per_frame)\n","\n"," for c in range(len(xc)):\n"," if LocImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size].sum() < min_number_of_emitters_per_patch:\n"," skip_counter += 1\n"," continue\n"," \n"," else:\n"," # Limit maximal number of training examples to 15k\n"," if k > max_num_patches:\n"," break\n"," else:\n"," # Assign the patches to the right part of the images\n"," patches[k-1] = upsampledFrame[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n"," spikes[k-1] = LocImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n"," heatmaps[k-1] = HeatMapImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n"," k += 1 # increment current patch count\n","\n","# Remove the empty data\n","patches = patches[:k-1]\n","spikes = spikes[:k-1]\n","heatmaps = heatmaps[:k-1]\n","n_patches = k-1\n","\n","# -------------------- Failsafe --------------------\n","# Check if the size of the training set is smaller than 5k to notify user to simulate more images using ThunderSTORM\n","if ((k-1) < 5000):\n"," # W = '\\033[0m' # white (normal)\n"," # R = '\\033[31m' # red\n"," print(bcolors.WARNING+'!! WARNING: Training set size is below 5K - Consider simulating more images in ThunderSTORM. 
!!'+bcolors.NORMAL)\n","\n","\n","\n","# -------------------- Displays --------------------\n","print('Number of patches skipped due to low density: '+str(skip_counter))\n","# dataSize = int((getsizeof(patches)+getsizeof(heatmaps)+getsizeof(spikes))/(1024*1024)) #rounded in MB\n","# print('Size of patches: '+str(dataSize)+' MB')\n","print(str(n_patches)+' patches were generated.')\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n","\n","# Display patches interactively with a slider\n","def scroll_patches(patch):\n"," f = plt.figure(figsize=(16,6))\n"," plt.subplot(1,3,1)\n"," plt.imshow(patches[patch-1], interpolation='nearest', cmap='gray')\n"," plt.title('Raw data (frame #'+str(patch)+')')\n"," plt.axis('off');\n","\n"," plt.subplot(1,3,2)\n"," plt.imshow(heatmaps[patch-1], interpolation='nearest')\n"," plt.title('Heat map')\n"," plt.axis('off');\n","\n"," plt.subplot(1,3,3)\n"," plt.imshow(spikes[patch-1], interpolation='nearest')\n"," plt.title('Localization map')\n"," plt.axis('off');\n","\n","interact(scroll_patches, patch=widgets.IntSlider(min=1, max=patches.shape[0], step=1, value=0, continuous_update=False));\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DSjXFMevK7Iz","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"hVeyKU0MdAPx","colab_type":"text"},"source":["## **4.1. Select your paths and parameters**\n","\n","---\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","\n","**Training parameters**\n","\n","**`number_of_epochs`:**Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for ~100 epochs. Evaluate the performance after training (see 5). **Default value: 80**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. **If this value is set to 0**, by default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 30** \n","\n","**`initial_learning_rate`:** This parameter represents the initial value to be used as learning rate in the optimizer. 
**Default value: 0.001**"]},{"cell_type":"code","metadata":{"id":"oa5cDZ7f_PF6","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Path to training images and parameters\n","\n","model_path = \"\" #@param {type: \"string\"} \n","model_name = \"\" #@param {type: \"string\"} \n","number_of_epochs = 80#@param {type:\"integer\"}\n","batch_size = 16#@param {type:\"integer\"}\n","\n","number_of_steps = 0#@param {type:\"integer\"}\n","percentage_validation = 30 #@param {type:\"number\"}\n","initial_learning_rate = 0.001 #@param {type:\"number\"}\n","\n","\n","percentage_validation /= 100\n","if number_of_steps == 0: \n"," number_of_steps = int((1-percentage_validation)*n_patches/batch_size)\n"," print('Number of steps: '+str(number_of_steps))\n","\n","# Pretrained model path initialised here so next cell does not need to be run\n","h5_file_path = ''\n","Use_pretrained_model = False\n","\n","if not ('patches' in locals()):\n"," # W = '\\033[0m' # white (normal)\n"," # R = '\\033[31m' # red\n"," print(bcolors.WARNING+'!! WARNING: No patches were found in memory currently. !!'+bcolors.NORMAL)\n","\n","Save_path = os.path.join(model_path, model_name)\n","if os.path.exists(Save_path):\n"," print(bcolors.WARNING+'The model folder already exists and will be overwritten.'+bcolors.NORMAL)\n","\n","print('-----------------------------')\n","print('Training parameters set.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"WIyEvQBWLp9n","colab_type":"text"},"source":["\n","## **4.2. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Deep-STORM 2D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
"]},{"cell_type":"code","metadata":{"id":"oHL5g0w8LqR0","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_'+Weights_choice+'.hdf5 pretrained model does not exist'+bcolors.NORMAL)\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = 
initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead.'+bcolors.NORMAL)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+bcolors.NORMAL)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print('No pretrained network will be used.')\n"," h5_file_path = ''\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"OADNcie-LHxA","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"qDgMu_mAK8US","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start training\n","\n","# Start the clock to measure how long it takes\n","start = time.time()\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(Save_path):\n"," shutil.rmtree(Save_path)\n","\n","# Create the model folder!\n","os.makedirs(Save_path)\n","\n","# Let's go !\n","train_model(patches, heatmaps, Save_path, \n"," steps_per_epoch=number_of_steps, epochs=number_of_epochs, batch_size=batch_size,\n"," upsampling_factor = upsampling_factor,\n"," validation_split = percentage_validation,\n"," initial_learning_rate = initial_learning_rate, \n"," pretrained_model_path = h5_file_path,\n"," L2_weighting_factor = L2_weighting_factor)\n","\n","# # Show info about the GPU memory useage\n","# !nvidia-smi\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"CHVTRjEOLRDH","colab_type":"text"},"source":["##**4.3. 
Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"4N7-ShZpLhwr","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**"]},{"cell_type":"code","metadata":{"id":"JDRsm7uKoBa-","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","#@markdown #####During training, the model files are automatically saved inside a folder named after the parameter `model_name` (see section 4.1). Provide the name of this folder as `QC_model_path` . \n","\n","QC_model_path = \"\" #@param {type:\"string\"}\n","\n","if (Use_the_current_trained_model): \n"," QC_model_path = os.path.join(model_path, model_name)\n","\n","if os.path.exists(QC_model_path):\n"," print(\"The \"+os.path.basename(QC_model_path)+\" model will be evaluated\")\n","else:\n"," print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!'+bcolors.NORMAL)\n"," print('Please make sure you provide a valid model path before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Gw7KaHZUoHC4","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. 
In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"qUc-JMOcoGNZ","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(os.path.join(QC_model_path,'Quality Control/training_evaluation.csv'),'r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(os.path.join(QC_model_path,'Quality Control/lossCurvePlots.png'))\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"32eNQjFioQkY","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"QC_image_folder\" using teh corresponding localization data contained in \"QC_loc_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"dhlTnxC5lUZy","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# ------------------------ User input ------------------------\n","#@markdown ##Choose the folders that contain your Quality Control dataset\n","QC_image_folder = \"\" #@param{type:\"string\"}\n","QC_loc_folder = \"\" #@param{type:\"string\"}\n","#@markdown Get pixel size from file?\n","get_pixel_size_from_file = True #@param {type:\"boolean\"}\n","#@markdown Otherwise, use this value:\n","pixel_size = 100 #@param {type:\"number\"}\n","\n","if get_pixel_size_from_file:\n"," pixel_size_INPUT = None\n","else:\n"," pixel_size_INPUT = pixel_size\n","\n","\n","# ------------------------ QC analysis loop over provided dataset ------------------------\n","\n","savePath = os.path.join(QC_model_path, 'Quality Control')\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(os.path.join(savePath, \"QC_metrics.csv\"), \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"WF v. GT mSSIM\", \"Prediction v. GT NRMSE\",\"WF v. GT NRMSE\", \"Prediction v. GT PSNR\", \"WF v. GT PSNR\"])\n","\n"," # These lists will be used to collect all the metrics values per slice\n"," file_name_list = []\n"," slice_number_list = []\n"," mSSIM_GvP_list = []\n"," mSSIM_GvWF_list = []\n"," NRMSE_GvP_list = []\n"," NRMSE_GvWF_list = []\n"," PSNR_GvP_list = []\n"," PSNR_GvWF_list = []\n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n"," for (imageFilename, locFilename) in zip(list_files(QC_image_folder, 'tif'), list_files(QC_loc_folder, 'csv')):\n"," print('--------------')\n"," print(imageFilename)\n"," print(locFilename)\n","\n"," # Get the prediction\n"," batchFramePredictionLocalization(QC_image_folder, imageFilename, QC_model_path, savePath, pixel_size = pixel_size_INPUT)\n","\n"," # test_model(QC_image_folder, imageFilename, QC_model_path, savePath, display=False);\n"," thisPrediction = io.imread(os.path.join(savePath, 'Predicted_'+imageFilename))\n"," thisWidefield = io.imread(os.path.join(savePath, 'Widefield_'+imageFilename))\n","\n"," Mhr = thisPrediction.shape[0]\n"," Nhr = thisPrediction.shape[1]\n","\n"," if pixel_size_INPUT == None:\n"," pixel_size, N, M = getPixelSizeTIFFmetadata(os.path.join(QC_image_folder,imageFilename))\n","\n"," upsampling_factor = int(Mhr/M)\n"," print('Upsampling factor: '+str(upsampling_factor))\n"," pixel_size_hr = pixel_size/upsampling_factor # in nm\n","\n"," # Load the localization file and display the first\n"," LocData = pd.read_csv(os.path.join(QC_loc_folder,locFilename), index_col=0)\n","\n"," x = np.array(list(LocData['x [nm]']))\n"," y = np.array(list(LocData['y [nm]']))\n"," locImage = FromLoc2Image_SimpleHistogram(x, y, image_size = (Mhr,Nhr), pixel_size = pixel_size_hr)\n","\n"," # Remove extension from filename\n"," imageFilename_no_extension = os.path.splitext(imageFilename)[0]\n","\n"," # io.imsave(os.path.join(savePath, 'GT_image_'+imageFilename), locImage)\n"," saveAsTIF(savePath, 'GT_image_'+imageFilename_no_extension, locImage, pixel_size_hr)\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm, test_prediction_norm = norm_minmse(locImage, thisPrediction, 
normalize_gt=True)\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm, test_wf_norm = norm_minmse(locImage, thisWidefield, normalize_gt=True)\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1., full=True)\n"," index_SSIM_GTvsWF, img_SSIM_GTvsWF = structural_similarity(test_GT_norm, test_wf_norm, data_range=1., full=True)\n","\n","\n"," # Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," # io.imsave(os.path.join(savePath,'SSIM_GTvsPrediction_'+imageFilename),img_SSIM_GTvsPrediction_32bit)\n"," saveAsTIF(savePath,'SSIM_GTvsPrediction_'+imageFilename_no_extension, img_SSIM_GTvsPrediction_32bit, pixel_size_hr)\n","\n","\n"," img_SSIM_GTvsWF_32bit = np.float32(img_SSIM_GTvsWF)\n"," # io.imsave(os.path.join(savePath,'SSIM_GTvsWF_'+imageFilename),img_SSIM_GTvsWF_32bit)\n"," saveAsTIF(savePath,'SSIM_GTvsWF_'+imageFilename_no_extension, img_SSIM_GTvsWF_32bit, pixel_size_hr)\n","\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsWF = np.sqrt(np.square(test_GT_norm - test_wf_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," # io.imsave(os.path.join(savePath,'RSE_GTvsPrediction_'+imageFilename),img_RSE_GTvsPrediction_32bit)\n"," saveAsTIF(savePath,'RSE_GTvsPrediction_'+imageFilename_no_extension, img_RSE_GTvsPrediction_32bit, pixel_size_hr)\n","\n"," img_RSE_GTvsWF_32bit = np.float32(img_RSE_GTvsWF)\n"," # io.imsave(os.path.join(savePath,'RSE_GTvsWF_'+imageFilename),img_RSE_GTvsWF_32bit)\n"," saveAsTIF(savePath,'RSE_GTvsWF_'+imageFilename_no_extension, img_RSE_GTvsWF_32bit, pixel_size_hr)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsWF = np.sqrt(np.mean(img_RSE_GTvsWF))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsWF = psnr(test_GT_norm,test_wf_norm,data_range=1.0)\n","\n"," writer.writerow([imageFilename,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsWF),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsWF),str(PSNR_GTvsPrediction), str(PSNR_GTvsWF)])\n","\n"," # Collect values to display in dataframe output\n"," file_name_list.append(imageFilename)\n"," mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n"," mSSIM_GvWF_list.append(index_SSIM_GTvsWF)\n"," NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n"," NRMSE_GvWF_list.append(NRMSE_GTvsWF)\n"," PSNR_GvP_list.append(PSNR_GTvsPrediction)\n"," PSNR_GvWF_list.append(PSNR_GTvsWF)\n","\n","\n","# Table with metrics as dataframe output\n","pdResults = pd.DataFrame(index = file_name_list)\n","pdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list\n","pdResults[\"Wide-field v. GT mSSIM\"] = mSSIM_GvWF_list\n","pdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list\n","pdResults[\"Wide-field v. GT NRMSE\"] = NRMSE_GvWF_list\n","pdResults[\"Prediction v. 
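The quality metrics used in the loop above can be reproduced on any pair of images with standard scikit-image calls. The sketch below is a minimal, self-contained illustration on synthetic arrays, assuming both images have already been normalised to a common [0, 1] range (the job of `norm_minmse` above); it is not the notebook's exact QC code.

```python
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

rng = np.random.default_rng(0)
gt = rng.random((64, 64)).astype(np.float32)        # stand-in for the normalised GT image
pred = np.clip(gt + 0.05 * rng.standard_normal((64, 64)).astype(np.float32), 0, 1)

# mSSIM plus the full per-pixel SSIM map
mssim, ssim_map = structural_similarity(gt, pred, data_range=1.0, full=True)

# Root-squared-error map; NRMSE is then computed the same way as in the QC cell above
rse_map = np.sqrt(np.square(gt - pred))
nrmse = np.sqrt(np.mean(rse_map))

psnr_value = peak_signal_noise_ratio(gt, pred, data_range=1.0)
print('mSSIM: %.3f  NRMSE: %.3f  PSNR: %.1f dB' % (mssim, nrmse, psnr_value))
```

For real data, `gt` and `pred` would be the rasterised ground-truth image and the network prediction (or the widefield image) after normalisation.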
GT PSNR\"] = PSNR_GvP_list\n","pdResults[\"Wide-field v. GT PSNR\"] = PSNR_GvWF_list\n","\n","\n","# ------------------------ Display ------------------------\n","\n","print('--------------------------------------------')\n","@interact\n","def show_QC_results(file = list_files(QC_image_folder, 'tif')):\n","\n"," plt.figure(figsize=(15,15))\n"," # Target (Ground-truth)\n"," plt.subplot(3,3,1)\n"," plt.axis('off')\n"," img_GT = io.imread(os.path.join(savePath, 'GT_image_'+file))\n"," plt.imshow(img_GT, norm = simple_norm(img_GT, percent = 99.5))\n"," plt.title('Target',fontsize=15)\n","\n"," # Wide-field\n"," plt.subplot(3,3,2)\n"," plt.axis('off')\n"," img_Source = io.imread(os.path.join(savePath, 'Widefield_'+file))\n"," plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n"," plt.title('Widefield',fontsize=15)\n","\n"," #Prediction\n"," plt.subplot(3,3,3)\n"," plt.axis('off')\n"," img_Prediction = io.imread(os.path.join(savePath, 'Predicted_'+file))\n"," plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n"," plt.title('Prediction',fontsize=15)\n","\n"," #Setting up colours\n"," cmap = plt.cm.CMRmap\n","\n"," #SSIM between GT and Source\n"," plt.subplot(3,3,5)\n"," #plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n"," img_SSIM_GTvsWF = io.imread(os.path.join(savePath, 'SSIM_GTvsWF_'+file))\n"," imSSIM_GTvsWF = plt.imshow(img_SSIM_GTvsWF, cmap = cmap, vmin=0, vmax=1)\n"," plt.colorbar(imSSIM_GTvsWF,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. Widefield',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(pdResults.loc[file][\"Wide-field v. GT mSSIM\"],3)),fontsize=14)\n"," plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n"," #SSIM between GT and Prediction\n"," plt.subplot(3,3,6)\n"," #plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n"," img_SSIM_GTvsPrediction = io.imread(os.path.join(savePath, 'SSIM_GTvsPrediction_'+file))\n"," imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n"," plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n"," plt.title('Target vs. Prediction',fontsize=15)\n"," plt.xlabel('mSSIM: '+str(round(pdResults.loc[file][\"Prediction v. 
GT mSSIM\"],3)),fontsize=14)\n","\n"," #Root Squared Error between GT and Source\n"," plt.subplot(3,3,8)\n"," #plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n"," img_RSE_GTvsWF = io.imread(os.path.join(savePath, 'RSE_GTvsWF_'+file))\n"," imRSE_GTvsWF = plt.imshow(img_RSE_GTvsWF, cmap = cmap, vmin=0, vmax = 1)\n"," plt.colorbar(imRSE_GTvsWF,fraction=0.046,pad=0.04)\n"," plt.title('Target vs. Widefield',fontsize=15)\n"," plt.xlabel('NRMSE: '+str(round(pdResults.loc[file][\"Wide-field v. GT NRMSE\"],3))+', PSNR: '+str(round(pdResults.loc[file][\"Wide-field v. GT PSNR\"],3)),fontsize=14)\n"," plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n"," #Root Squared Error between GT and Prediction\n"," plt.subplot(3,3,9)\n"," #plt.axis('off')\n"," plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n"," img_RSE_GTvsPrediction = io.imread(os.path.join(savePath, 'RSE_GTvsPrediction_'+file))\n"," imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n"," plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n"," plt.title('Target vs. Prediction',fontsize=15)\n"," plt.xlabel('NRMSE: '+str(round(pdResults.loc[file][\"Prediction v. GT NRMSE\"],3))+', PSNR: '+str(round(pdResults.loc[file][\"Prediction v. GT PSNR\"],3)),fontsize=14)\n","\n","print('--------------------------------------------')\n","pdResults.head()\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yTRou0izLjhd","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"eAf8aBDmWTx7"},"source":["## **6.1 Generate image prediction and localizations from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the found localizations csv.\n","\n","**`batch_size`:** This paramter determines how many frames are processed by any single pass on the GPU. A higher `batch_size` will make the prediction faster but will use more GPU memory. If an OutOfMemory (OOM) error occurs, decrease the `batch_size`. 
**DEFAULT: 4**\n","\n","**`threshold`:** This parameter determines the threshold for local maxima finding. The value is expected to reside in the range **[0,1]**. A higher `threshold` will result in fewer localizations. **DEFAULT: 0.1**\n","\n","**`neighborhood_size`:** This parameter determines the size of the neighborhood within which the prediction needs to be a local maximum, in recovery pixels (CCD pixel/upsampling_factor). A high `neighborhood_size` will make the prediction slower and potentially discard nearby localizations. **DEFAULT: 3**\n","\n","**`use_local_average`:** This parameter determines whether to locally average the prediction in a 3x3 neighborhood to get the final localizations. If set to **True** it will make inference slightly slower depending on the size of the FOV. **DEFAULT: True**\n"]},{"cell_type":"code","metadata":{"id":"7qn06T_A0lxf","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# ------------------------------- User input -------------------------------\n","#@markdown ### Data parameters\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","#@markdown Get pixel size from file?\n","get_pixel_size_from_file = True #@param {type:\"boolean\"}\n","#@markdown Otherwise, use this value (in nm):\n","pixel_size = 100 #@param {type:\"number\"}\n","\n","#@markdown ### Model parameters\n","#@markdown Do you want to use the model you just trained?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","#@markdown Otherwise, please provide path to the model folder below\n","prediction_model_path = \"\" #@param {type:\"string\"}\n","\n","#@markdown ### Prediction parameters\n","batch_size = 4#@param {type:\"integer\"}\n","\n","#@markdown ### Post processing parameters\n","threshold = 0.1#@param {type:\"number\"}\n","neighborhood_size = 3#@param {type:\"integer\"}\n","#@markdown Do you want to locally average the model output with CoG estimator ?\n","use_local_average = True #@param {type:\"boolean\"}\n","\n","\n","if get_pixel_size_from_file:\n"," pixel_size = None\n","\n","if (Use_the_current_trained_model): \n"," prediction_model_path = os.path.join(model_path, model_name)\n","\n","if os.path.exists(prediction_model_path):\n"," print(\"The \"+os.path.basename(prediction_model_path)+\" model will be used.\")\n","else:\n"," print(bcolors.WARNING+'!! 
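The roles of `threshold` and `neighborhood_size` can be illustrated with a short, standalone sketch: a pixel of the network output is kept as a candidate localization only if it is the maximum of its neighborhood and lies above the threshold. This is a simplified stand-in for the notebook's `batchFramePredictionLocalization` helper, it omits the sub-pixel CoG averaging controlled by `use_local_average`, and the demo array and values are invented.

```python
import numpy as np
from scipy.ndimage import maximum_filter

def find_candidate_maxima(prediction, threshold=0.1, neighborhood_size=3):
    # Keep pixels that equal the maximum of their neighborhood...
    is_local_max = maximum_filter(prediction, size=neighborhood_size) == prediction
    # ...and whose value is above the detection threshold
    is_bright_enough = prediction > threshold
    rows, cols = np.nonzero(is_local_max & is_bright_enough)
    return rows, cols

demo = np.zeros((16, 16), dtype=np.float32)
demo[5, 7], demo[11, 3] = 0.8, 0.4
print(find_candidate_maxima(demo))   # two candidates: (5, 7) and (11, 3)
```

In the notebook itself this logic lives inside `batchFramePredictionLocalization`, which additionally refines each candidate with a centre-of-gravity estimate when local averaging is ticked.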
WARNING: The chosen model does not exist !!'+bcolors.NORMAL)\n"," print('Please make sure you provide a valid model path before proceeding further.')\n","\n","# inform user whether local averaging is being used\n","if use_local_average == True: \n"," print('Using local averaging')\n","\n","if not os.path.exists(Result_folder):\n"," print('Result folder was created.')\n"," os.makedirs(Result_folder)\n","\n","\n","# ------------------------------- Run predictions -------------------------------\n","\n","start = time.time()\n","#%% This script tests the trained fully convolutional network based on the \n","# saved training weights, and normalization created using train_model.\n","\n","if os.path.isdir(Data_folder): \n"," for filename in list_files(Data_folder, 'tif'):\n"," # run the testing/reconstruction process\n"," print(\"------------------------------------\")\n"," print(\"Running prediction on: \"+ filename)\n"," batchFramePredictionLocalization(Data_folder, filename, prediction_model_path, Result_folder, \n"," batch_size, \n"," threshold, \n"," neighborhood_size, \n"," use_local_average,\n"," pixel_size = pixel_size)\n","\n","elif os.path.isfile(Data_folder):\n"," batchFramePredictionLocalization(os.path.dirname(Data_folder), os.path.basename(Data_folder), prediction_model_path, Result_folder, \n"," batch_size, \n"," threshold, \n"," neighborhood_size, \n"," use_local_average, \n"," pixel_size = pixel_size)\n","\n","\n","\n","print('--------------------------------------------------------------------')\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n","\n","\n","# ------------------------------- Interactive display -------------------------------\n","\n","print('--------------------------------------------------------------------')\n","print('---------------------------- Previews ------------------------------')\n","print('--------------------------------------------------------------------')\n","\n","if os.path.isdir(Data_folder): \n"," @interact\n"," def show_QC_results(file = list_files(Data_folder, 'tif')):\n","\n"," plt.figure(figsize=(15,7.5))\n"," # Wide-field\n"," plt.subplot(1,2,1)\n"," plt.axis('off')\n"," img_Source = io.imread(os.path.join(Result_folder, 'Widefield_'+file))\n"," plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n"," plt.title('Widefield', fontsize=15)\n"," # Prediction\n"," plt.subplot(1,2,2)\n"," plt.axis('off')\n"," img_Prediction = io.imread(os.path.join(Result_folder, 'Predicted_'+file))\n"," plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n"," plt.title('Predicted',fontsize=15)\n","\n","if os.path.isfile(Data_folder):\n","\n"," plt.figure(figsize=(15,7.5))\n"," # Wide-field\n"," plt.subplot(1,2,1)\n"," plt.axis('off')\n"," img_Source = io.imread(os.path.join(Result_folder, 'Widefield_'+os.path.basename(Data_folder)))\n"," plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n"," plt.title('Widefield', fontsize=15)\n"," # Prediction\n"," plt.subplot(1,2,2)\n"," plt.axis('off')\n"," img_Prediction = io.imread(os.path.join(Result_folder, 'Predicted_'+os.path.basename(Data_folder)))\n"," plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n"," 
plt.title('Predicted',fontsize=15)\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"ZekzexaPmzFZ","colab_type":"text"},"source":["## **6.2 Drift correction**\n","---\n","\n","The visualization above is the raw output of the network and is displayed at the `upsampling_factor` chosen during model training. The display is a preview without any drift correction applied. This section performs drift correction using cross-correlation between time bins to estimate the drift.\n","\n","**`Loc_file_path`:** is the path to the localization file to use for visualization.\n","\n","**`original_image_path`:** is the path to the original image. This only serves to extract the original image size and pixel size to shape the visualization properly.\n","\n","**`visualization_pixel_size`:** This parameter corresponds to the pixel size to use for the image reconstructions used for the drift correction estimation (in **nm**). A smaller pixel size will be more precise but will take longer to compute. **DEFAULT: 20**\n","\n","**`number_of_bins`:** This parameter defines how many temporal bins are used across the full dataset. All localizations in each bin are used to build an image. This image is used to find the drift with respect to the image obtained from the very first bin. A typical value would correspond to about 500 frames per bin. **DEFAULT: Total number of frames / 500**\n","\n","**`polynomial_fit_degree`:** The drift obtained for each temporal bin needs to be interpolated to every single frame. This is performed by a polynomial fit, the degree of which is defined here. **DEFAULT: 4**\n","\n"," The drift-corrected localization data is automatically saved in the `save_path` folder."]},{"cell_type":"code","metadata":{"id":"hYtP_vh6mzUP","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Data parameters\n","Loc_file_path = \"\" #@param {type:\"string\"}\n","# @markdown Provide information about original data. 
Get the info automatically from the raw data?\n","Get_info_from_file = True #@param {type:\"boolean\"}\n","# Loc_file_path = \"/content/gdrive/My Drive/Colab notebooks testing/DeepSTORM/Glia data from CL/Results from prediction/20200615-M6 with CoM localizations/Localizations_glia_actin_2D - 1-500fr_avg.csv\" #@param {type:\"string\"}\n","original_image_path = \"\" #@param {type:\"string\"}\n","# @markdown Otherwise, please provide image width, height (in pixels) and pixel size (in nm)\n","image_width = 256#@param {type:\"integer\"}\n","image_height = 256#@param {type:\"integer\"}\n","pixel_size = 100 #@param {type:\"number\"}\n","\n","# @markdown ##Drift correction parameters\n","visualization_pixel_size = 20#@param {type:\"number\"}\n","number_of_bins = 50#@param {type:\"integer\"}\n","polynomial_fit_degree = 4#@param {type:\"integer\"}\n","\n","# @markdown ##Saving parameters\n","save_path = '' #@param {type:\"string\"}\n","\n","\n","# Let's go !\n","start = time.time()\n","\n","# Get info from the raw file if selected\n","if Get_info_from_file:\n"," pixel_size, image_width, image_height = getPixelSizeTIFFmetadata(original_image_path, display=True)\n","\n","# Read the localizations in\n","LocData = pd.read_csv(Loc_file_path)\n","\n","# Calculate a few variables \n","Mhr = int(math.ceil(image_height*pixel_size/visualization_pixel_size))\n","Nhr = int(math.ceil(image_width*pixel_size/visualization_pixel_size))\n","nFrames = max(LocData['frame'])\n","x_max = max(LocData['x [nm]'])\n","y_max = max(LocData['y [nm]'])\n","image_size = (Mhr, Nhr)\n","n_locs = len(LocData.index)\n","\n","print('Image size: '+str(image_size))\n","print('Number of frames in data: '+str(nFrames))\n","print('Number of localizations in data: '+str(n_locs))\n","\n","blocksize = math.ceil(nFrames/number_of_bins)\n","print('Number of frames per block: '+str(blocksize))\n","\n","blockDataFrame = LocData[(LocData['frame'] < blocksize)].copy()\n","xc_array = blockDataFrame['x [nm]'].to_numpy(dtype=np.float32)\n","yc_array = blockDataFrame['y [nm]'].to_numpy(dtype=np.float32)\n","\n","# Preparing the Reference image\n","photon_array = np.ones(yc_array.shape[0])\n","sigma_array = np.ones(yc_array.shape[0])\n","ImageRef = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\n","ImagesRef = np.rot90(ImageRef, k=2)\n","\n","xDrift = np.zeros(number_of_bins)\n","yDrift = np.zeros(number_of_bins)\n","\n","filename_no_extension = os.path.splitext(os.path.basename(Loc_file_path))[0]\n","\n","with open(os.path.join(save_path, filename_no_extension+\"_DriftCorrectionData.csv\"), \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"Block #\", \"x-drift [nm]\",\"y-drift [nm]\"])\n","\n"," for b in tqdm(range(number_of_bins)):\n","\n"," blockDataFrame = LocData[(LocData['frame'] >= (b*blocksize)) & (LocData['frame'] < ((b+1)*blocksize))].copy()\n"," xc_array = blockDataFrame['x [nm]'].to_numpy(dtype=np.float32)\n"," yc_array = blockDataFrame['y [nm]'].to_numpy(dtype=np.float32)\n","\n"," photon_array = np.ones(yc_array.shape[0])\n"," sigma_array = np.ones(yc_array.shape[0])\n"," ImageBlock = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\n","\n"," XC = fftconvolve(ImagesRef, ImageBlock, mode = 'same')\n"," yDrift[b], xDrift[b] = subPixelMaxLocalization(XC, method = 'CoM')\n","\n"," # saveAsTIF(save_path, 'ImageBlock'+str(b), ImageBlock, 
visualization_pixel_size)\n"," # saveAsTIF(save_path, 'XCBlock'+str(b), XC, visualization_pixel_size)\n"," writer.writerow([str(b), str((xDrift[b]-xDrift[0])*visualization_pixel_size), str((yDrift[b]-yDrift[0])*visualization_pixel_size)])\n","\n","\n","print('--------------------------------------------------------------------')\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n","\n","print('Fitting drift data...')\n","bin_number = np.arange(number_of_bins)*blocksize + blocksize/2\n","xDrift = (xDrift-xDrift[0])*visualization_pixel_size\n","yDrift = (yDrift-yDrift[0])*visualization_pixel_size\n","\n","xDriftCoeff = np.polyfit(bin_number, xDrift, polynomial_fit_degree)\n","yDriftCoeff = np.polyfit(bin_number, yDrift, polynomial_fit_degree)\n","\n","xDriftFit = np.poly1d(xDriftCoeff)\n","yDriftFit = np.poly1d(yDriftCoeff)\n","bins = np.arange(nFrames)\n","xDriftInterpolated = xDriftFit(bins)\n","yDriftInterpolated = yDriftFit(bins)\n","\n","\n","# ------------------ Displaying the image results ------------------\n","\n","plt.figure(figsize=(15,10))\n","plt.plot(bin_number,xDrift, 'r+', label='x-drift')\n","plt.plot(bin_number,yDrift, 'b+', label='y-drift')\n","plt.plot(bins,xDriftInterpolated, 'r-', label='y-drift (fit)')\n","plt.plot(bins,yDriftInterpolated, 'b-', label='y-drift (fit)')\n","plt.title('Cross-correlation estimated drift')\n","plt.ylabel('Drift [nm]')\n","plt.xlabel('Bin number')\n","plt.legend();\n","\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\", hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n","\n","\n","# ------------------ Actual drift correction -------------------\n","\n","print('Correcting localization data...')\n","xc_array = LocData['x [nm]'].to_numpy(dtype=np.float32)\n","yc_array = LocData['y [nm]'].to_numpy(dtype=np.float32)\n","frames = LocData['frame'].to_numpy(dtype=np.int32)\n","\n","\n","xc_array_Corr, yc_array_Corr = correctDriftLocalization(xc_array, yc_array, frames, xDriftInterpolated, yDriftInterpolated)\n","ImageRaw = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\n","ImageCorr = FromLoc2Image_SimpleHistogram(xc_array_Corr, yc_array_Corr, image_size = image_size, pixel_size = visualization_pixel_size)\n","\n","\n","# ------------------ Displaying the imge results ------------------\n","plt.figure(figsize=(15,7.5))\n","# Raw\n","plt.subplot(1,2,1)\n","plt.axis('off')\n","plt.imshow(ImageRaw, norm = simple_norm(ImageRaw, percent = 99.5))\n","plt.title('Raw', fontsize=15);\n","# Corrected\n","plt.subplot(1,2,2)\n","plt.axis('off')\n","plt.imshow(ImageCorr, norm = simple_norm(ImageCorr, percent = 99.5))\n","plt.title('Corrected',fontsize=15);\n","\n","\n","# ------------------ Table with info -------------------\n","driftCorrectedLocData = pd.DataFrame()\n","driftCorrectedLocData['frame'] = frames\n","driftCorrectedLocData['x [nm]'] = xc_array_Corr\n","driftCorrectedLocData['y [nm]'] = yc_array_Corr\n","driftCorrectedLocData['confidence [a.u]'] = LocData['confidence [a.u]']\n","\n","driftCorrectedLocData.to_csv(os.path.join(save_path, filename_no_extension+'_DriftCorrected.csv'))\n","print('-------------------------------')\n","print('Corrected localizations 
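For readers who want the drift-estimation idea without the notebook bookkeeping, the sketch below cross-correlates a synthetic bin image against a reference and reads the drift off the correlation peak, mirroring the `fftconvolve` trick used above. It runs on made-up data and, unlike the notebook's `subPixelMaxLocalization`, only returns integer-pixel shifts.

```python
import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(1)
reference = rng.random((64, 64))                          # binned image of the first time bin
block = np.roll(reference, shift=(2, 3), axis=(0, 1))     # later bin, drifted by 2 rows / 3 columns

# Convolving with the 180-degree-rotated reference is equivalent to cross-correlation
xc = fftconvolve(np.rot90(reference, k=2), block, mode='same')
peak_row, peak_col = np.unravel_index(np.argmax(xc), xc.shape)
centre_row, centre_col = xc.shape[0] // 2, xc.shape[1] // 2
print('Estimated drift (rows, cols):', peak_row - centre_row, peak_col - centre_col)
# should report a drift of 2 rows and 3 columns
```

The per-bin shifts obtained this way are then what the `np.polyfit`/`np.poly1d` step above interpolates to a drift value for every frame.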
saved.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"mzOuc-V7rB-r","colab_type":"text"},"source":["## **6.3 Visualization of the localizations**\n","---\n","\n","\n","The visualization in section 6.1 is the raw output of the network and displayed at the `upsampling_factor` chosen during model training. This section performs visualization of the result by plotting the localizations as a simple histogram.\n","\n","**`Loc_file_path`:** is the path to the localization file to use for visualization.\n","\n","**`original_image_path`:** is the path to the original image. This only serves to extract the original image size and pixel size to shape the visualization properly.\n","\n","**`visualization_pixel_size`:** This parameter corresponds to the pixel size to use for the final image reconstruction (in **nm**). **DEFAULT: 10**\n","\n","**`visualization_mode`:** This parameter defines what visualization method is used to visualize the final image. NOTES: The Integrated Gaussian can be quite slow. **DEFAULT: Simple histogram.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"876yIXnqq-nW","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Data parameters\n","Use_current_drift_corrected_localizations = True #@param {type:\"boolean\"}\n","# @markdown Otherwise provide a localization file path\n","Loc_file_path = \"\" #@param {type:\"string\"}\n","# @markdown Provide information about original data. Get the info automatically from the raw data?\n","Get_info_from_file = True #@param {type:\"boolean\"}\n","# Loc_file_path = \"/content/gdrive/My Drive/Colab notebooks testing/DeepSTORM/Glia data from CL/Results from prediction/20200615-M6 with CoM localizations/Localizations_glia_actin_2D - 1-500fr_avg.csv\" #@param {type:\"string\"}\n","original_image_path = \"\" #@param {type:\"string\"}\n","# @markdown Otherwise, please provide image width, height (in pixels) and pixel size (in nm)\n","image_width = 256#@param {type:\"integer\"}\n","image_height = 256#@param {type:\"integer\"}\n","pixel_size = 100#@param {type:\"number\"}\n","\n","# @markdown ##Visualization parameters\n","visualization_pixel_size = 10#@param {type:\"number\"}\n","visualization_mode = \"Simple histogram\" #@param [\"Simple histogram\", \"Integrated Gaussian (SLOW!)\"]\n","\n","if not Use_current_drift_corrected_localizations:\n"," filename_no_extension = os.path.splitext(os.path.basename(Loc_file_path))[0]\n","\n","\n","if Get_info_from_file:\n"," pixel_size, image_width, image_height = getPixelSizeTIFFmetadata(original_image_path, display=True)\n","\n","if Use_current_drift_corrected_localizations:\n"," LocData = driftCorrectedLocData\n","else:\n"," LocData = pd.read_csv(Loc_file_path)\n","\n","Mhr = int(math.ceil(image_height*pixel_size/visualization_pixel_size))\n","Nhr = int(math.ceil(image_width*pixel_size/visualization_pixel_size))\n","\n","\n","nFrames = max(LocData['frame'])\n","x_max = max(LocData['x [nm]'])\n","y_max = max(LocData['y [nm]'])\n","image_size = (Mhr, Nhr)\n","\n","print('Image size: '+str(image_size))\n","print('Number of frames in data: '+str(nFrames))\n","print('Number of localizations in data: '+str(len(LocData.index)))\n","\n","xc_array = LocData['x [nm]'].to_numpy()\n","yc_array = LocData['y [nm]'].to_numpy()\n","if (visualization_mode == 'Simple histogram'):\n"," locImage = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\n","elif (visualization_mode == 'Shifted 
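The "Simple histogram" rendering mode amounts to binning the localization coordinates on a grid whose spacing is `visualization_pixel_size`. The sketch below shows the idea with `np.histogram2d` on a handful of invented coordinates; it stands in for, and is not identical to, the notebook's `FromLoc2Image_SimpleHistogram` helper.

```python
import numpy as np

# A handful of invented localizations, in nm
x_nm = np.array([105.0, 1120.3, 1122.8, 2540.0])
y_nm = np.array([90.0, 870.1, 872.5, 1410.7])

visualization_pixel_size = 10.0                      # nm per rendered pixel
image_width_nm, image_height_nm = 25600.0, 25600.0   # e.g. 256 camera pixels of 100 nm

n_cols = int(np.ceil(image_width_nm / visualization_pixel_size))
n_rows = int(np.ceil(image_height_nm / visualization_pixel_size))

# histogram2d bins its first argument along the first (row) axis, so y goes first
loc_image, _, _ = np.histogram2d(y_nm, x_nm,
                                 bins=(n_rows, n_cols),
                                 range=((0, image_height_nm), (0, image_width_nm)))
print(loc_image.shape, int(loc_image.sum()))         # (2560, 2560) grid holding 4 counts
```

Rendering at a smaller `visualization_pixel_size` simply increases `n_rows`/`n_cols`, which is why finer reconstructions take longer to build and display.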
histogram'):\n"," print(bcolors.WARNING+'Method not implemented yet!'+bcolors.NORMAL)\n"," locImage = np.zeros(image_size)\n","elif (visualization_mode == 'Integrated Gaussian (SLOW!)'):\n"," photon_array = np.ones(xc_array.shape)\n"," sigma_array = np.ones(xc_array.shape)\n"," locImage = FromLoc2Image_Erf(xc_array, yc_array, photon_array, sigma_array, image_size = image_size, pixel_size = visualization_pixel_size)\n","\n","print('--------------------------------------------------------------------')\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","minutes, seconds = divmod(dt, 60) \n","hours, minutes = divmod(minutes, 60) \n","print(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n","\n","# Display\n","plt.figure(figsize=(20,10))\n","plt.axis('off')\n","# plt.imshow(locImage, cmap='gray');\n","plt.imshow(locImage, norm = simple_norm(locImage, percent = 99.5));\n","\n","\n","LocData.head()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"PdOhWwMn1zIT","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ---\n","# @markdown #Play this cell to save the visualization\n","# @markdown ####Please select a path to the folder where to save the visualization.\n","save_path = \"\" #@param {type:\"string\"}\n","\n","if not os.path.exists(save_path):\n"," os.makedirs(save_path)\n"," print('Folder created.')\n","\n","saveAsTIF(save_path, filename_no_extension+'_Visualization', locImage, visualization_pixel_size)\n","print('Image saved.')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"1EszIF4Dkz_n","colab_type":"text"},"source":["## **6.4. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"UgN-NooKk3nV","colab_type":"text"},"source":["\n","#**Thank you for using Deep-STORM 2D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Noise2VOID_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Noise2VOID_2D_ZeroCostDL4Mic.ipynb index 3f739b2c..469219a5 100755 --- a/Colab_notebooks/Noise2VOID_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Noise2VOID_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"accelerator":"GPU","colab":{"name":"Noise2VOID_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}],"collapsed_sections":[],"toc_visible":true},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"cells":[{"cell_type":"markdown","metadata":{"colab_type":"text","id":"IkSguVy8Xv83"},"source":["# **Image denoising using Noise2Void 2D**\n","\n","---\n","\n"," Noise2Void 2D is deep-learning method that can be used to denoise 2D microscopy images. 
By running this notebook, you can train your own network and denoise your images. \n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper: **Noise2Void - Learning Denoising from Single Noisy Images**\n","Alexander Krull, Tim-Oliver Buchholz, Florian Jug\n","https://arxiv.org/abs/1811.10980\n","\n","And source code found in: https://github.com/juglab/n2v\n","\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"gKDLkLWUd-YX"},"source":["# **0. 
Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For Noise2Void to train, it only requires a single noisy image but multiple images can be used. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","Please note that you currently can **only use .tif files!**\n","\n","**We strongly recommend that you generate high signal to noise ration version of your noisy images (Quality control dataset). These images can be used to assess the quality of your trained model**. The quality control assessment can be done directly in this notebook.\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," - **Training dataset**\n"," - **Quality control dataset** (Optional but recomended)\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif \n"," - **Data to be predicted** \n"," - Results\n","\n","\n","The **Results** folder will contain the processed images, trained model and network parameters as csv file. Your original images remain unmodified.\n","\n","---\n","**Important note**\n","\n","- If you wish to **train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"cbTknRcviyT7"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"DMNHVZfHmbKb"},"source":["## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"h5i5CS2bSmZr","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. 
To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"n3B3meGTbYVi"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"01Djr8v-5pPk","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"n4yWFoJNnoin"},"source":["# **2. Install Noise2Void and Dependencies**\n","---"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"fq21zJVFNASx","colab":{}},"source":["#@markdown ##Install Noise2Void and dependencies\n","\n","# Here we enable Tensorflow 1. 
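If you want to confirm both prerequisites from Sections 1.1 and 1.2 (a GPU allocated and Google Drive mounted) in a single cell, a minimal sketch such as the one below will do. It reuses the same `tf.test.gpu_device_name()` call as the cell above plus a simple path check, and is an optional addition rather than part of the original notebook.

```python
import os
import tensorflow as tf  # with %tensorflow_version 1.x this is the TF 1.x build on Colab

def colab_session_ready(drive_root="/content/gdrive"):
    """Return True when a GPU is visible and Google Drive is mounted."""
    gpu = tf.test.gpu_device_name()          # '' when no GPU has been allocated
    mounted = os.path.isdir(drive_root) and len(os.listdir(drive_root)) > 0
    print("GPU device:", gpu if gpu else "none allocated")
    print("Google Drive mounted:", mounted)
    return bool(gpu) and mounted

colab_session_ready()
```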
\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","\n","# Here we install Noise2Void and other required packages\n","!pip install n2v\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","print(\"Noise2Void installed.\")\n","\n","# Here we install all libraries and other depencies to run the notebook.\n","\n","# ------- Variable specific to N2V -------\n","from n2v.models import N2VConfig, N2V\n","from csbdeep.utils import plot_history\n","from n2v.utils.n2v_utils import manipulate_val_data\n","from n2v.internals.N2V_DataGenerator import N2V_DataGenerator\n","from csbdeep.io import save_tiff_imagej_compatible\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HLYcZR9gMv42"},"source":["# **3. Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`:** These is the path to your folders containing the Training_source (noisy images). To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Do not re-use the name of an existing model (saved in the same folder), otherwise it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-200 epochs. Evaluate the performance after training (see 4.3.). **Default value: 30**\n"," \n","**`patch_size`:** Noise2Void divides the image into patches for training. Input the size of the patches (length of a side). 
The value should be between 64 and the dimensions of the image and divisible by 8. **Default value: 64**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0004**\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"ewpNJ_I0Mv47","colab":{}},"source":["# create DataGenerator-object.\n","\n","datagen = N2V_DataGenerator()\n","\n","#@markdown ###Path to training image(s): \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","#compatibility to easily change the name of the parameters\n","training_images = Training_source \n","imgs = datagen.load_imgs_from_directory(directory = Training_source)\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","number_of_epochs = 30#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels)\n","patch_size = 64#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","number_of_steps = 100#@param {type:\"number\"}\n","batch_size = 128#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," # number_of_steps is defined in the following cell in this case\n"," batch_size = 128\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n"," \n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name): \n"," print(R + \"!! WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","# This will open a randomly chosen dataset input image\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
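To make the default `number_of_steps` rule described above (number of patches divided by `batch_size`) more concrete, here is a rough, illustrative estimate of the patch budget from the image sizes. The notebook itself computes the exact value later from the generated patch array, and this sketch assumes 8-fold augmentation, so treat it only as an approximation.

```python
import math

def estimate_steps_per_epoch(image_shapes, patch_size=64, batch_size=128,
                             percentage_validation=10, augment=True):
    """Approximate number_of_steps = number of training patches / batch_size."""
    patches = sum((h // patch_size) * (w // patch_size) for h, w in image_shapes)
    if augment:
        patches *= 8                       # assumed 8-fold rotation/flip augmentation
    patches = int(patches * (1 - percentage_validation / 100))
    return max(1, math.ceil(patches / batch_size))

# Example: ten 1024 x 1024 images with the default settings -> about 144 steps per epoch.
print(estimate_steps_per_epoch([(1024, 1024)] * 10))
```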
Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not run)\n","Use_pretrained_model = False\n","\n","# Here we enable data augmentation by default (in case the cell is not ran)\n","Use_Data_augmentation = True\n","\n","print(\"Parameters initiated.\")\n","\n","#Here we display one image\n","norm = simple_norm(x, percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"STDOuNOFsTTJ","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"E4QW-tvYsWhX","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. This only works if the patches are square in XY.\n","\n"," **By default data augmentation is enabled. Disable this option is you run out of RAM during the training**.\n"," "]},{"cell_type":"code","metadata":{"id":"-Vy-vV7ssabS","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","#@markdown ##Play this cell to enable or disable data augmentation: \n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"W6pZg0KVnPzf","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a N2V 2D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. 
This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"l-EDcv3Wyvqb","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning 
rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained nerwork will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"keIQhCmOMv5S"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"PXcLuX5jbNUv"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"rBelu-LtbOTh","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","# split patches from the training images\n","Xdata = datagen.generate_patches_from_list(imgs, shape=(patch_size,patch_size), augment=Use_Data_augmentation)\n","shape_of_Xdata = Xdata.shape\n","# create a threshold (10 % patches for the validation)\n","threshold = int(shape_of_Xdata[0]*(percentage_validation/100))\n","# split the patches into training patches and validation patches\n","X = Xdata[threshold:]\n","X_val = Xdata[:threshold]\n","print(Xdata.shape[0],\"patches created.\")\n","print(threshold,\"patch images for validation (\",percentage_validation,\"%).\")\n","print(X.shape[0]-threshold,\"patch images for training.\")\n","%memit\n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# create a Config object\n","config = N2VConfig(X, unet_kern_size=3, \n"," train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, \n"," train_loss='mse', batch_norm=True, train_batch_size=batch_size, n2v_perc_pix=0.198, \n"," n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate = initial_learning_rate)\n","\n","# Let's look at the 
parameters stored in the config-object.\n","vars(config)\n"," \n"," \n","# create network model.\n","model = N2V(config=config, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","print(\"Setup done.\")\n","print(config)\n","\n","\n","# creates a plot and shows one training patch and one validation patch.\n","plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(X[0,...,0], cmap='magma')\n","plt.axis('off')\n","plt.title('Training Patch');\n","plt.subplot(1,2,2)\n","plt.imshow(X_val[0,...,0], cmap='magma')\n","plt.axis('off')\n","plt.title('Validation Patch');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"0Dfn8ZsEMv5d"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for data mining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way to circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"fisJmA13Mv5e","scrolled":true,"colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start Training\n","%memit\n","\n","history = model.train(X, X_val)\n","print(\"Training done.\")\n","%memit\n","\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Vd9igRYvSnTr"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"sTMDT1u7rK9g","colab_type":"text"},"source":["# **5. 
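Because the training cell in Section 4.2 already measures elapsed time, you can use the duration of a short trial run to judge whether a longer run will stay inside the Colab session limit mentioned in the critical note above. The helper below is purely illustrative and not part of the original notebook.

```python
def fits_in_colab_session(seconds_per_epoch, number_of_epochs, limit_hours=12):
    """Rough check that the planned training time stays under the Colab session limit."""
    total_hours = seconds_per_epoch * number_of_epochs / 3600
    print(f"Estimated training time: {total_hours:.1f} h (session limit about {limit_hours} h)")
    return total_hours < limit_hours

# Example: if one epoch of a trial run took 90 s, 200 epochs need roughly 5 h.
fits_in_colab_session(seconds_per_epoch=90, number_of_epochs=200)
```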
Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"OVxLyPyPiv85","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"WZDvRjLZu-Lm"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","It is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact noise patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"vMzSP50kMv5p","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
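The overfitting signature described in Section 5.1 (validation loss rising while training loss keeps falling) can also be flagged programmatically from the `training_evaluation.csv` written in Section 4.2. The sketch below is an optional heuristic; the window size is an arbitrary choice, not a value from the notebook.

```python
import pandas as pd

def overfitting_warning(csv_path, window=5):
    """Heuristic check on the loss curves saved by the training cell."""
    history = pd.read_csv(csv_path)
    if len(history) < 2 * window:
        print("Not enough epochs recorded to judge overfitting yet.")
        return False
    recent = history.tail(window)
    earlier = history.tail(2 * window).head(window)
    val_up = recent["val_loss"].mean() > earlier["val_loss"].mean()
    train_down = recent["loss"].mean() < earlier["loss"].mean()
    if val_up and train_down:
        print("Possible overfitting: validation loss is rising while training loss falls.")
    else:
        print("No obvious overfitting signature in the last epochs.")
    return val_up and train_down

# Example call, following this notebook's folder convention:
# overfitting_warning(QC_model_path + '/' + QC_model_name + '/Quality Control/training_evaluation.csv')
```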
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"lreUY7-SsGkI","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
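To make the PSNR definition above concrete, here is a tiny worked example on synthetic data. It compares the textbook formula PSNR = 10·log10(data_range² / MSE) against the scikit-image function that this notebook already imports; the noise level is arbitrary.

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as psnr

rng = np.random.RandomState(0)
ground_truth = rng.rand(64, 64).astype(np.float32)                      # values in [0, 1]
prediction = np.clip(ground_truth + 0.05 * rng.randn(64, 64), 0, 1).astype(np.float32)

mse = np.mean((ground_truth - prediction) ** 2)
psnr_by_hand = 10 * np.log10(1.0 ** 2 / mse)                            # data_range = 1.0
psnr_skimage = psnr(ground_truth, prediction, data_range=1.0)

print(f"MSE: {mse:.5f}")
print(f"PSNR by hand: {psnr_by_hand:.2f} dB, scikit-image: {psnr_skimage:.2f} dB")
```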
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n"]},{"cell_type":"code","metadata":{"id":"kjbHJHbtsg2R","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Activate the pretrained model. \n","model_training = N2V(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," predicted = model.predict(img, axes='YX', n_tiles=(2,1))\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(filename, predicted)\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * 
x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," 
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT)\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source)\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction)\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. 
Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"DWAhOBc7gpzN"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"KAILvLGFS2-1"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you will train.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"bl3EdYFVS7X9","colab":{}},"source":["#Activate the pretrained model. 
\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING +'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#Activate the pretrained model. \n","config = None\n","model = N2V(config, Prediction_model_name, basedir=Prediction_model_path)\n","\n","\n","# creates a loop, creating filenames and saving them\n","print(\"Saving the images...\")\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","# The code by Lucas von Chamier.\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='YX', n_tiles=(2,1))\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='YX') \n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"PfTw_pQUUAqB"},"source":["## **6.2. Assess predicted output**\n","---\n","\n","\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"jFp-0y4zT_gL","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","plt.axis('off');\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Predicted output')\n","plt.axis('off');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"wgO7Ok1PBFQj"},"source":["## **6.3. 
Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS","colab_type":"text"},"source":["#**Thank you for using Noise2Void 2D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"accelerator":"GPU","colab":{"name":"Noise2Void_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}],"collapsed_sections":[],"toc_visible":true},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"cells":[{"cell_type":"markdown","metadata":{"colab_type":"text","id":"IkSguVy8Xv83"},"source":["# **Noise2Void (2D)**\n","\n","---\n","\n"," Noise2Void is a deep-learning method that can be used to denoise many types of images, including microscopy images and which was originally published by [Krull *et al.* on arXiv](https://arxiv.org/abs/1811.10980). It allows denoising of image data in a self-supervised manner, therefore high-quality, low noise equivalent images are not necessary to train this network. This is performed by \"masking\" a random subset of pixels in the noisy image and training the network to predict the values in these pixels. The resulting output is a denoised version of the image. Noise2Void is based on the popular U-Net network architecture, adapted from [CARE](https://www.nature.com/articles/s41592-018-0216-7).\n","\n"," **This particular notebook enables self-supervised denoised of 2D dataset. If you are interested in 3D dataset, you should use the Noise2Void 3D notebook instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). 
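One convenient way to follow the advice in Section 6.3 above and take your results out of Colab is to zip the result folder and download the archive directly. The sketch below uses standard calls (`shutil.make_archive` and, in Colab, `google.colab.files.download`); the path is a placeholder and this cell is not part of the original notebook.

```python
import os
import shutil

# Placeholder path -- point this at your own Result_folder or model folder.
folder_to_download = "/content/gdrive/My Drive/Data/Results"

if os.path.isdir(folder_to_download):
    archive_path = shutil.make_archive("/content/results_backup", "zip", folder_to_download)
    print("Created archive:", archive_path)
    # In Colab, the archive can then be downloaded to your computer:
    # from google.colab import files
    # files.download(archive_path)
else:
    print("Folder not found -- adjust folder_to_download first.")
```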
Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the following paper:\n","\n","**Noise2Void - Learning Denoising from Single Noisy Images**\n","from Krull *et al.* published on arXiv in 2018 (https://arxiv.org/abs/1811.10980)\n","\n","And source code found in: https://github.com/juglab/n2v\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"gKDLkLWUd-YX"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For Noise2Void to train, it only requires a single noisy image but multiple images can be used. 
Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","Please note that you currently can **only use .tif files!**\n","\n","**We strongly recommend that you generate high signal to noise ration version of your noisy images (Quality control dataset). These images can be used to assess the quality of your trained model**. The quality control assessment can be done directly in this notebook.\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," - **Training dataset**\n"," - **Quality control dataset** (Optional but recomended)\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif \n"," - **Data to be predicted** \n"," - Results\n","\n","\n","The **Results** folder will contain the processed images, trained model and network parameters as csv file. Your original images remain unmodified.\n","\n","---\n","**Important note**\n","\n","- If you wish to **train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"cbTknRcviyT7"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"DMNHVZfHmbKb"},"source":["## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"h5i5CS2bSmZr","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"n3B3meGTbYVi"},"source":["## **1.2. 
Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"01Djr8v-5pPk","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"n4yWFoJNnoin"},"source":["# **2. Install Noise2Void and dependencies**\n","---"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"fq21zJVFNASx","colab":{}},"source":["#@markdown ##Install Noise2Void and dependencies\n","\n","# Here we enable Tensorflow 1. \n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","\n","# Here we install Noise2Void and other required packages\n","!pip install n2v\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","print(\"Noise2Void installed.\")\n","\n","# Here we install all libraries and other depencies to run the notebook.\n","\n","# ------- Variable specific to N2V -------\n","from n2v.models import N2VConfig, N2V\n","from csbdeep.utils import plot_history\n","from n2v.utils.n2v_utils import manipulate_val_data\n","from n2v.internals.N2V_DataGenerator import N2V_DataGenerator\n","from csbdeep.io import save_tiff_imagej_compatible\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HLYcZR9gMv42"},"source":["# **3. 
Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source`:** This is the path to the folder containing your Training_source (noisy images). To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model-style names, not my-model (use \"_\" not \"-\"). Do not use spaces in the name. Do not re-use the name of an existing model (saved in the same folder), otherwise it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained for. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-200 epochs. Evaluate the performance after training (see section 5). **Default value: 30**\n"," \n","**`patch_size`:** Noise2Void divides the image into patches for training. Input the size of the patches (length of a side). The value should be between 64 and the dimensions of the image and divisible by 8. **Default value: 64**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size`:** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`number_of_steps`:** Define the number of training steps per epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patches / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as the learning rate. 
**Default value: 0.0004**\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"ewpNJ_I0Mv47","colab":{}},"source":["# create DataGenerator-object.\n","\n","datagen = N2V_DataGenerator()\n","\n","#@markdown ###Path to training image(s): \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","#compatibility to easily change the name of the parameters\n","training_images = Training_source \n","imgs = datagen.load_imgs_from_directory(directory = Training_source)\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","number_of_epochs = 30#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels)\n","patch_size = 64#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","batch_size = 128#@param {type:\"number\"}\n","number_of_steps = 100#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," # number_of_steps is defined in the following cell in this case\n"," batch_size = 128\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n"," \n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name): \n"," print(R + \"!! WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","# This will open a randomly chosen dataset input image\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not run)\n","Use_pretrained_model = False\n","\n","# Here we enable data augmentation by default (in case the cell is not ran)\n","Use_Data_augmentation = True\n","\n","print(\"Parameters initiated.\")\n","\n","#Here we display one image\n","norm = simple_norm(x, percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"STDOuNOFsTTJ","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"E4QW-tvYsWhX","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. This only works if the patches are square in XY.\n","\n"," **By default data augmentation is enabled. Disable this option is you run out of RAM during the training**.\n"," "]},{"cell_type":"code","metadata":{"id":"-Vy-vV7ssabS","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","#@markdown ##Play this cell to enable or disable data augmentation: \n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"W6pZg0KVnPzf","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a N2V 2D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. 
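\n","\n","For orientation, the sketch below shows roughly how such a learning rate can be retrieved from a previous run (an illustrative example only; the folder path and variable names are hypothetical, and the cell below performs the equivalent lookup automatically):\n","\n","```python\n","import pandas as pd\n","\n","# hypothetical model folder from an earlier ZeroCostDL4Mic run\n","csv_path = '/content/gdrive/My Drive/my_model/Quality Control/training_evaluation.csv'\n","\n","history = pd.read_csv(csv_path)\n","last_lr = history['learning rate'].iloc[-1]  # learning rate at the last epoch\n","best_lr = history.loc[history['val_loss'].idxmin(), 'learning rate']  # epoch with the lowest val_loss\n","```\n","\n","The learning rate recorded at the end of that run is the value to carry over. 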
This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"l-EDcv3Wyvqb","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning 
rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"keIQhCmOMv5S"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"PXcLuX5jbNUv"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from section 3 to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"rBelu-LtbOTh","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","# split patches from the training images\n","Xdata = datagen.generate_patches_from_list(imgs, shape=(patch_size,patch_size), augment=Use_Data_augmentation)\n","shape_of_Xdata = Xdata.shape\n","# create a threshold (10 % patches for the validation)\n","threshold = int(shape_of_Xdata[0]*(percentage_validation/100))\n","# split the patches into training patches and validation patches\n","X = Xdata[threshold:]\n","X_val = Xdata[:threshold]\n","print(Xdata.shape[0],\"patches created.\")\n","print(threshold,\"patch images for validation (\",percentage_validation,\"%).\")\n","print(X.shape[0],\"patch images for training.\")\n","%memit\n","\n","#Here we automatically define number_of_steps as a function of the training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate is set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# create a Config object\n","config = N2VConfig(X, unet_kern_size=3, \n"," train_steps_per_epoch=number_of_steps, train_epochs=number_of_epochs, \n"," train_loss='mse', batch_norm=True, train_batch_size=batch_size, n2v_perc_pix=0.198, \n"," n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate = initial_learning_rate)\n","\n","# Let's look at the 
parameters stored in the config-object.\n","vars(config)\n"," \n"," \n","# create network model.\n","model = N2V(config=config, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","print(\"Setup done.\")\n","print(config)\n","\n","\n","# creates a plot and shows one training patch and one validation patch.\n","plt.figure(figsize=(16,87))\n","plt.subplot(1,2,1)\n","plt.imshow(X[0,...,0], cmap='magma')\n","plt.axis('off')\n","plt.title('Training Patch');\n","plt.subplot(1,2,2)\n","plt.imshow(X_val[0,...,0], cmap='magma')\n","plt.axis('off')\n","plt.title('Validation Patch');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"0Dfn8ZsEMv5d"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"fisJmA13Mv5e","scrolled":true,"colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start training\n","%memit\n","\n","history = model.train(X, X_val)\n","print(\"Training done.\")\n","%memit\n","\n","\n","print(\"Training, done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Vd9igRYvSnTr"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"sTMDT1u7rK9g","colab_type":"text"},"source":["# **5. 
Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend performing quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"OVxLyPyPiv85","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"WZDvRjLZu-Lm"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","It is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and its target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact noise patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"vMzSP50kMv5p","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"lreUY7-SsGkI","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
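\n","\n","As a minimal, self-contained illustration (not the exact code used in this cell; random toy arrays stand in for your normalised ground-truth and prediction images), these metrics can be computed with scikit-image and NumPy as follows:\n","\n","```python\n","import numpy as np\n","from skimage.metrics import structural_similarity, peak_signal_noise_ratio\n","\n","# toy example: two images already normalised to the [0, 1] range\n","gt = np.random.rand(256, 256).astype(np.float32)\n","pred = np.clip(gt + 0.05 * np.random.randn(256, 256), 0, 1)\n","\n","# mean SSIM over the whole image (a single score, 1 means identical structures)\n","mssim = structural_similarity(gt, pred, data_range=1.0, gaussian_weights=True, sigma=1.5, use_sample_covariance=False)\n","\n","# normalised root mean squared error (lower is better)\n","nrmse = np.sqrt(np.mean((gt - pred) ** 2))\n","\n","# peak signal-to-noise ratio in decibels (higher is better)\n","psnr_value = peak_signal_noise_ratio(gt, pred, data_range=1.0)\n","```\n","\n","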
The higher the score, the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n"]},{"cell_type":"code","metadata":{"id":"kjbHJHbtsg2R","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Activate the pretrained model. \n","model_training = N2V(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","\n","print('Number of test images found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","# (use the QC model loaded just above, so this also works when no model was trained in this session)\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," predicted = model_training.predict(img, axes='YX', n_tiles=(2,1))\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(filename, predicted)\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True if the gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * 
x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," 
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT)\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source)\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction)\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. 
Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"DWAhOBc7gpzN"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"KAILvLGFS2-1"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you will train.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"bl3EdYFVS7X9","colab":{}},"source":["#Activate the pretrained model. 
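\n","\n","# Note: the prediction below is computed in tiles (n_tiles=(2,1)) to limit GPU memory use;\n","# if very large images still trigger out-of-memory errors, increasing n_tiles may help.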
\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING +'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#Activate the pretrained model. \n","config = None\n","model = N2V(config, Prediction_model_name, basedir=Prediction_model_path)\n","\n","\n","# creates a loop, creating filenames and saving them\n","print(\"Saving the images...\")\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","# The code by Lucas von Chamier.\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='YX', n_tiles=(2,1))\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='YX') \n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"PfTw_pQUUAqB"},"source":["## **6.2. Assess predicted output**\n","---\n","\n","\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"jFp-0y4zT_gL","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","plt.axis('off');\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Predicted output')\n","plt.axis('off');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"wgO7Ok1PBFQj"},"source":["## **6.3. 
Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS","colab_type":"text"},"source":["#**Thank you for using Noise2Void 2D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Noise2VOID_3D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Noise2VOID_3D_ZeroCostDL4Mic.ipynb index 09686655..21bd0b6d 100755 --- a/Colab_notebooks/Noise2VOID_3D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Noise2VOID_3D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Noise2VOID_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1WZRIoSBNcRUEq4-Rq5M4mDkIaOlEHnxz","timestamp":1588762142860},{"file_id":"10weAY0es-pEfHlACCaBCKK7PmgdoJqdh","timestamp":1587728072051},{"file_id":"10Ze0rFZoooyyTL_OIVWGdFJEhWE6_cSB","timestamp":1586789421439},{"file_id":"1SsGyUbWcMaLGHFepMuKElRNYLdEBUwf6","timestamp":1583244509550}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.7"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83","colab_type":"text"},"source":["# **Image denoising using Noise2Void 3D**\n","\n","---\n","\n"," Noise2Void 3D is deep-learning method that can be used to denoise 3D microscopy images (xyz). By running this notebook, you can train your own network and denoise your images. \n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper: **Noise2Void - Learning Denoising from Single Noisy Images**\n","Alexander Krull, Tim-Oliver Buchholz, Florian Jug\n","https://arxiv.org/abs/1811.10980\n","\n","And source code found in: https://github.com/juglab/n2v\n","\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","\n","\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. 
To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX","colab_type":"text"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For Noise2Void to train, it only requires a single noisy image but multiple images can be used. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","Please note that you currently can **only use .tif files!**\n","\n","**We strongly recommend that you generate high signal to noise ration version of your noisy images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," - **Training dataset**\n"," - **Quality control dataset** (Optional but recomended)\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif \n"," - **Data to be predicted** \n"," - **Results**\n","\n","\n","The **Results** folder will contain the processed images, trained model and network parameters as csv file. 
Your original images remain unmodified.\n","\n","---\n","**Important note**\n","\n","- If you wish to **train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"zCvebubeSaGY","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"sNIVx8_CLolt","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". 
\n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"AdN8B91xZO0x"},"source":["# **2. Install Noise2Void**\n","---"]},{"cell_type":"code","metadata":{"id":"fq21zJVFNASx","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Noise2Void and dependencies\n","\n","# Enable the Tensorflow 1 instead of the Tensorflow 2.\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","\n","print(\"Tensorflow enabled.\")\n","\n","# Here we install Noise2Void and other required packages\n","!pip install n2v\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","print(\"Noise2Void installed.\")\n","\n","# Here we install all libraries and other depencies to run the notebook.\n","\n","# ------- Variable specific to N2V -------\n","from n2v.models import N2VConfig, N2V\n","from csbdeep.utils import plot_history\n","from n2v.utils.n2v_utils import manipulate_val_data\n","from n2v.internals.N2V_DataGenerator import N2V_DataGenerator\n","from csbdeep.io import save_tiff_imagej_compatible\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"FQ_QxtSWQ7CL","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"AuESFimvMv43","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`:** This is the path to your folders containing the Training_source (noisy images). To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. 
Do not re-use the name of an existing model (saved in the same folder), otherwise it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-200 epochs. Evaluate the performance after training (see 5.). **Default value: 30**\n","\n","**`patch_size`:** Noise2Void divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 64**\n","\n","**`patch_height`:** The value should be smaller than the Z dimensions of the image and divisible by 4. When analysing isotropic stacks patch_size and patch_height should have similar values.\n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappear.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0004**\n"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# Create DataGenerator-object.\n","datagen = N2V_DataGenerator()\n","\n","#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","imgs = datagen.load_imgs_from_directory(directory = Training_source, dims='ZYX')\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of steps and epochs:\n","\n","number_of_epochs = 30#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 64#@param {type:\"number\"}\n","\n","patch_height = 4#@param {type:\"number\"}\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","number_of_steps = 100#@param {type:\"number\"}\n","batch_size = 128#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," # number_of_steps is defined in the following cell in this case\n"," batch_size = 128\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name): \n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","#Load one randomly chosen training target file\n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING + \"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not run)\n","Use_pretrained_model = False\n","\n","# Here we enable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = True\n","\n","print(\"Parameters initiated.\")\n","\n","\n","#Here we display a single z plane\n","\n","norm = simple_norm(x[mid_plane], percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xyQZKby8yFME","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"w_jCy7xOx2g3","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. This only works if the patches are square in XY.\n","\n"," By default data augmentation is enabled. 
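As an aside on the augmentation described above, the rotations and flips it refers to can be pictured with the following minimal numpy sketch (illustration only, on a dummy patch; the notebook's own augmentation is applied internally by `N2V_DataGenerator.generate_patches_from_list(..., augment=True)` in section 4.1):

```python
import numpy as np

# Dummy (Z, Y, X) patch; XY must be square for 90-degree rotations to preserve the shape.
patch = np.random.rand(4, 64, 64)

# Three 90-degree rotations in the XY plane, plus a flip along the X axis.
augmented = [np.rot90(patch, k=k, axes=(1, 2)) for k in (1, 2, 3)]
augmented.append(np.flip(patch, axis=2))

print([a.shape for a in augmented])  # all remain (4, 64, 64)
```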
Disable this option is you run out of RAM during the training.\n"," "]},{"cell_type":"code","metadata":{"id":"DMqWq5-AxnFU","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","#@markdown ##Play this cell to enable or disable data augmentation: \n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"3L9zSGtORKYI","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a N2V 3D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"9vC2n-HeLdiJ","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not 
os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"MCGklf1vZf2M","colab_type":"text"},"source":["#**4. Train your network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"1KYOuygETJkT","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. 
to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"lIUAOJ_LMv5E","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","#Disable some of the warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","# Create batches from the training data.\n","patches = datagen.generate_patches_from_list(imgs, shape=(patch_height, patch_size, patch_size), augment=Use_Data_augmentation)\n","\n","# Patches are divited into training and validation patch set. This inhibits over-lapping of patches. \n","number_train_images =int(len(patches)*(percentage_validation/100))\n","X = patches[number_train_images:]\n","X_val = patches[:number_train_images]\n","\n","print(len(patches),\"patches created.\")\n","print(number_train_images,\"patch images for validation (\",percentage_validation,\"%).\")\n","print((len(patches)-number_train_images),\"patch images for training.\")\n","%memit \n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size) + 1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# creates Congfig object. \n","config = N2VConfig(X, unet_kern_size=3, \n"," train_steps_per_epoch=number_of_steps,train_epochs=number_of_epochs, train_loss='mse', batch_norm=True, \n"," train_batch_size=batch_size, n2v_perc_pix=0.198, n2v_patch_shape=(patch_height, patch_size, patch_size), \n"," n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate = initial_learning_rate)\n","\n","vars(config)\n","\n","# Create the default model.\n","model = N2V(config=config, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","print(\"Parameters transferred into the model.\")\n","print(config)\n","\n","# Shows a training batch and a validation batch.\n","plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(X[0,1,...,0],cmap='magma')\n","plt.axis('off')\n","plt.title('Training Patch');\n","plt.subplot(1,2,2)\n","plt.imshow(X_val[0,1,...,0],cmap='magma')\n","plt.axis('off')\n","plt.title('Validation Patch');\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Dfn8ZsEMv5d","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
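For intuition, when the default advanced parameters are enabled, the cell above derives the validation split and the number of steps per epoch from the patch count roughly as follows (a standalone sketch with made-up numbers, not code taken from the notebook):

```python
# Hypothetical example: 5000 patches, 10% validation, batch size 128.
n_patches = 5000
percentage_validation = 10
batch_size = 128

n_val = int(n_patches * percentage_validation / 100)   # 500 patches kept for validation
n_train = n_patches - n_val                            # 4500 patches used for training
number_of_steps = n_train // batch_size + 1            # 36 steps per epoch

print(n_val, n_train, number_of_steps)
```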
Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"scrolled":true,"colab_type":"code","cellView":"form","id":"iwNmp1PUzRDQ","colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start training\n","%memit\n","# the training starts.\n","history = model.train(X, X_val)\n","%memit\n","print(\"Model training is now done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nRaaG02xZh_N","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"_0Hynw3-xHp1","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"eAJzMwPA6tlH","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dhJROwlAMv5o","colab_type":"text"},"source":["## **5.1. 
Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"X5_92nL2xdP6","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. 
The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n"]},{"cell_type":"code","metadata":{"id":"w90MdriMxhjD","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","path_metrics_save = QC_model_path+'/'+QC_model_name+'/Quality Control/'\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(path_metrics_save+'Prediction'):\n"," shutil.rmtree(path_metrics_save+'Prediction')\n","os.makedirs(path_metrics_save+'Prediction')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","# Activate the pretrained model. 
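To make the metrics above concrete, here is a minimal, self-contained sketch that computes mSSIM and PSNR for a single pair of normalised 2D arrays with scikit-image (illustration only; the quality-control cell applies the same calls slice by slice to whole stacks):

```python
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

gt = np.random.rand(128, 128).astype(np.float32)                   # stand-in for a ground-truth slice
pred = gt + 0.05 * np.random.randn(128, 128).astype(np.float32)    # stand-in for a prediction

# SSIM index and map, using the same windowing options as the notebook (Gaussian weights, sigma=1.5).
mssim, ssim_map = structural_similarity(
    gt, pred, data_range=1.0, full=True,
    gaussian_weights=True, use_sample_covariance=False, sigma=1.5)

psnr_value = peak_signal_noise_ratio(gt, pred, data_range=1.0)

print(f"mSSIM: {mssim:.3f}, PSNR: {psnr_value:.2f} dB")
```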
\n","model_training = N2V(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," n_slices = img.shape[0]\n"," predicted = model_training.predict(img, axes='ZYX', n_tiles=n_tilesZYX)\n"," os.chdir(path_metrics_save+'Prediction/')\n"," imsave('Predicted_'+filename, predicted)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(path_metrics_save+'QC_metrics_'+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"File name\",\"Slice #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. 
GT PSNR\"]) \n"," \n"," # These lists will be used to collect all the metrics values per slice\n"," file_name_list = []\n"," slice_number_list = []\n"," mSSIM_GvP_list = []\n"," mSSIM_GvS_list = []\n"," NRMSE_GvP_list = []\n"," NRMSE_GvS_list = []\n"," PSNR_GvP_list = []\n"," PSNR_GvS_list = []\n","\n"," # These lists will be used to display the mean metrics for the stacks\n"," mSSIM_GvP_list_mean = []\n"," mSSIM_GvS_list_mean = []\n"," NRMSE_GvP_list_mean = []\n"," NRMSE_GvS_list_mean = []\n"," PSNR_GvP_list_mean = []\n"," PSNR_GvS_list_mean = []\n","\n"," # Let's loop through the provided dataset in the QC folders\n"," for thisFile in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder, thisFile)):\n"," print('Running QC on: '+thisFile)\n","\n"," test_GT_stack = io.imread(os.path.join(Target_QC_folder, thisFile))\n"," test_source_stack = io.imread(os.path.join(Source_QC_folder,thisFile))\n"," test_prediction_stack = io.imread(os.path.join(path_metrics_save+\"Prediction/\",'Predicted_'+thisFile))\n"," n_slices = test_GT_stack.shape[0]\n","\n"," # Calculating the position of the mid-plane slice\n"," z_mid_plane = int(n_slices / 2)+1\n","\n"," img_SSIM_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_SSIM_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n","\n"," for z in range(n_slices): \n"," # -------------------------------- Normalising the dataset --------------------------------\n","\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT_stack[z], test_source_stack[z], normalize_gt=True)\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT_stack[z], test_prediction_stack[z], normalize_gt=True)\n","\n"," # -------------------------------- Calculate the SSIM metric and maps --------------------------------\n"," # Calculate the SSIM maps and index\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = structural_similarity(test_GT_norm, test_source_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n"," #Calculate ssim_maps\n"," img_SSIM_GTvsPrediction_stack[z] = img_as_float32(img_SSIM_GTvsPrediction,force_copy=False)\n"," img_SSIM_GTvsSource_stack[z] = img_as_float32(img_SSIM_GTvsSource,force_copy=False)\n"," \n","\n"," # -------------------------------- Calculate the NRMSE metrics --------------------------------\n","\n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Calculate SE maps\n"," img_RSE_GTvsPrediction_stack[z] = img_as_float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_stack[z] = img_as_float32(img_RSE_GTvsSource)\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n","\n"," # Calculate the PSNR between the images\n"," PSNR_GTvsPrediction = 
psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([thisFile, str(z),str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource), str(PSNR_GTvsPrediction), str(PSNR_GTvsSource)])\n"," \n"," # Collect values to display in dataframe output\n"," slice_number_list.append(z)\n"," mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n"," mSSIM_GvS_list.append(index_SSIM_GTvsSource)\n"," NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n"," NRMSE_GvS_list.append(NRMSE_GTvsSource)\n"," PSNR_GvP_list.append(PSNR_GTvsPrediction)\n"," PSNR_GvS_list.append(PSNR_GTvsSource)\n","\n"," if (z == z_mid_plane): # catch these for display\n"," SSIM_GTvsP_forDisplay = index_SSIM_GTvsPrediction\n"," SSIM_GTvsS_forDisplay = index_SSIM_GTvsSource\n"," NRMSE_GTvsP_forDisplay = NRMSE_GTvsPrediction\n"," NRMSE_GTvsS_forDisplay = NRMSE_GTvsSource\n"," \n"," # If calculating average metrics for dataframe output\n"," file_name_list.append(thisFile)\n"," mSSIM_GvP_list_mean.append(sum(mSSIM_GvP_list)/len(mSSIM_GvP_list))\n"," mSSIM_GvS_list_mean.append(sum(mSSIM_GvS_list)/len(mSSIM_GvS_list))\n"," NRMSE_GvP_list_mean.append(sum(NRMSE_GvP_list)/len(NRMSE_GvP_list))\n"," NRMSE_GvS_list_mean.append(sum(NRMSE_GvS_list)/len(NRMSE_GvS_list))\n"," PSNR_GvP_list_mean.append(sum(PSNR_GvP_list)/len(PSNR_GvP_list))\n"," PSNR_GvS_list_mean.append(sum(PSNR_GvS_list)/len(PSNR_GvS_list))\n","\n","\n"," # ----------- Change the stacks to 32 bit images -----------\n","\n"," img_SSIM_GTvsSource_stack_32 = img_as_float32(img_SSIM_GTvsSource_stack, force_copy=False)\n"," img_SSIM_GTvsPrediction_stack_32 = img_as_float32(img_SSIM_GTvsPrediction_stack, force_copy=False)\n"," img_RSE_GTvsSource_stack_32 = img_as_float32(img_RSE_GTvsSource_stack, force_copy=False)\n"," img_RSE_GTvsPrediction_stack_32 = img_as_float32(img_RSE_GTvsPrediction_stack, force_copy=False)\n","\n"," # ----------- Saving the error map stacks -----------\n"," io.imsave(path_metrics_save+'SSIM_GTvsSource_'+thisFile,img_SSIM_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'SSIM_GTvsPrediction_'+thisFile,img_SSIM_GTvsPrediction_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsSource_'+thisFile,img_RSE_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsPrediction_'+thisFile,img_RSE_GTvsPrediction_stack_32)\n","\n","#Averages of the metrics per stack as dataframe output\n","pdResults = pd.DataFrame(file_name_list, columns = [\"File name\"])\n","pdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list_mean\n","pdResults[\"Input v. GT mSSIM\"] = mSSIM_GvS_list_mean\n","pdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list_mean\n","pdResults[\"Input v. GT NRMSE\"] = NRMSE_GvS_list_mean\n","pdResults[\"Prediction v. GT PSNR\"] = PSNR_GvP_list_mean\n","pdResults[\"Input v. 
GT PSNR\"] = PSNR_GvS_list_mean\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same way\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","\n","# Calculating the position of the mid-plane slice\n","z_mid_plane = int(img_GT.shape[0] / 2)+1\n","\n","plt.imshow(img_GT[z_mid_plane])\n","plt.title('Target (slice #'+str(z_mid_plane)+')')\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source[z_mid_plane])\n","plt.title('Source (slice #'+str(z_mid_plane)+')')\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(path_metrics_save+'Prediction/', 'Predicted_'+Test_FileList[-1]))\n","plt.imshow(img_Prediction[z_mid_plane])\n","plt.title('Prediction (slice #'+str(z_mid_plane)+')')\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_SSIM_GTvsSource = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsSource_'+Test_FileList[-1]))\n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsS_forDisplay,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_SSIM_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsPrediction_'+Test_FileList[-1]))\n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsP_forDisplay,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_RSE_GTvsSource = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsSource_'+Test_FileList[-1]))\n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax = 1) \n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsS_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_RSE_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsPrediction_'+Test_FileList[-1]))\n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsP_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)\n","\n","print('-----------------------------------')\n","print('Here are the average scores for the stacks you tested in Quality control. To see values for all slices, open the .csv file saved in the Qulity Control folder.')\n","pdResults.head()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-tJeeJjLnRkP","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"y2TD5p7MZrEb","colab_type":"code","cellView":"form","colab":{}},"source":["#Activate the pretrained model. \n","#model_training = CARE(config=None, name=model_name, basedir=model_path)\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. 
Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 5#@param {type:\"number\"}\n","n_tiles_X = 5#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","#Activate the pretrained model.\n","config = None\n","model = N2V(config, Prediction_model_name, basedir=Prediction_model_path)\n","\n","print(\"Denoising images...\")\n","\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","suffix = '.tif'\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","# The code by Lucas von Chamier\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='ZYX', n_tiles=n_tilesZYX)\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='ZYX')\n"," \n","print(\"Prediction of images done.\")\n","\n","print(\"One example is displayed here.\")\n","\n","\n","#Display an example\n","random_choice=random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest')\n","plt.title('Noisy Input (single Z plane)');\n","plt.axis('off');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], interpolation='nearest')\n","plt.title('Prediction (single Z plane)');\n","plt.axis('off');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"UvSlTaH14s3t","colab_type":"text"},"source":["#**Thank you for using Noise2Void 3D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Noise2Void_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1WZRIoSBNcRUEq4-Rq5M4mDkIaOlEHnxz","timestamp":1588762142860},{"file_id":"10weAY0es-pEfHlACCaBCKK7PmgdoJqdh","timestamp":1587728072051},{"file_id":"10Ze0rFZoooyyTL_OIVWGdFJEhWE6_cSB","timestamp":1586789421439},{"file_id":"1SsGyUbWcMaLGHFepMuKElRNYLdEBUwf6","timestamp":1583244509550}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.7"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83","colab_type":"text"},"source":["# **Noise2Void (3D)**\n","\n","---\n","\n"," Noise2Void is a deep-learning method that can be used to denoise many types of images, including microscopy images and which was originally published by [Krull *et al.* on arXiv](https://arxiv.org/abs/1811.10980). It allows denoising of image data in a self-supervised manner, therefore high-quality, low noise equivalent images are not necessary to train this network. This is performed by \"masking\" a random subset of pixels in the noisy image and training the network to predict the values in these pixels. The resulting output is a denoised version of the image. Noise2Void is based on the popular U-Net network architecture, adapted from [CARE](https://www.nature.com/articles/s41592-018-0216-7).\n","\n"," **This particular notebook enables self-supervised denoised of 3D dataset. If you are interested in 2D dataset, you should use the Noise2Void 2D notebook instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the following paper:\n","\n","**Noise2Void - Learning Denoising from Single Noisy Images**\n","from Krull *et al.* published on arXiv in 2018 (https://arxiv.org/abs/1811.10980)\n","\n","And source code found in: https://github.com/juglab/n2v\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","\n","\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. 
You are currently reading the text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modified by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done, the animation of the play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples of how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX","colab_type":"text"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For Noise2Void to train, it only requires a single noisy image, but multiple images can be used. Information on how to generate a training dataset is available on our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","Please note that you currently can **only use .tif files!**\n","\n","**We strongly recommend that you generate high signal-to-noise ratio versions of your noisy images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," - **Training dataset**\n"," - **Quality control dataset** (Optional but recommended)\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif \n"," - **Data to be predicted** \n"," - **Results**\n","\n","\n","The **Results** folder will contain the processed images, trained model and network parameters as a csv file. 
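Given the data structure above and the .tif-only requirement, a quick way to sanity-check a training folder could look like this (a sketch; the folder path is a placeholder, not one the notebook defines):

```python
import os

Training_source = "/content/gdrive/My Drive/Data/Training dataset"  # placeholder path

if os.path.isdir(Training_source):
    files = sorted(os.listdir(Training_source))
    non_tif = [f for f in files if not f.lower().endswith('.tif')]
    print(f"{len(files) - len(non_tif)} .tif file(s) found.")
    if non_tif:
        print("Non-.tif files present (the notebook expects .tif only):", non_tif)
else:
    print("Folder not found - check the path.")
```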
Your original images remain unmodified.\n","\n","---\n","**Important note**\n","\n","- If you wish to **train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"zCvebubeSaGY","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime?') \n"," print('If the runtime setting is correct, then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access a GPU, try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"sNIVx8_CLolt","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste it into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of the notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in to your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on the \"Files\" tab on the left. Refresh it. Your Google Drive folder should now be available here as \"drive\". 
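Once the mount cell below has run and the authorization has been completed, the mount can be double-checked with a one-liner such as this (a sketch; `/content/gdrive` is the mount point used by this notebook):

```python
import os
print(os.path.isdir('/content/gdrive/My Drive'))  # True once Google Drive is mounted
```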
\n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"AdN8B91xZO0x"},"source":["# **2. Install Noise2Void and dependencies**\n","---"]},{"cell_type":"code","metadata":{"id":"fq21zJVFNASx","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Noise2Void and dependencies\n","\n","# Enable the Tensorflow 1 instead of the Tensorflow 2.\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","\n","print(\"Tensorflow enabled.\")\n","\n","# Here we install Noise2Void and other required packages\n","!pip install n2v\n","!pip install wget\n","!pip install memory_profiler\n","%load_ext memory_profiler\n","\n","print(\"Noise2Void installed.\")\n","\n","# Here we install all libraries and other depencies to run the notebook.\n","\n","# ------- Variable specific to N2V -------\n","from n2v.models import N2VConfig, N2V\n","from csbdeep.utils import plot_history\n","from n2v.utils.n2v_utils import manipulate_val_data\n","from n2v.internals.N2V_DataGenerator import N2V_DataGenerator\n","from csbdeep.io import save_tiff_imagej_compatible\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"FQ_QxtSWQ7CL","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"AuESFimvMv43","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`:** This is the path to your folders containing the Training_source (noisy images). To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. 
Do not re-use the name of an existing model (saved in the same folder), otherwise it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training parameters**\n","\n","**`number_of_epochs`:** Input the number of epochs (rounds) for which the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for 100-200 epochs. Evaluate the performance after training (see 5.). **Default value: 30**\n","\n","**`patch_size`:** Noise2Void divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 8. **Default value: 64**\n","\n","**`patch_height`:** The value should be smaller than the Z dimension of the image and divisible by 4. When analysing isotropic stacks, patch_size and patch_height should have similar values.\n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappears.**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`number_of_steps`:** Define the number of training steps per epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patches / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0004**\n"]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# Create DataGenerator-object.\n","datagen = N2V_DataGenerator()\n","\n","#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","imgs = datagen.load_imgs_from_directory(directory = Training_source, dims='ZYX')\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of steps and epochs:\n","\n","number_of_epochs = 30#@param {type:\"number\"}\n","\n","#@markdown Patch size (pixels) and number\n","patch_size = 64#@param {type:\"number\"}\n","\n","patch_height = 4#@param {type:\"number\"}\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","batch_size = 128#@param {type:\"number\"}\n","number_of_steps = 100#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","initial_learning_rate = 0.0004 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," # number_of_steps is defined in the following cell in this case\n"," batch_size = 128\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0004\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name): \n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","#Load one randomly chosen training target file\n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING + \"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not run)\n","Use_pretrained_model = False\n","\n","# Here we enable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = True\n","\n","print(\"Parameters initiated.\")\n","\n","\n","#Here we display a single z plane\n","\n","norm = simple_norm(x[mid_plane], percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xyQZKby8yFME","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"w_jCy7xOx2g3","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. This only works if the patches are square in XY.\n","\n"," By default data augmentation is enabled. 
Disable this option if you run out of RAM during the training.\n"," "]},{"cell_type":"code","metadata":{"id":"DMqWq5-AxnFU","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","#@markdown ##Play this cell to enable or disable data augmentation: \n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"3L9zSGtORKYI","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be an N2V 3D model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"9vC2n-HeLdiJ","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the chosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check that the model exists ------------------------\n","# If the model path chosen does not contain a pretrained model then use_pretrained_model is disabled, \n"," if not 
os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"MCGklf1vZf2M","colab_type":"text"},"source":["#**4. Train your network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"1KYOuygETJkT","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","Here, we use the information from 3. 
to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"lIUAOJ_LMv5E","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","#Disable some of the warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","# Create batches from the training data.\n","patches = datagen.generate_patches_from_list(imgs, shape=(patch_height, patch_size, patch_size), augment=Use_Data_augmentation)\n","\n","# Patches are divited into training and validation patch set. This inhibits over-lapping of patches. \n","number_train_images =int(len(patches)*(percentage_validation/100))\n","X = patches[number_train_images:]\n","X_val = patches[:number_train_images]\n","\n","print(len(patches),\"patches created.\")\n","print(number_train_images,\"patch images for validation (\",percentage_validation,\"%).\")\n","print((len(patches)-number_train_images),\"patch images for training.\")\n","%memit \n","\n","#Here we automatically define number_of_step in function of training data and batch size\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(X.shape[0]/batch_size) + 1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# creates Congfig object. \n","config = N2VConfig(X, unet_kern_size=3, \n"," train_steps_per_epoch=number_of_steps,train_epochs=number_of_epochs, train_loss='mse', batch_norm=True, \n"," train_batch_size=batch_size, n2v_perc_pix=0.198, n2v_patch_shape=(patch_height, patch_size, patch_size), \n"," n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate = initial_learning_rate)\n","\n","vars(config)\n","\n","# Create the default model.\n","model = N2V(config=config, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","print(\"Parameters transferred into the model.\")\n","print(config)\n","\n","# Shows a training batch and a validation batch.\n","plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(X[0,1,...,0],cmap='magma')\n","plt.axis('off')\n","plt.title('Training Patch');\n","plt.subplot(1,2,2)\n","plt.imshow(X_val[0,1,...,0],cmap='magma')\n","plt.axis('off')\n","plt.title('Validation Patch');\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Dfn8ZsEMv5d","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
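As a rough sanity check before launching a long run, you can estimate the total training time from the number of steps per epoch (computed with the same rule as the cell above) and the per-step time reported during the first epoch. All numbers below are example assumptions, not measurements:

```python
# Back-of-the-envelope estimate; every value here is a placeholder.
n_patches = 3000            # patches reported by section 4.1
percentage_validation = 10
batch_size = 128
number_of_epochs = 30
seconds_per_step = 2.0      # read this off the progress bar of the first epoch

n_train = int(n_patches * (1 - percentage_validation / 100))
steps_per_epoch = n_train // batch_size + 1     # same default as the cell above
hours = steps_per_epoch * number_of_epochs * seconds_per_step / 3600
print(f"{steps_per_epoch} steps/epoch -> roughly {hours:.1f} h of training")
```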
Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"scrolled":true,"colab_type":"code","cellView":"form","id":"iwNmp1PUzRDQ","colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start training\n","%memit\n","# the training starts.\n","history = model.train(X, X_val)\n","%memit\n","print(\"Model training is now done.\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nRaaG02xZh_N","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"_0Hynw3-xHp1","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"eAJzMwPA6tlH","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dhJROwlAMv5o","colab_type":"text"},"source":["## **5.1. 
Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value, but between the model's prediction on a validation image and its target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"X5_92nL2xdP6","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display SSIM maps and RSE maps as well as calculate total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\"!\n","\n","**1. 
The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n"]},{"cell_type":"code","metadata":{"id":"w90MdriMxhjD","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","path_metrics_save = QC_model_path+'/'+QC_model_name+'/Quality Control/'\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(path_metrics_save+'Prediction'):\n"," shutil.rmtree(path_metrics_save+'Prediction')\n","os.makedirs(path_metrics_save+'Prediction')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","# Activate the pretrained model. 
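# Passing config=None below tells N2V to load the configuration and trained
# weights previously saved under basedir/name (i.e. the model selected above),
# instead of building a new, untrained network.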
\n","model_training = N2V(config=None, name=QC_model_name, basedir=QC_model_path)\n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Perform prediction on all datasets in the Source_QC folder\n","for filename in os.listdir(Source_QC_folder):\n"," img = imread(os.path.join(Source_QC_folder, filename))\n"," n_slices = img.shape[0]\n"," predicted = model_training.predict(img, axes='ZYX', n_tiles=n_tilesZYX)\n"," os.chdir(path_metrics_save+'Prediction/')\n"," imsave('Predicted_'+filename, predicted)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(path_metrics_save+'QC_metrics_'+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"File name\",\"Slice #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. 
GT PSNR\"]) \n"," \n"," # These lists will be used to collect all the metrics values per slice\n"," file_name_list = []\n"," slice_number_list = []\n"," mSSIM_GvP_list = []\n"," mSSIM_GvS_list = []\n"," NRMSE_GvP_list = []\n"," NRMSE_GvS_list = []\n"," PSNR_GvP_list = []\n"," PSNR_GvS_list = []\n","\n"," # These lists will be used to display the mean metrics for the stacks\n"," mSSIM_GvP_list_mean = []\n"," mSSIM_GvS_list_mean = []\n"," NRMSE_GvP_list_mean = []\n"," NRMSE_GvS_list_mean = []\n"," PSNR_GvP_list_mean = []\n"," PSNR_GvS_list_mean = []\n","\n"," # Let's loop through the provided dataset in the QC folders\n"," for thisFile in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder, thisFile)):\n"," print('Running QC on: '+thisFile)\n","\n"," test_GT_stack = io.imread(os.path.join(Target_QC_folder, thisFile))\n"," test_source_stack = io.imread(os.path.join(Source_QC_folder,thisFile))\n"," test_prediction_stack = io.imread(os.path.join(path_metrics_save+\"Prediction/\",'Predicted_'+thisFile))\n"," n_slices = test_GT_stack.shape[0]\n","\n"," # Calculating the position of the mid-plane slice\n"," z_mid_plane = int(n_slices / 2)+1\n","\n"," img_SSIM_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_SSIM_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsPrediction_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n"," img_RSE_GTvsSource_stack = np.zeros((n_slices, test_GT_stack.shape[1], test_GT_stack.shape[2]))\n","\n"," for z in range(n_slices): \n"," # -------------------------------- Normalising the dataset --------------------------------\n","\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT_stack[z], test_source_stack[z], normalize_gt=True)\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT_stack[z], test_prediction_stack[z], normalize_gt=True)\n","\n"," # -------------------------------- Calculate the SSIM metric and maps --------------------------------\n"," # Calculate the SSIM maps and index\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = structural_similarity(test_GT_norm, test_source_norm, data_range=1.0, full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n"," #Calculate ssim_maps\n"," img_SSIM_GTvsPrediction_stack[z] = img_as_float32(img_SSIM_GTvsPrediction,force_copy=False)\n"," img_SSIM_GTvsSource_stack[z] = img_as_float32(img_SSIM_GTvsSource,force_copy=False)\n"," \n","\n"," # -------------------------------- Calculate the NRMSE metrics --------------------------------\n","\n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Calculate SE maps\n"," img_RSE_GTvsPrediction_stack[z] = img_as_float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_stack[z] = img_as_float32(img_RSE_GTvsSource)\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n","\n"," # Calculate the PSNR between the images\n"," PSNR_GTvsPrediction = 
psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([thisFile, str(z),str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource), str(PSNR_GTvsPrediction), str(PSNR_GTvsSource)])\n"," \n"," # Collect values to display in dataframe output\n"," slice_number_list.append(z)\n"," mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n"," mSSIM_GvS_list.append(index_SSIM_GTvsSource)\n"," NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n"," NRMSE_GvS_list.append(NRMSE_GTvsSource)\n"," PSNR_GvP_list.append(PSNR_GTvsPrediction)\n"," PSNR_GvS_list.append(PSNR_GTvsSource)\n","\n"," if (z == z_mid_plane): # catch these for display\n"," SSIM_GTvsP_forDisplay = index_SSIM_GTvsPrediction\n"," SSIM_GTvsS_forDisplay = index_SSIM_GTvsSource\n"," NRMSE_GTvsP_forDisplay = NRMSE_GTvsPrediction\n"," NRMSE_GTvsS_forDisplay = NRMSE_GTvsSource\n"," \n"," # If calculating average metrics for dataframe output\n"," file_name_list.append(thisFile)\n"," mSSIM_GvP_list_mean.append(sum(mSSIM_GvP_list)/len(mSSIM_GvP_list))\n"," mSSIM_GvS_list_mean.append(sum(mSSIM_GvS_list)/len(mSSIM_GvS_list))\n"," NRMSE_GvP_list_mean.append(sum(NRMSE_GvP_list)/len(NRMSE_GvP_list))\n"," NRMSE_GvS_list_mean.append(sum(NRMSE_GvS_list)/len(NRMSE_GvS_list))\n"," PSNR_GvP_list_mean.append(sum(PSNR_GvP_list)/len(PSNR_GvP_list))\n"," PSNR_GvS_list_mean.append(sum(PSNR_GvS_list)/len(PSNR_GvS_list))\n","\n","\n"," # ----------- Change the stacks to 32 bit images -----------\n","\n"," img_SSIM_GTvsSource_stack_32 = img_as_float32(img_SSIM_GTvsSource_stack, force_copy=False)\n"," img_SSIM_GTvsPrediction_stack_32 = img_as_float32(img_SSIM_GTvsPrediction_stack, force_copy=False)\n"," img_RSE_GTvsSource_stack_32 = img_as_float32(img_RSE_GTvsSource_stack, force_copy=False)\n"," img_RSE_GTvsPrediction_stack_32 = img_as_float32(img_RSE_GTvsPrediction_stack, force_copy=False)\n","\n"," # ----------- Saving the error map stacks -----------\n"," io.imsave(path_metrics_save+'SSIM_GTvsSource_'+thisFile,img_SSIM_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'SSIM_GTvsPrediction_'+thisFile,img_SSIM_GTvsPrediction_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsSource_'+thisFile,img_RSE_GTvsSource_stack_32)\n"," io.imsave(path_metrics_save+'RSE_GTvsPrediction_'+thisFile,img_RSE_GTvsPrediction_stack_32)\n","\n","#Averages of the metrics per stack as dataframe output\n","pdResults = pd.DataFrame(file_name_list, columns = [\"File name\"])\n","pdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list_mean\n","pdResults[\"Input v. GT mSSIM\"] = mSSIM_GvS_list_mean\n","pdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list_mean\n","pdResults[\"Input v. GT NRMSE\"] = NRMSE_GvS_list_mean\n","pdResults[\"Prediction v. GT PSNR\"] = PSNR_GvP_list_mean\n","pdResults[\"Input v. 
GT PSNR\"] = PSNR_GvS_list_mean\n","\n","\n","# All data is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same way\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","\n","# Calculating the position of the mid-plane slice\n","z_mid_plane = int(img_GT.shape[0] / 2)+1\n","\n","plt.imshow(img_GT[z_mid_plane])\n","plt.title('Target (slice #'+str(z_mid_plane)+')')\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source[z_mid_plane])\n","plt.title('Source (slice #'+str(z_mid_plane)+')')\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(path_metrics_save+'Prediction/', 'Predicted_'+Test_FileList[-1]))\n","plt.imshow(img_Prediction[z_mid_plane])\n","plt.title('Prediction (slice #'+str(z_mid_plane)+')')\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_SSIM_GTvsSource = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsSource_'+Test_FileList[-1]))\n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsS_forDisplay,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_SSIM_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'SSIM_GTvsPrediction_'+Test_FileList[-1]))\n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. 
Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(SSIM_GTvsP_forDisplay,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False)\n","img_RSE_GTvsSource = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsSource_'+Test_FileList[-1]))\n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource[z_mid_plane], cmap = cmap, vmin=0, vmax = 1) \n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsS_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","img_RSE_GTvsPrediction = io.imread(os.path.join(path_metrics_save, 'RSE_GTvsPrediction_'+Test_FileList[-1]))\n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction[z_mid_plane], cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsP_forDisplay,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)\n","\n","print('-----------------------------------')\n","print('Here are the average scores for the stacks you tested in Quality control. To see values for all slices, open the .csv file saved in the Qulity Control folder.')\n","pdResults.head()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-tJeeJjLnRkP","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
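If you point the notebook at an older model, it can save time to first confirm that the folder you provide really contains trained weights. A minimal check, assuming the weight file names used throughout this notebook (`weights_last.h5` / `weights_best.h5`) and an example path:

```python
import os

Prediction_model_folder = "/content/gdrive/My Drive/models/my_model"   # example path
expected = ["weights_last.h5", "weights_best.h5"]
present = [f for f in expected if os.path.exists(os.path.join(Prediction_model_folder, f))]
print("Found in model folder:", present if present else "nothing - check the path")
```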
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"y2TD5p7MZrEb","colab_type":"code","cellView":"form","colab":{}},"source":["#Activate the pretrained model. \n","#model_training = CARE(config=None, name=model_name, basedir=model_path)\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. 
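# Illustrative sketch (kept as comments, not part of the form above): if you are
# unsure which tiling fits in memory, you can try progressively finer tilings
# until the prediction succeeds. 'model' and 'img' are assumed to be defined as
# in the prediction loop further down this cell.
# for n_tiles in [(1, 1, 1), (1, 2, 2), (1, 4, 4), (2, 4, 4)]:
#     try:
#         pred = model.predict(img, axes='ZYX', n_tiles=n_tiles)
#         print('Prediction succeeded with n_tiles =', n_tiles)
#         break
#     except Exception as err:   # an OOM usually surfaces as ResourceExhaustedError
#         print('n_tiles =', n_tiles, 'failed:', type(err).__name__)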
Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 5#@param {type:\"number\"}\n","n_tiles_X = 5#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","#Activate the pretrained model.\n","config = None\n","model = N2V(config, Prediction_model_name, basedir=Prediction_model_path)\n","\n","print(\"Denoising images...\")\n","\n","thisdir = Path(Data_folder)\n","outputdir = Path(Result_folder)\n","suffix = '.tif'\n","\n","# r=root, d=directories, f = files\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," if \".tif\" in file:\n"," print(os.path.join(r, file))\n","\n","# The code by Lucas von Chamier\n","for r, d, f in os.walk(thisdir):\n"," for file in f:\n"," base_filename = os.path.basename(file)\n"," input_train = imread(os.path.join(r, file))\n"," pred_train = model.predict(input_train, axes='ZYX', n_tiles=n_tilesZYX)\n"," save_tiff_imagej_compatible(os.path.join(outputdir, base_filename), pred_train, axes='ZYX')\n"," \n","print(\"Prediction of images done.\")\n","\n","print(\"One example is displayed here.\")\n","\n","\n","#Display an example\n","random_choice=random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest')\n","plt.title('Noisy Input (single Z plane)');\n","plt.axis('off');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], interpolation='nearest')\n","plt.title('Prediction (single Z plane)');\n","plt.axis('off');"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
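One convenient way to do this is to zip the results folder inside the Colab session and download the archive in one go. A minimal sketch, assuming an example results path on your Drive:

```python
import shutil
from google.colab import files   # only available inside Google Colab

Result_folder = "/content/gdrive/My Drive/Results"      # example path - adapt to yours
archive = shutil.make_archive("/content/predictions", "zip", Result_folder)
files.download(archive)          # triggers a browser download of predictions.zip
```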
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"UvSlTaH14s3t","colab_type":"text"},"source":["#**Thank you for using Noise2Void 3D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Stardist_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Stardist_2D_ZeroCostDL4Mic.ipynb index 552afabe..f78918aa 100755 --- a/Colab_notebooks/Stardist_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Stardist_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"colab":{"name":"Stardist_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1WAfQW1Mj3wy1XQZZUfU4DJVS_R_E8Cn3","timestamp":1585665697353},{"file_id":"1PKVyox_mx2rEE3VlMFQtdnVULJFhYPaD","timestamp":1583443864213},{"file_id":"1XSclOkhhHmn-9LQc9k8c3Y6seT1LEi-Y","timestamp":1583264105465},{"file_id":"1VPZYk3MeSVyZVVEmesz10VtujbD4diJk","timestamp":1579481583477},{"file_id":"1ENdOZir1Gytf6JxzyfbjgfxO3_C1dLHK","timestamp":1575415287126},{"file_id":"1G8b4dF2kCs3ePBGZthPUGOyjJpZ2G_Dm","timestamp":1575379725785},{"file_id":"1P0tT0RR_b3SFKvOcON_MzcAIcxRUQK5B","timestamp":1575377313115},{"file_id":"1hQz8PyJzBRkBZc9NwxM9mU9azRSvghBk","timestamp":1574783624098},{"file_id":"14mWTNjHgIbuuWAxb-0lhmhdIvMoZgrI0","timestamp":1574099686195},{"file_id":"1IWvFuBb0gqaJcUXhhfbcTWNh9cZEXW4S","timestamp":1573647131082},{"file_id":"1hFulBwI57YU6GoVc8sBt5KNIkCS7ynQ3","timestamp":1573579952409},{"file_id":"1Ba_Bu-PXN_2Mq5W6YHMgUYsJEfgbPtS-","timestamp":1573035984524},{"file_id":"1ePC44Qq_C2hSFGPM3PKyb0J6UBXSPddp","timestamp":1573032545399},{"file_id":"https://github.com/mpicbg-csbd/stardist/blob/master/examples/2D/2_training.ipynb","timestamp":1572984225873}],"collapsed_sections":[],"toc_visible":true},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"kiFRRolPa-Rb","colab_type":"text"},"source":["# **Cell nuclei detection by Stardist 2D**\n","---\n","\n","**Stardist** is a deep-learning method that can be used to segment cell nuclei in 2D (xy) single images or in stacks (xyz). \n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper:\n","\n","[Cell Detection with Star-convex Polygons](https://arxiv.org/abs/1806.03535)\n","\n","Uwe Schmidt, Martin Weigert, Coleman Broaddus, and Gene Myers.\n","International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), Granada, Spain, September 2018.\n","\n","[Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy](https://arxiv.org/abs/1908.03636)\n","\n","Martin Weigert, Uwe Schmidt, Robert Haase, Ko Sugawara, and Gene Myers. 
arXiv, 2019\n","\n","**The Original code** is freely available in GitHub:\n","https://github.com/mpicbg-csbd/stardist\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"iSuNqQ2ZMVGM","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"4-oByBSdE6DE","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For Stardist to train, **it needs to have access to a paired training dataset made of images of nuclei and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model**. The quality control assessment can be done directly in this notebook.\n","\n","The data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. 
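Before training, it can be worth checking that the two folders are consistent. A minimal sketch, assuming example folder paths (the file-name pairing it tests is explained just below):

```python
import os

# Example paths - replace with your own Training_source / Training_target folders.
Training_source = "/content/gdrive/My Drive/Training - Images"
Training_target = "/content/gdrive/My Drive/Training - Masks"

images = sorted(f for f in os.listdir(Training_source) if f.endswith(".tif"))
masks  = sorted(f for f in os.listdir(Training_target) if f.endswith(".tif"))

print(len(images), "images /", len(masks), "masks")
unmatched = [f for f in images if f not in masks]
print("Images without a matching mask:", unmatched if unmatched else "none")
```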
The provided training dataset is already split in two folders called \"Training - Images\" (Training_source) and \"Training - Masks\" (Training_target).\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Please note that you currently can **only use .tif files!**\n","\n","You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. This can include Test dataset for which you have the equivalent output and can compare to what the network provides.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Images of nuclei (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - Masks (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Images of nuclei\n"," - img_1.tif, img_2.tif\n"," - Masks \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"t1sYuLChbRV3","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CDxBu1-19OyC","colab_type":"text"},"source":["\n","\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"4waLStm0RPFo","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"ZLY4qhgj8w-R","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. 
In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"Ukil4yuS8seC","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bB0IaQMZmWYM","colab_type":"text"},"source":["# **2. Install Stardist and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"j0w7C8P5zPIp","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Stardist and dependencies\n","\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# Install packages which are not included in Google Colab\n","\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). It uses Keras and Tensorflow.\n","!pip install stardist # contains tools to operate STARDIST.\n","!pip install gputools # improves STARDIST performances\n","!pip install edt # improves STARDIST performances\n","!pip install wget\n","\n","\n","# ------- Variable specific to Stardist -------\n","from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available, relabel_image_stardist, random_label_cmap, relabel_image_stardist, _draw_polygons, export_imagej_rois\n","from stardist.models import Config2D, StarDist2D, StarDistData2D # import objects\n","from stardist.matching import matching_dataset\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","from csbdeep.utils import Path, normalize, download_and_extract_zip_file, plot_history # for loss plot\n","from csbdeep.io import save_tiff_imagej_compatible\n","import numpy as np\n","np.random.seed(42)\n","lbl_cmap = random_label_cmap()\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32, img_as_ubyte, img_as_float\n","from 
skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","import cv2\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DPWhXaltAYgH","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"KWpu5p8utpE2","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HJKFAmuXc6d1"},"source":[" **Paths for training, predictions and results**\n","\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (images of nuclei) and Training_target (masks) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a 50-100 epochs, but a full training should run for up to 400 epochs. Evaluate the performance after training (see 5.). **Default value: 100**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`patch_size`:** Input the size of the patches use to train stardist 2D (length of a side). The value should be smaller or equal to the dimensions of the image. Make the patch size as large as possible and divisible by 8. **Default value: dimension of the training images** \n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size value until the OOM error disappear.**\n","\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 2** \n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`n_rays`:** Set number of rays (corners) used for Stardist (for instance, a square has 4 corners). **Default value: 32** \n","\n","**`grid_parameter`:** increase this number if the cells/nuclei are very large or decrease it if they are very small. **Default value: 2**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0003**\n","\n"]},{"cell_type":"code","metadata":{"colab_type":"code","cellView":"form","id":"CNJImzzVnr7h","colab":{}},"source":["#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","\n","model_path = \"\" #@param {type:\"string\"}\n","#trained_model = model_path \n","\n","\n","#@markdown ### Other parameters for training:\n","number_of_epochs = 100#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","#GPU_limit = 90 #@param {type:\"number\"}\n","number_of_steps = 37#@param {type:\"number\"}\n","patch_size = 1024 #@param {type:\"number\"}\n","batch_size = 2 #@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","n_rays = 32 #@param {type:\"number\"}\n","grid_parameter = 2#@param [1, 2, 4, 8, 16, 32] {type:\"raw\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 2\n"," n_rays = 32\n"," percentage_validation = 10\n"," grid_parameter = 2\n"," initial_learning_rate = 0.0003\n","\n","percentage = percentage_validation/100\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","# Here we open will randomly chosen input and output image\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check the image dimensions\n","\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","print('Loaded images (width, length) =', x.shape)\n","\n","# If default parameters, patch size is the same as image size\n","if (Use_Default_Advanced_Parameters):\n"," patch_size = min(Image_Y, Image_X)\n"," \n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","# Here we check that the patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is:\",patch_size)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","\n","print(\"Parameters initiated.\")\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","#Here we use a simple normalisation strategy to visualise the image\n","norm = simple_norm(x, percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest', norm=norm, 
cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest', cmap=lbl_cmap)\n","plt.title('Training target')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"vgT0NU3P6Bwt","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"8in3wzAw6G6g","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by [Augmentor.](https://github.com/mdbloice/Augmentor)\n","\n","[Augmentor](https://github.com/mdbloice/Augmentor) was described in the following article:\n","\n","Marcus D Bloice, Peter M Roth, Andreas Holzinger, Biomedical image augmentation using Augmentor, Bioinformatics, https://doi.org/10.1093/bioinformatics/btz259\n","\n","**Please also cite this original paper when publishing results obtained using this notebook with augmentation enabled.** "]},{"cell_type":"code","metadata":{"id":"2zk1H8J06aJH","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," !pip install Augmentor\n"," import Augmentor\n","\n","\n","#@markdown ####Choose a factor by which you want to multiply your original dataset\n","\n","Multiply_dataset_by = 2 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please choose the probability of the following image manipulations to be used to augment your dataset (1 = always used; 0 = disabled ):\n","\n","#@markdown ####Mirror and rotate images\n","rotate_90_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","rotate_270_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_left_right = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_top_bottom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image Zoom\n","\n","random_zoom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","random_zoom_magnification = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image distortion\n","\n","random_distortion = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","#@markdown ####Image shearing and skewing \n","\n","image_shear = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","max_image_shear = 1 #@param {type:\"slider\", min:1, max:25, step:1}\n","\n","skew_image = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","skew_image_magnitude = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","if 
Use_Default_Augmentation_Parameters:\n"," rotate_90_degrees = 0.5\n"," rotate_270_degrees = 0.5\n"," flip_left_right = 0.5\n"," flip_top_bottom = 0.5\n","\n"," if not Multiply_dataset_by >5:\n"," random_zoom = 0\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0\n"," image_shear = 0\n"," max_image_shear = 10\n"," skew_image = 0\n"," skew_image_magnitude = 0\n","\n"," if Multiply_dataset_by >5:\n"," random_zoom = 0.1\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0.5\n"," image_shear = 0.2\n"," max_image_shear = 5\n"," skew_image = 0.2\n"," skew_image_magnitude = 0.4\n","\n"," if Multiply_dataset_by >25:\n"," random_zoom = 0.5\n"," random_zoom_magnification = 0.8\n"," random_distortion = 0.5\n"," image_shear = 0.5\n"," max_image_shear = 20\n"," skew_image = 0.5\n"," skew_image_magnitude = 0.6\n","\n","\n","list_files = os.listdir(Training_source)\n","Nb_files = len(list_files)\n","\n","Nb_augmented_files = (Nb_files * Multiply_dataset_by)\n","\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","# Here we set the path for the various folder were the augmented images will be loaded\n","\n","# All images are first saved into the augmented folder\n"," #Augmented_folder = \"/content/Augmented_Folder\"\n"," \n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n","\n"," #Training_source_augmented = \"/content/Training_source_augmented\"\n"," Training_source_augmented = Saving_path+\"/Training_source_augmented\"\n","\n"," if os.path.exists(Training_source_augmented):\n"," shutil.rmtree(Training_source_augmented)\n"," os.makedirs(Training_source_augmented)\n","\n"," #Training_target_augmented = \"/content/Training_target_augmented\"\n"," Training_target_augmented = Saving_path+\"/Training_target_augmented\"\n","\n"," if os.path.exists(Training_target_augmented):\n"," shutil.rmtree(Training_target_augmented)\n"," os.makedirs(Training_target_augmented)\n","\n","\n","# Here we generate the augmented images\n","#Load the images\n"," p = Augmentor.Pipeline(Training_source, Augmented_folder)\n","\n","#Define the matching images\n"," p.ground_truth(Training_target)\n","#Define the augmentation possibilities\n"," if not rotate_90_degrees == 0:\n"," p.rotate90(probability=rotate_90_degrees)\n"," \n"," if not rotate_270_degrees == 0:\n"," p.rotate270(probability=rotate_270_degrees)\n","\n"," if not flip_left_right == 0:\n"," p.flip_left_right(probability=flip_left_right)\n","\n"," if not flip_top_bottom == 0:\n"," p.flip_top_bottom(probability=flip_top_bottom)\n","\n"," if not random_zoom == 0:\n"," p.zoom_random(probability=random_zoom, percentage_area=random_zoom_magnification)\n"," \n"," if not random_distortion == 0:\n"," p.random_distortion(probability=random_distortion, grid_width=4, grid_height=4, magnitude=8)\n","\n"," if not image_shear == 0:\n"," p.shear(probability=image_shear,max_shear_left=20,max_shear_right=20)\n"," \n"," if not skew_image == 0:\n"," p.skew(probability=skew_image,magnitude=skew_image_magnitude)\n","\n"," p.sample(int(Nb_augmented_files))\n","\n"," print(int(Nb_augmented_files),\"matching images generated\")\n","\n","# Here we sort through the images and move them back to augmented trainning source and targets folders\n","\n"," augmented_files = os.listdir(Augmented_folder)\n","\n"," for f in augmented_files:\n","\n"," if 
(f.startswith(\"_groundtruth_(1)_\")):\n"," shortname_noprefix = f[17:]\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_target_augmented+\"/\"+shortname_noprefix) \n"," if not (f.startswith(\"_groundtruth_(1)_\")):\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_source_augmented+\"/\"+f)\n"," \n","\n"," for filename in os.listdir(Training_source_augmented):\n"," os.chdir(Training_source_augmented)\n"," os.rename(filename, filename.replace('_original', ''))\n"," \n"," #Here we clean up the extra files\n"," shutil.rmtree(Augmented_folder)\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\") \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"x4zMG4lMths-","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Stardist model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"SfQeukJJtv9u","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"2D_versatile_fluo_from_Stardist_Fiji\" #@param [\"Model_from_file\", \"2D_versatile_fluo_from_Stardist_Fiji\", \"2D_Demo_Model_from_Stardist_Github\", \"Versatile_H&E_nuclei\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the Demo 2D model provided in the Stardist 2D github ------------------------\n","\n"," if pretrained_model_choice == \"2D_Demo_Model_from_Stardist_Github\":\n"," pretrained_model_name = \"2D_Demo\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_Github\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/config.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/thresholds.json\", 
pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_best.h5?raw=true\", pretrained_model_path) \n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Download the Demo 2D_versatile_fluo_from_Stardist_Fiji ------------------------\n","\n"," if pretrained_model_choice == \"2D_versatile_fluo_from_Stardist_Fiji\":\n"," print(\"Downloading the 2D_versatile_fluo_from_Stardist_Fiji\")\n"," pretrained_model_name = \"2D_versatile_fluo\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," \n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," \n"," wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_fluo.zip\", pretrained_model_path)\n"," \n"," with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_fluo.zip\", 'r') as zip_ref:\n"," zip_ref.extractall(pretrained_model_path)\n"," \n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n","\n","# --------------------- Download the Versatile (H&E nuclei)_fluo_from_Stardist_Fiji ------------------------\n","\n"," if pretrained_model_choice == \"Versatile_H&E_nuclei\":\n"," print(\"Downloading the Versatile_H&E_nuclei from_Stardist_Fiji\")\n"," pretrained_model_name = \"2D_versatile_he\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," \n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," \n"," wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_he.zip\", pretrained_model_path)\n"," \n"," with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_he.zip\", 'r') as zip_ref:\n"," zip_ref.extractall(pretrained_model_path)\n"," \n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n","\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist' + W)\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest 
validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DECuc3HZDbwG","colab_type":"text"},"source":["#**4. Train your network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"NwV5LweiavgQ","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"uTM781rCKT8r","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","training_images_tiff=Training_source_dir+\"/*.tif\"\n","mask_images_tiff=Training_target_dir+\"/*.tif\"\n","\n","# this funtion imports training images and masks and sorts them suitable for the network\n","X = sorted(glob(training_images_tiff)) \n","Y = sorted(glob(mask_images_tiff)) \n","\n","# assert -funtion check that X and Y really have images. 
If not this cell raises an error\n","assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n","\n","# Here we map the training dataset (images and masks).\n","X = list(map(imread,X))\n","Y = list(map(imread,Y))\n","n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n","\n","#Normalize images and fill small label holes.\n","axis_norm = (0,1) # normalize channels independently\n","# axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," sys.stdout.flush()\n","\n","X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\n","Y = [fill_label_holes(y) for y in tqdm(Y)]\n","\n","#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n","#It is advisable to use 10 % of your training dataset for validation. This ensures the truthfull validation error value. If only few validation images are used network may choose too easy or too challenging images for validation. \n","# split training data (images and masks) into training images and validation images.\n","assert len(X) > 1, \"not enough training data\"\n","rng = np.random.RandomState(42)\n","ind = rng.permutation(len(X))\n","n_val = max(1, int(round(percentage * len(ind))))\n","ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n","X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n","X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n","print('number of images: %3d' % len(X))\n","print('- training: %3d' % len(X_trn))\n","print('- validation: %3d' % len(X_val))\n","\n","# Use OpenCL-based computations for data generator during training (requires 'gputools')\n","use_gpu = False and gputools_available()\n","\n","#Here we ensure that our network has a minimal number of steps\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(len(X)/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","\n","conf = Config2D (\n"," n_rays = n_rays,\n"," use_gpu = use_gpu,\n"," train_batch_size = batch_size,\n"," n_channel_in = n_channel,\n"," train_patch_size = (patch_size, patch_size),\n"," grid = (grid_parameter, grid_parameter),\n"," train_learning_rate = initial_learning_rate,\n",")\n","\n","# Here we create a model according to section 5.3.\n","model = StarDist2D(conf, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","\n","\n","# --------------------- ---------------------- ------------------------\n","\n","#Here we check the FOV of the network.\n","median_size = calculate_extents(list(Y), np.median)\n","fov = np.array(model._axes_tile_overlap('YX'))\n","if any(median_size > fov):\n"," print(bcolors.WARNING+\"WARNING: median object size larger than field of view of the neural 
network.\")\n","print(conf)\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nnMCvu2PKT9W","colab_type":"text"},"source":["\n","## **4.2. Train the network**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"id":"XfCF-Q4lKT9e","colab_type":"code","cellView":"form","colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start Training\n","augmenter = None\n","\n","# def augmenter(X_batch, Y_batch):\n","# \"\"\"Augmentation for data batch.\n","# X_batch is a list of input images (length at most batch_size)\n","# Y_batch is the corresponding list of ground-truth label images\n","# \"\"\"\n","# # ...\n","# return X_batch, Y_batch\n","\n","# Training the model. \n","# 'input_epochs' and 'steps' refers to your input data in section 5.1 \n","history = model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n"," epochs=number_of_epochs, steps_per_epoch=number_of_steps)\n","None;\n","\n","print(\"Training done\")\n","\n","print(\"Network optimization in progress\")\n","#Here we optimize the network.\n","model.optimize_thresholds(X_val, Y_val)\n","\n","print(\"Done\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iYRrmh0dCrNs","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"U8H7QRfKBzI8","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
\n","\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"o2O0QnO4PFlz","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else: \n"," print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-2b4RMU_Ec2y","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"KG8wZrA3Ef4n","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"GFJBwr5TEgcq","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder ! The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. 
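As a toy illustration of the Intersection over Union score described above (independent of the notebook's own QC code, which follows below), computed on two small hand-made binary masks:

```python
# Toy IoU example on two 3x3 binary masks; not part of the notebook pipeline.
import numpy as np

ground_truth = np.array([[0, 1, 1],
                         [0, 1, 1],
                         [0, 0, 0]], dtype=bool)
prediction   = np.array([[0, 1, 1],
                         [0, 0, 1],
                         [0, 0, 1]], dtype=bool)

intersection = np.logical_and(ground_truth, prediction).sum()  # 3 pixels overlap
union        = np.logical_or(ground_truth, prediction).sum()   # 5 pixels in total
print("IoU =", intersection / union)  # 3/5 = 0.6
```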
\n","\n"," The results can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\"."]},{"cell_type":"code","metadata":{"id":"EvCMiYaeElc4","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","\n","#Create a quality control Folder and check if the folder already exist\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\") == False:\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","\n","# Generate predictions from the Source_QC_folder and save them in the QC folder\n","\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","n_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n","\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","\n","model = StarDist2D(None, name=QC_model_name, basedir=QC_model_path)\n","\n","names = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n","\n"," \n","# modify the names to suitable form: path_images/image_numberX.tif\n"," \n","lenght_of_Z = len(Z)\n"," \n","for i in range(lenght_of_Z):\n"," img = normalize(Z[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img)\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(names[i], labels, polygons)\n","\n","\n","# Here we start testing the differences between GT and predicted masks\n","\n","\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"image\",\"Prediction v. 
GT Intersection over Union\"]) \n","\n","# define the images\n","\n"," for n in os.listdir(Source_QC_folder):\n"," \n"," if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n"," print('Running QC on: '+n)\n"," test_input = io.imread(os.path.join(Source_QC_folder,n))\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",n))\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, n))\n","\n"," #Convert pixel values to 0 or 255\n"," test_prediction_0_to_255 = test_prediction\n"," test_prediction_0_to_255[test_prediction_0_to_255>0] = 255\n","\n"," #Convert pixel values to 0 or 255\n"," test_ground_truth_0_to_255 = test_ground_truth_image\n"," test_ground_truth_0_to_255[test_ground_truth_0_to_255>0] = 255\n","\n"," # Intersection over Union metric\n","\n"," intersection = np.logical_and(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," union = np.logical_or(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," iou_score = np.sum(intersection) / np.sum(union)\n"," writer.writerow([n, str(iou_score)])\n","\n","\n","#Display the last image\n","\n","f = plt.figure(figsize=(25,25))\n","\n","from astropy.visualization import simple_norm\n","norm = simple_norm(test_input, percent = 99)\n","\n","#Input\n","plt.subplot(1,4,1)\n","plt.axis('off')\n","plt.imshow(test_input, aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n","plt.title('Input')\n","\n","\n","#Ground-truth\n","plt.subplot(1,4,2)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255, aspect='equal', cmap='Greens')\n","plt.title('Ground Truth')\n","\n","#Prediction\n","plt.subplot(1,4,3)\n","plt.axis('off')\n","plt.imshow(test_prediction_0_to_255, aspect='equal', cmap='Purples')\n","plt.title('Prediction')\n","\n","#Overlay\n","plt.subplot(1,4,4)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255, cmap='Greens')\n","plt.imshow(test_prediction_0_to_255, alpha=0.5, cmap='Purples')\n","plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3)));\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iAPmwlxCEzxQ","colab_type":"text"},"source":["# **6. Using the trained model**\n","---"]},{"cell_type":"markdown","metadata":{"id":"btXwwnVpBEMB","colab_type":"text"},"source":["\n","\n","## **6.1 Generate prediction(s) from unseen dataset**\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive.\n","\n","---\n","\n","The current trained model (from section 4.3) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Prediction_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you will train.\n","\n","**`Result_folder`:** This folder will contain the predicted output ROI.\n","\n","**`Data_type`:** Please indicate if the images you want to predict are single images or stacks\n","\n","\n","In stardist the following results can be exported:\n","- Region of interest (ROI) that can be opened in ImageJ / Fiji. 
The ROI are saved inside of a .zip file in your choosen result folder. To open the ROI in Fiji, just drag and drop the zip file !**\n","- The predicted mask images\n","- A tracking file that can easily be imported into Trackmate to track the nuclei (Stacks only).\n","- A CSV file that contains the number of nuclei detected per image (single image only). \n","\n"]},{"cell_type":"code","metadata":{"id":"x8UXP8S2eoo_","colab_type":"code","cellView":"form","colab":{}},"source":["Single_Images = 1\n","Stacks = 2\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Results_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Are your data single images or stacks?\n","\n","Data_type = Single_Images #@param [\"Single_Images\", \"Stacks\"] {type:\"raw\"}\n","\n","#@markdown ###What outputs would you like to generate?\n","Region_of_interests = True #@param {type:\"boolean\"}\n","Mask_images = True #@param {type:\"boolean\"}\n","Tracking_file = False #@param {type:\"boolean\"}\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#single images\n","Data_folder = Data_folder+\"/*.tif\"\n","\n","if Data_type == 1 :\n"," print(\"Single images are now beeing predicted\")\n"," np.random.seed(16)\n"," lbl_cmap = random_label_cmap()\n"," X = sorted(glob(Data_folder))\n"," X = list(map(imread,X))\n"," n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n"," axis_norm = (0,1) # normalize channels independently\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n"," if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n"," \n"," names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n"," \n"," Nuclei_number = []\n","\n"," # modify the names to suitable form: path_images/image_numberX.tif\n"," FILEnames = []\n"," for m in names:\n"," m = Results_folder+'/'+m\n"," FILEnames.append(m)\n","\n"," # Create a list of name with no extension\n"," \n"," name_no_extension=[]\n"," for n in names:\n"," name_no_extension.append(os.path.splitext(n)[0])\n"," \n","\n"," # Save all ROIs and masks into results folder\n"," \n"," for i in range(len(X)):\n"," img = normalize(X[i], 1,99.8, axis = axis_norm)\n"," labels, polygons = model.predict_instances(img)\n"," \n"," os.chdir(Results_folder)\n","\n"," if Mask_images:\n"," imsave(FILEnames[i], labels, polygons)\n","\n"," if Region_of_interests:\n"," export_imagej_rois(name_no_extension[i], polygons['coord'])\n","\n"," if Tracking_file:\n"," print(bcolors.WARNING+\"Tracking files are only generated when stacks are predicted\"+W) \n"," \n"," \n"," Nuclei_array = polygons['coord']\n"," Nuclei_array2 = [names[i], Nuclei_array.shape[0]]\n"," Nuclei_number.append(Nuclei_array2) \n","\n"," my_df = pd.DataFrame(Nuclei_number)\n"," my_df.to_csv(Results_folder+'/Nuclei_count.csv', index=False, header=False)\n"," \n","\n"," # One example is displayed\n","\n"," print(\"One example image is displayed bellow:\")\n"," plt.figure(figsize=(10,10))\n"," plt.imshow(img if img.ndim==2 else img[...,:3], clim=(0,1), cmap='gray')\n"," plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)\n"," plt.axis('off');\n"," plt.savefig(name_no_extension[i]+\"_overlay.tif\")\n","\n","if Data_type == 2 :\n"," print(\"Stacks are now beeing predicted\")\n"," np.random.seed(42)\n"," lbl_cmap = random_label_cmap()\n"," Y = sorted(glob(Data_folder))\n"," X = list(map(imread,Y))\n"," n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n"," axis_norm = (0,1) # normalize channels independently\n"," # axis_norm = (0,1,2) # normalize channels jointly\n"," if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," #Load a pretrained network\n"," model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n"," \n"," names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n","\n"," # Create a list of name with no extension\n"," \n"," name_no_extension = []\n"," for n in names:\n"," name_no_extension.append(os.path.splitext(n)[0])\n","\n"," outputdir = Path(Results_folder)\n","\n","# Save all ROIs and images in Results folder.\n"," for num, i in enumerate(X):\n"," print(\"Performing prediction on: \"+names[num])\n","\n"," \n"," timelapse = np.stack(i)\n"," timelapse = 
normalize(timelapse, 1,99.8, axis=(0,)+tuple(1+np.array(axis_norm)))\n"," timelapse.shape\n","\n"," if Region_of_interests: \n"," polygons = [model.predict_instances(frame)[1]['coord'] for frame in tqdm(timelapse)] \n"," export_imagej_rois(os.path.join(outputdir, name_no_extension[num]), polygons) \n"," \n"," n_timepoint = timelapse.shape[0]\n"," prediction_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n"," Tracking_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n","\n","# Save the masks in the result folder\n"," if Mask_images or Tracking_file:\n"," for t in range(n_timepoint):\n"," img_t = timelapse[t]\n"," labels, polygons = model.predict_instances(img_t) \n"," prediction_stack[t] = labels\n","\n","# Create a tracking file for trackmate\n","\n"," for point in polygons['points']:\n"," cv2.circle(Tracking_stack[t],tuple(point),0,(1), -1)\n","\n"," prediction_stack_32 = img_as_float32(prediction_stack, force_copy=False)\n"," Tracking_stack_32 = img_as_float32(Tracking_stack, force_copy=False)\n"," Tracking_stack_8 = img_as_ubyte(Tracking_stack_32, force_copy=True)\n"," \n"," Tracking_stack_8_rot = np.rot90(Tracking_stack_8, axes=(1,2))\n"," Tracking_stack_8_rot_flip = np.fliplr(Tracking_stack_8_rot)\n","\n"," os.chdir(Results_folder)\n"," if Mask_images:\n"," imsave(names[num], prediction_stack_32)\n"," if Tracking_file:\n"," imsave(name_no_extension[num]+\"_tracking_file.tif\", Tracking_stack_8_rot_flip)\n","\n"," \n","\n","print(\"Predictions completed\") "],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"SxJsrw3kTcFx","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
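One possible way to grab all results in a single download is sketched below, with a placeholder results path; `shutil.make_archive` and `google.colab.files.download` are standard calls, but this cell is an assumption and not part of the original notebook.

```python
# Optional sketch: zip a results folder so it can be downloaded in one go.
# "Results_folder" is a placeholder; point it at your own results folder.
import shutil
from google.colab import files

Results_folder = "/content/gdrive/My Drive/Results"  # placeholder path
archive_path = shutil.make_archive("/content/StarDist_results", "zip", Results_folder)
files.download(archive_path)  # triggers a browser download from Colab
```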
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"rH_J20ydXWRQ","colab_type":"text"},"source":["\n","#**Thank you for using Stardist 2D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"colab":{"name":"StarDist_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1WAfQW1Mj3wy1XQZZUfU4DJVS_R_E8Cn3","timestamp":1585665697353},{"file_id":"1PKVyox_mx2rEE3VlMFQtdnVULJFhYPaD","timestamp":1583443864213},{"file_id":"1XSclOkhhHmn-9LQc9k8c3Y6seT1LEi-Y","timestamp":1583264105465},{"file_id":"1VPZYk3MeSVyZVVEmesz10VtujbD4diJk","timestamp":1579481583477},{"file_id":"1ENdOZir1Gytf6JxzyfbjgfxO3_C1dLHK","timestamp":1575415287126},{"file_id":"1G8b4dF2kCs3ePBGZthPUGOyjJpZ2G_Dm","timestamp":1575379725785},{"file_id":"1P0tT0RR_b3SFKvOcON_MzcAIcxRUQK5B","timestamp":1575377313115},{"file_id":"1hQz8PyJzBRkBZc9NwxM9mU9azRSvghBk","timestamp":1574783624098},{"file_id":"14mWTNjHgIbuuWAxb-0lhmhdIvMoZgrI0","timestamp":1574099686195},{"file_id":"1IWvFuBb0gqaJcUXhhfbcTWNh9cZEXW4S","timestamp":1573647131082},{"file_id":"1hFulBwI57YU6GoVc8sBt5KNIkCS7ynQ3","timestamp":1573579952409},{"file_id":"1Ba_Bu-PXN_2Mq5W6YHMgUYsJEfgbPtS-","timestamp":1573035984524},{"file_id":"1ePC44Qq_C2hSFGPM3PKyb0J6UBXSPddp","timestamp":1573032545399},{"file_id":"https://github.com/mpicbg-csbd/stardist/blob/master/examples/2D/2_training.ipynb","timestamp":1572984225873}],"collapsed_sections":[],"toc_visible":true},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"kiFRRolPa-Rb","colab_type":"text"},"source":["# **StarDist (2D)**\n","---\n","\n","**StarDist 2D** is a deep-learning method that can be used to segment cell nuclei from bioimages and was first published by [Schmidt *et al.* in 2018, on arXiv](https://arxiv.org/abs/1806.03535). It uses a shape representation based on star-convex polygons for nuclei in an image to predict the presence and the shape of these nuclei. This StarDist 2D network is based on an adapted U-Net network architecture.\n","\n"," **This particular notebook enables nuclei segmentation of 2D dataset. If you are interested in 3D dataset, you should use the StarDist 3D notebook instead.**\n","\n","---\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper:\n","\n","**Cell Detection with Star-convex Polygons** from Schmidt *et al.*, International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), Granada, Spain, September 2018. 
(https://arxiv.org/abs/1806.03535)\n","\n","and the 3D extension of the approach:\n","\n","**Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy** from Weigert *et al.* published on arXiv in 2019 (https://arxiv.org/abs/1908.03636)\n","\n","**The Original code** is freely available in GitHub:\n","https://github.com/mpicbg-csbd/stardist\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"iSuNqQ2ZMVGM","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"4-oByBSdE6DE","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For StarDist to train, **it needs to have access to a paired training dataset made of images of nuclei and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. 
These images can be used to assess the quality of your trained model**. The quality control assessment can be done directly in this notebook.\n","\n","The data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Images\" (Training_source) and \"Training - Masks\" (Training_target).\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Please note that you currently can **only use .tif files!**\n","\n","You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. This can include Test dataset for which you have the equivalent output and can compare to what the network provides.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Images of nuclei (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - Masks (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Images of nuclei\n"," - img_1.tif, img_2.tif\n"," - Masks \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"t1sYuLChbRV3","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CDxBu1-19OyC","colab_type":"text"},"source":["\n","\n","## **1.1. 
Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"4waLStm0RPFo","colab_type":"code","cellView":"form","colab":{"base_uri":"https://localhost:8080/","height":362},"executionInfo":{"status":"ok","timestamp":1596557087130,"user_tz":-60,"elapsed":9715,"user":{"displayName":"Romain Laine","photoUrl":"","userId":"09656923706700292222"}},"outputId":"128b12db-f59a-46d3-c9de-81918e83960b"},"source":["#@markdown ##Run this cell to check if you have GPU access\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"],"execution_count":1,"outputs":[{"output_type":"stream","text":["You have GPU access\n","Tue Aug 4 16:04:44 2020 \n","+-----------------------------------------------------------------------------+\n","| NVIDIA-SMI 450.57 Driver Version: 418.67 CUDA Version: 10.1 |\n","|-------------------------------+----------------------+----------------------+\n","| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|===============================+======================+======================|\n","| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n","| N/A 37C P0 57W / 149W | 134MiB / 11441MiB | 0% Default |\n","| | | ERR! |\n","+-------------------------------+----------------------+----------------------+\n"," \n","+-----------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=============================================================================|\n","| No running processes found |\n","+-----------------------------------------------------------------------------+\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"ZLY4qhgj8w-R","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"Ukil4yuS8seC","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bB0IaQMZmWYM","colab_type":"text"},"source":["# **2. 
Install StarDist and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"j0w7C8P5zPIp","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install StarDist and dependencies\n","\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# Install packages which are not included in Google Colab\n","\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). It uses Keras and Tensorflow.\n","!pip install stardist # contains tools to operate STARDIST.\n","!pip install gputools # improves STARDIST performances\n","!pip install edt # improves STARDIST performances\n","!pip install wget\n","\n","\n","# ------- Variable specific to Stardist -------\n","from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available, relabel_image_stardist, random_label_cmap, relabel_image_stardist, _draw_polygons, export_imagej_rois\n","from stardist.models import Config2D, StarDist2D, StarDistData2D # import objects\n","from stardist.matching import matching_dataset\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","from csbdeep.utils import Path, normalize, download_and_extract_zip_file, plot_history # for loss plot\n","from csbdeep.io import save_tiff_imagej_compatible\n","import numpy as np\n","np.random.seed(42)\n","lbl_cmap = random_label_cmap()\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32, img_as_ubyte, img_as_float\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","import cv2\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DPWhXaltAYgH","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"KWpu5p8utpE2","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HJKFAmuXc6d1"},"source":[" **Paths for training, predictions and results**\n","\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (images of nuclei) and Training_target (masks) training data respecively. 
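Before training, it is worth checking that the two folders really contain matching pairs. Below is a minimal sketch (not one of the notebook's cells; it assumes `Training_source` and `Training_target` have already been filled in in the parameter cell of this section) that lists any unpaired .tif files:

```python
# Sketch: verify that Training_source and Training_target contain matching .tif pairs.
# Assumes Training_source and Training_target are paths to existing folders.
import os

source_files = sorted(f for f in os.listdir(Training_source) if f.lower().endswith('.tif'))
target_files = sorted(f for f in os.listdir(Training_target) if f.lower().endswith('.tif'))

missing_masks  = set(source_files) - set(target_files)   # images with no mask of the same name
missing_images = set(target_files) - set(source_files)   # masks with no image of the same name

print(f"{len(source_files)} source images and {len(target_files)} target masks found")
if missing_masks or missing_images:
    print("WARNING: unpaired files detected")
    print("  images without a mask:", sorted(missing_masks))
    print("  masks without an image:", sorted(missing_images))
else:
    print("All source images have a matching mask.")
```
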
To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, selecting **Copy path**, and pasting it into the corresponding box below.\n","\n","**`model_name`:** Use only a my_model-style name, not my-model (use \"_\", not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","\n","**Training parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained for. Preliminary results can already be observed after 50-100 epochs, but a full training should run for up to 400 epochs. Evaluate the performance after training (see 5.). **Default value: 100**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 2**\n","\n","**`number_of_steps`:** Define the number of training steps per epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch (for instance, 40 training patches with a batch_size of 2 gives 20 steps per epoch). **Default value: Number of patches / batch_size**\n","\n","**`patch_size`:** Input the size of the patches used to train StarDist 2D (length of a side). The value should be smaller than or equal to the dimensions of the image. Make the patch size as large as possible and divisible by 8. **Default value: dimension of the training images** \n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`n_rays`:** Set the number of rays (corners) used for StarDist (for instance, a square has 4 corners). **Default value: 32** \n","\n","**`grid_parameter`:** Increase this number if the cells/nuclei are very large or decrease it if they are very small. **Default value: 2**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as the learning rate. 
**Default value: 0.0003**\n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size value until the OOM error disappear.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"colab_type":"code","cellView":"form","id":"CNJImzzVnr7h","colab":{}},"source":["#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","\n","model_path = \"\" #@param {type:\"string\"}\n","#trained_model = model_path \n","\n","\n","#@markdown ### Other parameters for training:\n","number_of_epochs = 100#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","#GPU_limit = 90 #@param {type:\"number\"}\n","batch_size = 2 #@param {type:\"number\"}\n","number_of_steps = 20#@param {type:\"number\"}\n","patch_size = 1024 #@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","n_rays = 32 #@param {type:\"number\"}\n","grid_parameter = 2#@param [1, 2, 4, 8, 16, 32] {type:\"raw\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 2\n"," n_rays = 32\n"," percentage_validation = 10\n"," grid_parameter = 2\n"," initial_learning_rate = 0.0003\n","\n","percentage = percentage_validation/100\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: Folder already exists and has been removed !!\" + W)\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","# Here we open will randomly chosen input and output image\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check the image dimensions\n","\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","print('Loaded images (width, length) =', x.shape)\n","\n","# If default parameters, patch size is the same as image size\n","if (Use_Default_Advanced_Parameters):\n"," patch_size = min(Image_Y, Image_X)\n"," \n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","# Here we check that the patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is:\",patch_size)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","\n","print(\"Parameters initiated.\")\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","#Here we use a simple normalisation strategy to visualise the image\n","norm = simple_norm(x, percent = 99)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest', cmap=lbl_cmap)\n","plt.title('Training target')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"vgT0NU3P6Bwt","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"8in3wzAw6G6g","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by [Augmentor.](https://github.com/mdbloice/Augmentor)\n","\n","[Augmentor](https://github.com/mdbloice/Augmentor) was described in the following article:\n","\n","Marcus D Bloice, Peter M Roth, Andreas Holzinger, Biomedical image augmentation using Augmentor, Bioinformatics, https://doi.org/10.1093/bioinformatics/btz259\n","\n","**Please also cite this original paper when publishing results obtained using this notebook with augmentation enabled.** "]},{"cell_type":"code","metadata":{"id":"2zk1H8J06aJH","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," !pip install Augmentor\n"," import Augmentor\n","\n","\n","#@markdown ####Choose a factor by which you want to multiply your original dataset\n","\n","Multiply_dataset_by = 2 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please choose the probability of the following image manipulations to be used to augment your dataset (1 = always used; 0 = disabled ):\n","\n","#@markdown ####Mirror and rotate images\n","rotate_90_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","rotate_270_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_left_right = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","flip_top_bottom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image Zoom\n","\n","random_zoom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","random_zoom_magnification = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","#@markdown ####Random image distortion\n","\n","random_distortion = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","#@markdown ####Image shearing and skewing \n","\n","image_shear = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","max_image_shear = 1 #@param {type:\"slider\", min:1, max:25, step:1}\n","\n","skew_image = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","skew_image_magnitude = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n","\n","\n","if Use_Default_Augmentation_Parameters:\n"," rotate_90_degrees = 0.5\n"," rotate_270_degrees = 0.5\n"," flip_left_right = 0.5\n"," flip_top_bottom = 0.5\n","\n"," if not Multiply_dataset_by >5:\n"," random_zoom = 0\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0\n"," image_shear = 0\n"," max_image_shear = 10\n"," skew_image = 0\n"," skew_image_magnitude = 0\n","\n"," if Multiply_dataset_by >5:\n"," random_zoom = 0.1\n"," random_zoom_magnification = 0.9\n"," random_distortion = 0.5\n"," image_shear = 0.2\n"," max_image_shear = 5\n"," skew_image = 0.2\n"," skew_image_magnitude = 0.4\n","\n"," if Multiply_dataset_by >25:\n"," random_zoom = 0.5\n"," random_zoom_magnification = 0.8\n"," random_distortion = 0.5\n"," image_shear = 0.5\n"," max_image_shear = 20\n"," skew_image = 0.5\n"," skew_image_magnitude = 0.6\n","\n","\n","list_files = os.listdir(Training_source)\n","Nb_files = len(list_files)\n","\n","Nb_augmented_files = (Nb_files * 
Multiply_dataset_by)\n","\n","\n","if Use_Data_augmentation:\n"," print(\"Data augmentation enabled\")\n","# Here we set the path for the various folder were the augmented images will be loaded\n","\n","# All images are first saved into the augmented folder\n"," #Augmented_folder = \"/content/Augmented_Folder\"\n"," \n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n","\n"," #Training_source_augmented = \"/content/Training_source_augmented\"\n"," Training_source_augmented = Saving_path+\"/Training_source_augmented\"\n","\n"," if os.path.exists(Training_source_augmented):\n"," shutil.rmtree(Training_source_augmented)\n"," os.makedirs(Training_source_augmented)\n","\n"," #Training_target_augmented = \"/content/Training_target_augmented\"\n"," Training_target_augmented = Saving_path+\"/Training_target_augmented\"\n","\n"," if os.path.exists(Training_target_augmented):\n"," shutil.rmtree(Training_target_augmented)\n"," os.makedirs(Training_target_augmented)\n","\n","\n","# Here we generate the augmented images\n","#Load the images\n"," p = Augmentor.Pipeline(Training_source, Augmented_folder)\n","\n","#Define the matching images\n"," p.ground_truth(Training_target)\n","#Define the augmentation possibilities\n"," if not rotate_90_degrees == 0:\n"," p.rotate90(probability=rotate_90_degrees)\n"," \n"," if not rotate_270_degrees == 0:\n"," p.rotate270(probability=rotate_270_degrees)\n","\n"," if not flip_left_right == 0:\n"," p.flip_left_right(probability=flip_left_right)\n","\n"," if not flip_top_bottom == 0:\n"," p.flip_top_bottom(probability=flip_top_bottom)\n","\n"," if not random_zoom == 0:\n"," p.zoom_random(probability=random_zoom, percentage_area=random_zoom_magnification)\n"," \n"," if not random_distortion == 0:\n"," p.random_distortion(probability=random_distortion, grid_width=4, grid_height=4, magnitude=8)\n","\n"," if not image_shear == 0:\n"," p.shear(probability=image_shear,max_shear_left=20,max_shear_right=20)\n"," \n"," if not skew_image == 0:\n"," p.skew(probability=skew_image,magnitude=skew_image_magnitude)\n","\n"," p.sample(int(Nb_augmented_files))\n","\n"," print(int(Nb_augmented_files),\"matching images generated\")\n","\n","# Here we sort through the images and move them back to augmented trainning source and targets folders\n","\n"," augmented_files = os.listdir(Augmented_folder)\n","\n"," for f in augmented_files:\n","\n"," if (f.startswith(\"_groundtruth_(1)_\")):\n"," shortname_noprefix = f[17:]\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_target_augmented+\"/\"+shortname_noprefix) \n"," if not (f.startswith(\"_groundtruth_(1)_\")):\n"," shutil.copyfile(Augmented_folder+\"/\"+f, Training_source_augmented+\"/\"+f)\n"," \n","\n"," for filename in os.listdir(Training_source_augmented):\n"," os.chdir(Training_source_augmented)\n"," os.rename(filename, filename.replace('_original', ''))\n"," \n"," #Here we clean up the extra files\n"," shutil.rmtree(Augmented_folder)\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\") \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"x4zMG4lMths-","colab_type":"text"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a StarDist model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"SfQeukJJtv9u","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"2D_versatile_fluo_from_Stardist_Fiji\" #@param [\"Model_from_file\", \"2D_versatile_fluo_from_Stardist_Fiji\", \"2D_Demo_Model_from_Stardist_Github\", \"Versatile_H&E_nuclei\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the Demo 2D model provided in the Stardist 2D github ------------------------\n","\n"," if pretrained_model_choice == \"2D_Demo_Model_from_Stardist_Github\":\n"," pretrained_model_name = \"2D_Demo\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_Github\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/config.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/thresholds.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_best.h5?raw=true\", pretrained_model_path) \n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Download the Demo 2D_versatile_fluo_from_Stardist_Fiji ------------------------\n","\n"," if pretrained_model_choice == \"2D_versatile_fluo_from_Stardist_Fiji\":\n"," print(\"Downloading the 2D_versatile_fluo_from_Stardist_Fiji\")\n"," pretrained_model_name = \"2D_versatile_fluo\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," \n"," if 
os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," \n"," wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_fluo.zip\", pretrained_model_path)\n"," \n"," with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_fluo.zip\", 'r') as zip_ref:\n"," zip_ref.extractall(pretrained_model_path)\n"," \n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n","\n","# --------------------- Download the Versatile (H&E nuclei)_fluo_from_Stardist_Fiji ------------------------\n","\n"," if pretrained_model_choice == \"Versatile_H&E_nuclei\":\n"," print(\"Downloading the Versatile_H&E_nuclei from_Stardist_Fiji\")\n"," pretrained_model_name = \"2D_versatile_he\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," \n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," \n"," wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_he.zip\", pretrained_model_path)\n"," \n"," with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_he.zip\", 'r') as zip_ref:\n"," zip_ref.extractall(pretrained_model_path)\n"," \n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n","\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist' + W)\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. 
Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DECuc3HZDbwG","colab_type":"text"},"source":["#**4. Train your network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"NwV5LweiavgQ","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"uTM781rCKT8r","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","training_images_tiff=Training_source_dir+\"/*.tif\"\n","mask_images_tiff=Training_target_dir+\"/*.tif\"\n","\n","# this funtion imports training images and masks and sorts them suitable for the network\n","X = sorted(glob(training_images_tiff)) \n","Y = sorted(glob(mask_images_tiff)) \n","\n","# assert -funtion check that X and Y really have images. If not this cell raises an error\n","assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n","\n","# Here we map the training dataset (images and masks).\n","X = list(map(imread,X))\n","Y = list(map(imread,Y))\n","n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n","\n","#Normalize images and fill small label holes.\n","axis_norm = (0,1) # normalize channels independently\n","# axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," sys.stdout.flush()\n","\n","X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\n","Y = [fill_label_holes(y) for y in tqdm(Y)]\n","\n","#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n","#It is advisable to use 10 % of your training dataset for validation. This ensures the truthfull validation error value. If only few validation images are used network may choose too easy or too challenging images for validation. 
\n","# split training data (images and masks) into training images and validation images.\n","assert len(X) > 1, \"not enough training data\"\n","rng = np.random.RandomState(42)\n","ind = rng.permutation(len(X))\n","n_val = max(1, int(round(percentage * len(ind))))\n","ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n","X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n","X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n","print('number of images: %3d' % len(X))\n","print('- training: %3d' % len(X_trn))\n","print('- validation: %3d' % len(X_val))\n","\n","# Use OpenCL-based computations for data generator during training (requires 'gputools')\n","use_gpu = False and gputools_available()\n","\n","#Here we ensure that our network has a minimal number of steps\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(len(X)/batch_size)+1\n","\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","\n","\n","conf = Config2D (\n"," n_rays = n_rays,\n"," use_gpu = use_gpu,\n"," train_batch_size = batch_size,\n"," n_channel_in = n_channel,\n"," train_patch_size = (patch_size, patch_size),\n"," grid = (grid_parameter, grid_parameter),\n"," train_learning_rate = initial_learning_rate,\n",")\n","\n","# Here we create a model according to section 5.3.\n","model = StarDist2D(conf, name=model_name, basedir=model_path)\n","\n","# --------------------- Using pretrained model ------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","\n","\n","# --------------------- ---------------------- ------------------------\n","\n","#Here we check the FOV of the network.\n","median_size = calculate_extents(list(Y), np.median)\n","fov = np.array(model._axes_tile_overlap('YX'))\n","if any(median_size > fov):\n"," print(bcolors.WARNING+\"WARNING: median object size larger than field of view of the neural network.\")\n","print(conf)\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nnMCvu2PKT9W","colab_type":"text"},"source":["\n","## **4.2. Train the network**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n","\n","**Of Note:** At the end of the training, your model will be automatically exported so it can be used in the Stardist Fiji plugin. You can find it in your model folder (TF_SavedModel.zip). In Fiji, Make sure to choose the right version of tensorflow. You can check at: Edit-- Options-- Tensorflow. 
Choose the version 1.4 (CPU or GPU depending on your system)."]},{"cell_type":"code","metadata":{"id":"XfCF-Q4lKT9e","colab_type":"code","cellView":"form","colab":{}},"source":["start = time.time()\n","\n","#@markdown ##Start training\n","augmenter = None\n","\n","# def augmenter(X_batch, Y_batch):\n","# \"\"\"Augmentation for data batch.\n","# X_batch is a list of input images (length at most batch_size)\n","# Y_batch is the corresponding list of ground-truth label images\n","# \"\"\"\n","# # ...\n","# return X_batch, Y_batch\n","\n","# Training the model. \n","# 'input_epochs' and 'steps' refers to your input data in section 5.1 \n","history = model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n"," epochs=number_of_epochs, steps_per_epoch=number_of_steps)\n","None;\n","\n","print(\"Training done\")\n","\n","print(\"Network optimization in progress\")\n","#Here we optimize the network.\n","model.optimize_thresholds(X_val, Y_val)\n","\n","print(\"Done\")\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","model.export_TF()\n","\n","print(\"Your model has been sucessfully exported and can now also be used in the Stardist Fiji plugin\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iYRrmh0dCrNs","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder.\n","\n"]},{"cell_type":"markdown","metadata":{"id":"U8H7QRfKBzI8","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
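As a quick complement to the loss-curve inspection in section 5.1 below, here is a short sketch (an illustration, not one of the notebook's cells; it assumes training has finished and that `model_path` and `model_name` are set as in section 3.1) that reads the saved training_evaluation.csv and reports the epoch with the lowest validation loss:

```python
# Sketch: find the epoch with the lowest validation loss from the saved training log.
# Assumes training has completed and model_path / model_name are set as in section 3.1.
import os
import pandas as pd

csv_path = os.path.join(model_path, model_name, 'Quality Control', 'training_evaluation.csv')
log = pd.read_csv(csv_path)                      # columns: loss, val_loss, learning rate

best_epoch = log['val_loss'].idxmin()            # row index of the best epoch (0-based)
print("Best epoch: %d of %d" % (best_epoch + 1, len(log)))
print("Validation loss at best epoch: %.4f" % log['val_loss'][best_epoch])
print("Learning rate at best epoch: %s" % log['learning rate'][best_epoch])
```
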
\n","\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"o2O0QnO4PFlz","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else: \n"," print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-2b4RMU_Ec2y","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"KG8wZrA3Ef4n","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"GFJBwr5TEgcq","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder ! The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. 
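For intuition, here is a toy example (made-up 4x4 masks, not data from this notebook) of the same binary Intersection over Union computation used in the quality-control cell below:

```python
# Toy illustration of Intersection over Union (IoU) on two small binary masks.
# The QC cell below applies the same logic to full prediction / ground-truth images.
import numpy as np

ground_truth = np.array([[0, 1, 1, 0],
                         [0, 1, 1, 0],
                         [0, 0, 0, 0],
                         [0, 0, 0, 0]], dtype=bool)

prediction   = np.array([[0, 0, 1, 1],
                         [0, 1, 1, 0],
                         [0, 0, 0, 0],
                         [0, 0, 0, 0]], dtype=bool)

intersection = np.logical_and(ground_truth, prediction)   # pixels labelled in both masks
union        = np.logical_or(ground_truth, prediction)    # pixels labelled in either mask
iou_score    = intersection.sum() / union.sum()

print("IoU = %d / %d = %.2f" % (intersection.sum(), union.sum(), iou_score))  # 3 / 5 = 0.60
```
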
\n","\n"," The results can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\"."]},{"cell_type":"code","metadata":{"id":"EvCMiYaeElc4","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","\n","#Create a quality control Folder and check if the folder already exist\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\") == False:\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","\n","# Generate predictions from the Source_QC_folder and save them in the QC folder\n","\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","n_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n","\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","\n","model = StarDist2D(None, name=QC_model_name, basedir=QC_model_path)\n","\n","names = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n","\n"," \n","# modify the names to suitable form: path_images/image_numberX.tif\n"," \n","lenght_of_Z = len(Z)\n"," \n","for i in range(lenght_of_Z):\n"," img = normalize(Z[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img)\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(names[i], labels, polygons)\n","\n","\n","# Here we start testing the differences between GT and predicted masks\n","\n","\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"image\",\"Prediction v. 
GT Intersection over Union\"]) \n","\n","# define the images\n","\n"," for n in os.listdir(Source_QC_folder):\n"," \n"," if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n"," print('Running QC on: '+n)\n"," test_input = io.imread(os.path.join(Source_QC_folder,n))\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",n))\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, n))\n","\n"," #Convert pixel values to 0 or 255\n"," test_prediction_0_to_255 = test_prediction\n"," test_prediction_0_to_255[test_prediction_0_to_255>0] = 255\n","\n"," #Convert pixel values to 0 or 255\n"," test_ground_truth_0_to_255 = test_ground_truth_image\n"," test_ground_truth_0_to_255[test_ground_truth_0_to_255>0] = 255\n","\n"," # Intersection over Union metric\n","\n"," intersection = np.logical_and(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," union = np.logical_or(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," iou_score = np.sum(intersection) / np.sum(union)\n"," writer.writerow([n, str(iou_score)])\n","\n","\n","#Display the last image\n","\n","f = plt.figure(figsize=(25,25))\n","\n","from astropy.visualization import simple_norm\n","norm = simple_norm(test_input, percent = 99)\n","\n","#Input\n","plt.subplot(1,4,1)\n","plt.axis('off')\n","plt.imshow(test_input, aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n","plt.title('Input')\n","\n","\n","#Ground-truth\n","plt.subplot(1,4,2)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255, aspect='equal', cmap='Greens')\n","plt.title('Ground Truth')\n","\n","#Prediction\n","plt.subplot(1,4,3)\n","plt.axis('off')\n","plt.imshow(test_prediction_0_to_255, aspect='equal', cmap='Purples')\n","plt.title('Prediction')\n","\n","#Overlay\n","plt.subplot(1,4,4)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255, cmap='Greens')\n","plt.imshow(test_prediction_0_to_255, alpha=0.5, cmap='Purples')\n","plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3)));\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iAPmwlxCEzxQ","colab_type":"text"},"source":["# **6. Using the trained model**\n","---"]},{"cell_type":"markdown","metadata":{"id":"btXwwnVpBEMB","colab_type":"text"},"source":["\n","\n","## **6.1 Generate prediction(s) from unseen dataset**\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive.\n","\n","---\n","\n","The current trained model (from section 4.3) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Prediction_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you will train.\n","\n","**`Result_folder`:** This folder will contain the predicted output ROI.\n","\n","**`Data_type`:** Please indicate if the images you want to predict are single images or stacks\n","\n","\n","In stardist the following results can be exported:\n","- Region of interest (ROI) that can be opened in ImageJ / Fiji. 
The ROI are saved inside of a .zip file in your choosen result folder. To open the ROI in Fiji, just drag and drop the zip file !**\n","- The predicted mask images\n","- A tracking file that can easily be imported into Trackmate to track the nuclei (Stacks only).\n","- A CSV file that contains the number of nuclei detected per image (single image only). \n","\n"]},{"cell_type":"code","metadata":{"id":"x8UXP8S2eoo_","colab_type":"code","cellView":"form","colab":{}},"source":["Single_Images = 1\n","Stacks = 2\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Results_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Are your data single images or stacks?\n","\n","Data_type = Single_Images #@param [\"Single_Images\", \"Stacks\"] {type:\"raw\"}\n","\n","#@markdown ###What outputs would you like to generate?\n","Region_of_interests = True #@param {type:\"boolean\"}\n","Mask_images = True #@param {type:\"boolean\"}\n","Tracking_file = False #@param {type:\"boolean\"}\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#single images\n","Data_folder = Data_folder+\"/*.tif\"\n","\n","if Data_type == 1 :\n"," print(\"Single images are now beeing predicted\")\n"," np.random.seed(16)\n"," lbl_cmap = random_label_cmap()\n"," X = sorted(glob(Data_folder))\n"," X = list(map(imread,X))\n"," n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n"," axis_norm = (0,1) # normalize channels independently\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n"," if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n"," \n"," names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n"," \n"," Nuclei_number = []\n","\n"," # modify the names to suitable form: path_images/image_numberX.tif\n"," FILEnames = []\n"," for m in names:\n"," m = Results_folder+'/'+m\n"," FILEnames.append(m)\n","\n"," # Create a list of name with no extension\n"," \n"," name_no_extension=[]\n"," for n in names:\n"," name_no_extension.append(os.path.splitext(n)[0])\n"," \n","\n"," # Save all ROIs and masks into results folder\n"," \n"," for i in range(len(X)):\n"," img = normalize(X[i], 1,99.8, axis = axis_norm)\n"," labels, polygons = model.predict_instances(img)\n"," \n"," os.chdir(Results_folder)\n","\n"," if Mask_images:\n"," imsave(FILEnames[i], labels, polygons)\n","\n"," if Region_of_interests:\n"," export_imagej_rois(name_no_extension[i], polygons['coord'])\n","\n"," if Tracking_file:\n"," print(bcolors.WARNING+\"Tracking files are only generated when stacks are predicted\"+W) \n"," \n"," \n"," Nuclei_array = polygons['coord']\n"," Nuclei_array2 = [names[i], Nuclei_array.shape[0]]\n"," Nuclei_number.append(Nuclei_array2) \n","\n"," my_df = pd.DataFrame(Nuclei_number)\n"," my_df.to_csv(Results_folder+'/Nuclei_count.csv', index=False, header=False)\n"," \n","\n"," # One example is displayed\n","\n"," print(\"One example image is displayed bellow:\")\n"," plt.figure(figsize=(10,10))\n"," plt.imshow(img if img.ndim==2 else img[...,:3], clim=(0,1), cmap='gray')\n"," plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)\n"," plt.axis('off');\n"," plt.savefig(name_no_extension[i]+\"_overlay.tif\")\n","\n","if Data_type == 2 :\n"," print(\"Stacks are now beeing predicted\")\n"," np.random.seed(42)\n"," lbl_cmap = random_label_cmap()\n"," Y = sorted(glob(Data_folder))\n"," X = list(map(imread,Y))\n"," n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n"," axis_norm = (0,1) # normalize channels independently\n"," # axis_norm = (0,1,2) # normalize channels jointly\n"," if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n"," #Load a pretrained network\n"," model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n"," \n"," names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n","\n"," # Create a list of name with no extension\n"," \n"," name_no_extension = []\n"," for n in names:\n"," name_no_extension.append(os.path.splitext(n)[0])\n","\n"," outputdir = Path(Results_folder)\n","\n","# Save all ROIs and images in Results folder.\n"," for num, i in enumerate(X):\n"," print(\"Performing prediction on: \"+names[num])\n","\n"," \n"," timelapse = np.stack(i)\n"," timelapse = 
normalize(timelapse, 1,99.8, axis=(0,)+tuple(1+np.array(axis_norm)))\n"," timelapse.shape\n","\n"," if Region_of_interests: \n"," polygons = [model.predict_instances(frame)[1]['coord'] for frame in tqdm(timelapse)] \n"," export_imagej_rois(os.path.join(outputdir, name_no_extension[num]), polygons) \n"," \n"," n_timepoint = timelapse.shape[0]\n"," prediction_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n"," Tracking_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n","\n","# Save the masks in the result folder\n"," if Mask_images or Tracking_file:\n"," for t in range(n_timepoint):\n"," img_t = timelapse[t]\n"," labels, polygons = model.predict_instances(img_t) \n"," prediction_stack[t] = labels\n","\n","# Create a tracking file for trackmate\n","\n"," for point in polygons['points']:\n"," cv2.circle(Tracking_stack[t],tuple(point),0,(1), -1)\n","\n"," prediction_stack_32 = img_as_float32(prediction_stack, force_copy=False)\n"," Tracking_stack_32 = img_as_float32(Tracking_stack, force_copy=False)\n"," Tracking_stack_8 = img_as_ubyte(Tracking_stack_32, force_copy=True)\n"," \n"," Tracking_stack_8_rot = np.rot90(Tracking_stack_8, axes=(1,2))\n"," Tracking_stack_8_rot_flip = np.fliplr(Tracking_stack_8_rot)\n","\n"," os.chdir(Results_folder)\n"," if Mask_images:\n"," imsave(names[num], prediction_stack_32)\n"," if Tracking_file:\n"," imsave(name_no_extension[num]+\"_tracking_file.tif\", Tracking_stack_8_rot_flip)\n","\n"," \n","\n","print(\"Predictions completed\") "],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"SxJsrw3kTcFx","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
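One possible way to do this directly from the Colab session (an optional sketch, not part of the original notebook; it assumes `Results_folder` was set in section 6.1 and that the predictions have already been generated) is to zip the results folder and download the archive:

```python
# Sketch: archive the results folder and download it from the Colab session.
# Assumes Results_folder was set in section 6.1 and predictions have been generated.
import shutil
from google.colab import files

archive_path = shutil.make_archive('/content/StarDist_results', 'zip', Results_folder)
files.download(archive_path)   # triggers a browser download of the .zip archive
```
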
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"rH_J20ydXWRQ","colab_type":"text"},"source":["\n","#**Thank you for using StarDist 2D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Stardist_3D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Stardist_3D_ZeroCostDL4Mic.ipynb index 1ea833d6..2a7b2e0b 100755 --- a/Colab_notebooks/Stardist_3D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Stardist_3D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"colab":{"name":"Stardist_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1Ur-4VIQ6gf4ONupD6hK0M-AcJkoTzMlU","timestamp":1586789439593},{"file_id":"1PKVyox_mx2rEE3VlMFQtdnVULJFhYPaD","timestamp":1583443864213},{"file_id":"1XSclOkhhHmn-9LQc9k8c3Y6seT1LEi-Y","timestamp":1583264105465},{"file_id":"1VPZYk3MeSVyZVVEmesz10VtujbD4diJk","timestamp":1579481583477},{"file_id":"1ENdOZir1Gytf6JxzyfbjgfxO3_C1dLHK","timestamp":1575415287126},{"file_id":"1G8b4dF2kCs3ePBGZthPUGOyjJpZ2G_Dm","timestamp":1575379725785},{"file_id":"1P0tT0RR_b3SFKvOcON_MzcAIcxRUQK5B","timestamp":1575377313115},{"file_id":"1hQz8PyJzBRkBZc9NwxM9mU9azRSvghBk","timestamp":1574783624098},{"file_id":"14mWTNjHgIbuuWAxb-0lhmhdIvMoZgrI0","timestamp":1574099686195},{"file_id":"1IWvFuBb0gqaJcUXhhfbcTWNh9cZEXW4S","timestamp":1573647131082},{"file_id":"1hFulBwI57YU6GoVc8sBt5KNIkCS7ynQ3","timestamp":1573579952409},{"file_id":"1Ba_Bu-PXN_2Mq5W6YHMgUYsJEfgbPtS-","timestamp":1573035984524},{"file_id":"1ePC44Qq_C2hSFGPM3PKyb0J6UBXSPddp","timestamp":1573032545399},{"file_id":"https://github.com/mpicbg-csbd/stardist/blob/master/examples/2D/2_training.ipynb","timestamp":1572984225873}],"collapsed_sections":[],"toc_visible":true},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"kiFRRolPa-Rb","colab_type":"text"},"source":["# **Cell nuclei detection by Stardist 3D**\n","---\n","\n","**Stardist** is a deep-learning method that can be used to segment cell nuclei in 3D (xyz) images. \n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper:\n","\n","[Cell Detection with Star-convex Polygons](https://arxiv.org/abs/1806.03535)\n","\n","Uwe Schmidt, Martin Weigert, Coleman Broaddus, and Gene Myers.\n","International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), Granada, Spain, September 2018.\n","\n","[Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy](https://arxiv.org/abs/1908.03636)\n","\n","Martin Weigert, Uwe Schmidt, Robert Haase, Ko Sugawara, and Gene Myers. 
arXiv, 2019\n","\n","**The original code** is freely available on GitHub:\n","https://github.com/mpicbg-csbd/stardist\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"iSuNqQ2ZMVGM","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets, as well as a typical use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cells: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor over the `[ ]`-mark on the left side of the cell (a play button appears) and click it. Once execution is complete, the play button animation stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you will find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click an entry to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this tab when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"4-oByBSdE6DE","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For Stardist to train, **it needs to have access to a paired training dataset made of images of nuclei and their corresponding masks**. Information on how to generate a training dataset is available on our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n","The data structure is important. 
It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Images\" (Training_source) and \"Training - Masks\" (Training_target).\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Please note that you currently can **only use .tif files!**\n","\n","You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Images of nuclei (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - Masks (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Images of nuclei\n"," - img_1.tif, img_2.tif\n"," - **Masks** \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"t1sYuLChbRV3","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CDxBu1-19OyC","colab_type":"text"},"source":["\n","\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"4waLStm0RPFo","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"ZLY4qhgj8w-R","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. 
In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"Ukil4yuS8seC","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bB0IaQMZmWYM","colab_type":"text"},"source":["# **2. Install Stardist and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"j0w7C8P5zPIp","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Stardist and dependencies\n","\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# Install packages which are not included in Google Colab\n","\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). It uses Keras and Tensorflow.\n","!pip install stardist # contains tools to operate STARDIST.\n","!pip install gputools\n","!pip install edt\n","!pip install wget\n","\n","\n","# ------- Variable specific to Stardist -------\n","from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available\n","from stardist.models import Config3D, StarDist3D, StarDistData3D\n","from stardist import relabel_image_stardist3D, Rays_GoldenSpiral, calculate_extents\n","from stardist.matching import matching_dataset\n","from csbdeep.utils import Path, normalize, download_and_extract_zip_file, plot_history # for loss plot\n","from csbdeep.io import save_tiff_imagej_compatible\n","import numpy as np\n","np.random.seed(42)\n","lbl_cmap = random_label_cmap()\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","import cv2\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","\n","# Colors for the warning messages\n","class 
bcolors:\n"," WARNING = '\\033[31m'\n","\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DPWhXaltAYgH","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n","\n"]},{"cell_type":"markdown","metadata":{"id":"nAW3oU60htR_","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HJKFAmuXc6d1"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (images of nuclei) and Training_target (masks) training data, respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model-style names, not my-model (use \"_\", not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained for. Preliminary results can already be observed after 400 epochs, but a full training should run for more. Evaluate the performance after training (see 5.). **Default value: 400**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`patch_size`:** and **`patch_height`:** Input the size of the patches used to train Stardist 3D (length of a side). The value should be smaller than or equal to the dimensions of the image. Make patch_size and patch_height as large as possible and divisible by 8 and 4, respectively. **Default value: dimension of the training images**\n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappears.**\n","\n","**`number_of_steps`:** Define the number of training steps per epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patches / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1** \n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`n_rays`:** Set the number of rays (corners) used for Stardist (for instance, a square has 4 corners). **Default value: 96** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as the learning rate. 
**Default value: 0.0003**"]},{"cell_type":"code","metadata":{"colab_type":"code","cellView":"form","id":"CNJImzzVnr7h","colab":{}},"source":["\n","\n","#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","training_images = Training_source\n","\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","mask_images = Training_target \n","\n","\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","\n","model_path = \"\" #@param {type:\"string\"}\n","trained_model = model_path \n","\n","#@markdown ### Other parameters for training:\n","number_of_epochs = 400#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","#GPU_limit = 90 #@param {type:\"number\"}\n","number_of_steps = 100#@param {type:\"number\"}\n","patch_size = 64#@param {type:\"number\"} # pixels in\n","patch_height = 64#@param {type:\"number\"}\n","batch_size = 1#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","n_rays = 96 #@param {type:\"number\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 1\n"," n_rays = 96\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0003\n","\n","\n","percentage = percentage_validation/100\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","# If default parameters, patch size is the same as image size\n","if (Use_Default_Advanced_Parameters): \n"," patch_size = min(Image_Y, Image_X) \n"," patch_height = Image_Z\n","\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","\n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","print(\"Parameters initiated.\")\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","#Here we use a simple normalisation strategy to visualise the image\n","from astropy.visualization import simple_norm\n","norm = simple_norm(x, percent = 99)\n","\n","mid_plane = int(Image_Z / 2)+1\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n","plt.axis('off')\n","plt.title('Training source (single Z plane)');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n","plt.axis('off')\n","plt.title('Training target (single Z plane)');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nbyf-RevQhDL","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"UQ2hultWQlT9","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performance.** \n","\n","Data augmentation is performed here by rotating the training images in the XY-plane and flipping them along the X-axis, as well as by performing elastic deformations.\n","\n","**The flip option and the elastic deformation will each double the size of your dataset, rotation will quadruple it, and all of them together will increase the dataset by a factor of 16.**\n","\n"," Elastic deformations are performed by [Elasticdeform](https://elasticdeform.readthedocs.io/en/latest/index.html).\n"]},{"cell_type":"code","metadata":{"id":"wYdTY6ULg01b","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###See elasticdeform’s license\n","#Copyright (c) 2001, 2002 Enthought, Inc. All rights reserved.\n","\n","#Copyright (c) 2003-2017 SciPy Developers. All rights reserved.\n","\n","#Copyright (c) 2018 Gijs van Tulder. All rights reserved.\n","\n","#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n","\n","##Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n","#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n","#Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission.\n","#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n","\n","print(\"Double click to see elasticdeform’s license\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"kKLB47jgQrxr","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","#@markdown **Deform your images**\n","\n","Elastic_deformation = True #@param {type:\"boolean\"}\n","\n","Deformation_Sigma = 3 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","Save_augmented_images = True #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Training_source_augmented+'/'+image,source_img)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Training_target_augmented+'/'+image,target_img)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," 
io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Training_source_augmented+'/'+image,source_img)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Training_target_augmented+'/'+image,target_img)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","\n","\n","if Use_Data_augmentation:\n","\n","\n"," if Elastic_deformation:\n"," !pip install elasticdeform\n"," import numpy, imageio, elasticdeform\n","\n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n","\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n"," Training_source_augmented = Augmented_folder+\"/Training_source\"\n"," os.makedirs(Training_source_augmented)\n"," Training_target_augmented = Augmented_folder+\"/Training_target\"\n"," os.makedirs(Training_target_augmented)\n"," print(\"Data augmentation enabled\")\n"," print(\"Generation of the augmented dataset in progress\")\n","\n"," if Elastic_deformation:\n"," for filename in os.listdir(Training_source):\n"," X = imread(os.path.join(Training_source, filename))\n"," Y = imread(os.path.join(Training_target, filename))\n"," [X_deformed, Y_deformed] = elasticdeform.deform_random_grid([X, Y], sigma=Deformation_Sigma, order=0)\n","\n"," os.chdir(Augmented_folder+\"/Training_source\")\n"," imsave(filename, X)\n"," imsave(filename+\"_deformed.tif\", X_deformed)\n","\n"," os.chdir(Augmented_folder+\"/Training_target\")\n"," imsave(filename, Y)\n"," imsave(filename+\"_deformed.tif\", Y_deformed)\n","\n"," Training_source_rot = Training_source_augmented\n"," Training_target_rot = Training_target_augmented\n"," \n"," if not Elastic_deformation:\n"," Training_source_rot = Training_source\n"," Training_target_rot = Training_target\n","\n"," \n"," if Rotation == True:\n"," rotation_aug(Training_source_rot,Training_target_rot,flip=Flip)\n"," elif Rotation == False and Flip == True:\n"," flip(Training_source_rot,Training_target_rot)\n","\n"," print(\"Done\")\n","\n"," if Elastic_deformation:\n"," from astropy.visualization import simple_norm\n"," norm = simple_norm(x, percent = 99)\n","\n"," random_choice=random.choice(os.listdir(Training_source))\n"," x = imread(Augmented_folder+\"/Training_source/\"+random_choice)\n"," x_deformed = 
imread(Augmented_folder+\"/Training_source/\"+random_choice+\"_deformed.tif\")\n"," y = imread(Augmented_folder+\"/Training_target/\"+random_choice)\n"," y_deformed = imread(Augmented_folder+\"/Training_target/\"+random_choice+\"_deformed.tif\") \n","\n"," Image_Z = x.shape[0]\n"," mid_plane = int(Image_Z / 2)+1\n","\n"," f=plt.figure(figsize=(10,10))\n"," plt.subplot(2,2,1)\n"," plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n"," plt.axis('off')\n"," plt.title('Training source (single Z plane)');\n"," plt.subplot(2,2,2)\n"," plt.imshow(y[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n"," plt.axis('off')\n"," plt.title('Training target (single Z plane)');\n"," plt.subplot(2,2,3)\n"," plt.imshow(x_deformed[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n"," plt.axis('off')\n"," plt.title('Deformed training source (single Z plane)');\n"," plt.subplot(2,2,4)\n"," plt.imshow(y_deformed[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n"," plt.axis('off')\n"," plt.title('Deformed training target (single Z plane)');\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")\n","\n","\n","\n"," \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"pjz-5bRVh1ja","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Stardist model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
"]},{"cell_type":"code","metadata":{"id":"zeSUtd2Thw-O","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Demo_3D_Model_from_Stardist_3D_paper\" #@param [\"Model_from_file\", \"Demo_3D_Model_from_Stardist_3D_paper\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the Demo 3D model provided in the Stardist 3D github ------------------------\n","\n"," if pretrained_model_choice == \"Demo_3D_Model_from_Stardist_3D_paper\":\n"," pretrained_model_name = \"Demo_3D\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the Demo 3D model from the Stardist_3D paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"https://raw.githubusercontent.com/mpicbg-csbd/stardist/master/models/examples/3D_demo/config.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/3D_demo/thresholds.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/3D_demo/weights_best.h5?raw=true\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/3D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = 
csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print(bcolors.WARNING+'Weights found in:')\n"," print(h5_file_path)\n"," print(bcolors.WARNING+'will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DECuc3HZDbwG","colab_type":"text"},"source":["#**4. Train your network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"NwV5LweiavgQ","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"uTM781rCKT8r","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","import warnings\n","warnings.simplefilter(\"ignore\")\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","training_images_tiff=Training_source_dir+\"/*.tif\"\n","mask_images_tiff=Training_target_dir+\"/*.tif\"\n","\n","\n","# this funtion imports training images and masks and sorts them suitable for the network\n","X = sorted(glob(training_images_tiff)) \n","Y = sorted(glob(mask_images_tiff)) \n","\n","# assert -funtion check that X and Y really have images. 
If not this cell raises an error\n","assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n","\n","# Here we map the training dataset (images and masks).\n","X = list(map(imread,X))\n","Y = list(map(imread,Y))\n","\n","n_channel = 1 if X[0].ndim == 3 else X[0].shape[-1]\n","\n","\n","\n","#Normalize images and fill small label holes.\n","axis_norm = (0,1,2) # normalize channels independently\n","# axis_norm = (0,1,2,3) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 3 in axis_norm else 'independently'))\n"," sys.stdout.flush()\n","\n","X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\n","Y = [fill_label_holes(y) for y in tqdm(Y)]\n","\n","#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n","\n","assert len(X) > 1, \"not enough training data\"\n","rng = np.random.RandomState(42)\n","ind = rng.permutation(len(X))\n","n_val = max(1, int(round(percentage * len(ind))))\n","ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n","X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n","X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n","print('number of images: %3d' % len(X))\n","print('- training: %3d' % len(X_trn))\n","print('- validation: %3d' % len(X_val))\n","\n","\n","\n","extents = calculate_extents(Y)\n","anisotropy = tuple(np.max(extents) / extents)\n","print('empirical anisotropy of labeled objects = %s' % str(anisotropy))\n","\n","\n","# Use OpenCL-based computations for data generator during training (requires 'gputools')\n","use_gpu = False and gputools_available()\n","\n","\n","#Here we ensure that our network has a minimal number of steps\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(len(X)/batch_size)+1\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# Predict on subsampled grid for increased efficiency and larger field of view\n","grid = tuple(1 if a > 1.5 else 2 for a in anisotropy)\n","\n","# Use rays on a Fibonacci lattice adjusted for measured anisotropy of the training data\n","rays = Rays_GoldenSpiral(n_rays, anisotropy=anisotropy)\n","\n","conf = Config3D (\n"," rays = rays,\n"," grid = grid,\n"," anisotropy = anisotropy,\n"," use_gpu = use_gpu,\n"," n_channel_in = n_channel,\n"," train_learning_rate = initial_learning_rate,\n"," train_patch_size = (patch_height, patch_size, patch_size),\n"," train_batch_size = batch_size,\n",")\n","print(conf)\n","vars(conf)\n","\n","\n","# --------------------- This is currently disabled as it give an error ------------------------\n","#here we limit GPU to 80%\n","if use_gpu:\n"," from csbdeep.utils.tf import limit_gpu_memory\n"," # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations\n"," limit_gpu_memory(0.8)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# Here we create a model according to section 5.3.\n","model = StarDist3D(conf, name=model_name, basedir=trained_model)\n","\n","# --------------------- Using pretrained model 
------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here we check the FOV of the network.\n","median_size = calculate_extents(Y, np.median)\n","fov = np.array(model._axes_tile_overlap('ZYX'))\n","if any(median_size > fov):\n"," print(\"WARNING: median object size larger than field of view of the neural network.\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nnMCvu2PKT9W","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"id":"XfCF-Q4lKT9e","colab_type":"code","cellView":"form","colab":{}},"source":["import time\n","start = time.time()\n","\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","\n","#@markdown ##Start Training\n","augmenter = None\n","\n","# def augmenter(X_batch, Y_batch):\n","# \"\"\"Augmentation for data batch.\n","# X_batch is a list of input images (length at most batch_size)\n","# Y_batch is the corresponding list of ground-truth label images\n","# \"\"\"\n","# # ...\n","# return X_batch, Y_batch\n","\n","# Training the model. \n","# 'input_epochs' and 'steps' refers to your input data in section 5.1 \n","history = model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n"," epochs=number_of_epochs, steps_per_epoch=number_of_steps)\n","None;\n","\n","print(\"Training done\")\n","\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","print(\"Network optimization in progress\")\n","\n","#Here we optimize the network.\n","model.optimize_thresholds(X_val, Y_val)\n","print(\"Done\")\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iYRrmh0dCrNs","colab_type":"text"},"source":["## **4.3. 
Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"LqH54fYhdbXU","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"RzAHUsi-78Ak","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"w3Z7Jkv8bPvq","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. 
In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"05dbg6UrGunj","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"mBkuXf5zhHUd","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder ! The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n","\n"," The results can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\"."]},{"cell_type":"code","metadata":{"id":"i9ek_kIHhK1R","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Give the paths to an image to test the performance of the model with.\n","\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. 
\n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 1#@param {type:\"number\"}\n","n_tiles_X = 1#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","#Create a quality control Folder and check if the folder already exist\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\") == False:\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","\n","# Generate predictions from the Source_QC_folder and save them in the QC folder\n","\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","n_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n","\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","\n","model = StarDist3D(None, name=QC_model_name, basedir=QC_model_path)\n","\n","names = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n","\n"," \n","# modify the names to suitable form: path_images/image_numberX.tif\n"," \n","lenght_of_Z = len(Z)\n"," \n","for i in range(lenght_of_Z):\n"," img = normalize(Z[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img, n_tiles=n_tilesZYX)\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(names[i], labels, polygons)\n","\n","\n","# Here we start testing the differences between GT and predicted masks\n","\n","\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"image\",\"Prediction v. 
GT Intersection over Union\"]) \n","\n","# define the images\n","\n"," for n in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n"," print('Running QC on: '+n)\n"," \n"," test_input = io.imread(os.path.join(Source_QC_folder,n))\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",n))\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, n))\n","\n","#Convert pixel values to 0 or 255\n"," test_prediction_0_to_255 = test_prediction\n"," test_prediction_0_to_255[test_prediction_0_to_255>0] = 255\n","\n","#Convert pixel values to 0 or 255\n"," test_ground_truth_0_to_255 = test_ground_truth_image\n"," test_ground_truth_0_to_255[test_ground_truth_0_to_255>0] = 255\n","\n","# Intersection over Union metric\n","\n"," intersection = np.logical_and(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," union = np.logical_or(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," iou_score = np.sum(intersection) / np.sum(union)\n"," writer.writerow([n, str(iou_score)])\n","\n","\n","Image_Z = test_input.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Display the last image\n","\n","f=plt.figure(figsize=(25,25))\n","\n","from astropy.visualization import simple_norm\n","norm = simple_norm(test_input, percent = 99)\n","\n","#Input\n","plt.subplot(1,4,1)\n","plt.axis('off')\n","plt.imshow(test_input[mid_plane], aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n","plt.title('Input')\n","\n","#Ground-truth\n","plt.subplot(1,4,2)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255[mid_plane], aspect='equal', cmap='Greens')\n","plt.title('Ground Truth')\n","\n","#Prediction\n","plt.subplot(1,4,3)\n","plt.axis('off')\n","plt.imshow(test_prediction_0_to_255[mid_plane], aspect='equal', cmap='Purples')\n","plt.title('Prediction')\n","\n","#Overlay\n","plt.subplot(1,4,4)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255[mid_plane], cmap='Greens')\n","plt.imshow(test_prediction_0_to_255[mid_plane], alpha=0.5, cmap='Purples')\n","plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3)))\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"U8H7QRfKBzI8","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"btXwwnVpBEMB","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.3) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
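For reference, the prediction cell below derives the model name and its parent folder from the path you provide and then re-instantiates the trained model in the standard StarDist way; a minimal sketch (the example path is hypothetical):

```python
import os
from stardist.models import StarDist3D

# Hypothetical example path to a previously trained model folder on your Drive
Prediction_model_folder = "/content/gdrive/My Drive/models/my_stardist_model"

# Split the folder path into the model name and its parent directory,
# which is how StarDist expects to receive them
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)

# Re-instantiate the trained model from disk (configuration and weights are read from the folder)
model = StarDist3D(None, name=Prediction_model_name, basedir=Prediction_model_path)
```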
Predicted output images are saved in your **Prediction_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you trained.\n","\n","**`Result_folder`:** This folder will contain the predicted output ROI.\n","\n","**`Data_type`:** Please indicate if the images you want to predict are single images or stacks\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"x8UXP8S2eoo_","colab_type":"code","cellView":"form","colab":{}},"source":["from PIL import Image\n","\n","\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","#test_dataset = Data_folder\n","\n","Results_folder = \"\" #@param {type:\"string\"}\n","#results = results_folder\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 1#@param {type:\"number\"}\n","n_tiles_X = 1#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#single images\n","#testDATA = test_dataset\n","Dataset = Data_folder+\"/*.tif\"\n","\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","X = sorted(glob(Dataset))\n","X = list(map(imread,X))\n","n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n"," \n","# axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","model = StarDist3D(None, name=Prediction_model_name, basedir=Prediction_model_path)\n"," \n","#Sorting and mapping original test dataset\n","X = sorted(glob(Dataset))\n","X = list(map(imread,X))\n","names = [os.path.basename(f) for f in sorted(glob(Dataset))]\n","\n","# modify the names to suitable form: path_images/image_numberX.tif\n","FILEnames=[]\n","for m in names:\n"," m=Results_folder+'/'+m\n"," FILEnames.append(m)\n","\n"," # Predictions folder\n","lenght_of_X = len(X)\n","for i in range(lenght_of_X):\n"," img = normalize(X[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img, n_tiles=n_tilesZYX)\n"," \n","# Save the predicted mask in the result folder\n"," os.chdir(Results_folder)\n"," imsave(FILEnames[i], labels, polygons)\n","\n"," # One example image \n","print(\"One example image is displayed bellow:\")\n","plt.figure(figsize=(13,10))\n","z = max(0, img.shape[0] // 2 - 5)\n","plt.subplot(121)\n","plt.imshow((img if img.ndim==3 else img[...,:3])[z], clim=(0,1), cmap='gray')\n","plt.title('Raw image (XY slice)')\n","plt.axis('off')\n","plt.subplot(122)\n","plt.imshow((img if img.ndim==3 else img[...,:3])[z], clim=(0,1), cmap='gray')\n","plt.imshow(labels[z], cmap=lbl_cmap, alpha=0.5)\n","plt.title('Image and predicted labels (XY slice)')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"SxJsrw3kTcFx","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
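One convenient way to do this without leaving the notebook is to zip the results folder and download the archive directly from Colab. This is not part of the notebook's own workflow, just a sketch under the assumption of a hypothetical results path:

```python
import shutil
from google.colab import files  # Colab-only helper for browser downloads

# Hypothetical path to the folder you want to back up (e.g. your results folder)
Results_folder = "/content/gdrive/My Drive/Stardist3D_results"

# Create Results_backup.zip in /content, then trigger a browser download
archive_path = shutil.make_archive("/content/Results_backup", "zip", Results_folder)
files.download(archive_path)
```

Downloading an archive through the browser also avoids relying on Google Drive's own sync to retrieve the files.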
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"rH_J20ydXWRQ","colab_type":"text"},"source":["#**Thank you for using Stardist 3D!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"colab":{"name":"StarDist_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1Ur-4VIQ6gf4ONupD6hK0M-AcJkoTzMlU","timestamp":1586789439593},{"file_id":"1PKVyox_mx2rEE3VlMFQtdnVULJFhYPaD","timestamp":1583443864213},{"file_id":"1XSclOkhhHmn-9LQc9k8c3Y6seT1LEi-Y","timestamp":1583264105465},{"file_id":"1VPZYk3MeSVyZVVEmesz10VtujbD4diJk","timestamp":1579481583477},{"file_id":"1ENdOZir1Gytf6JxzyfbjgfxO3_C1dLHK","timestamp":1575415287126},{"file_id":"1G8b4dF2kCs3ePBGZthPUGOyjJpZ2G_Dm","timestamp":1575379725785},{"file_id":"1P0tT0RR_b3SFKvOcON_MzcAIcxRUQK5B","timestamp":1575377313115},{"file_id":"1hQz8PyJzBRkBZc9NwxM9mU9azRSvghBk","timestamp":1574783624098},{"file_id":"14mWTNjHgIbuuWAxb-0lhmhdIvMoZgrI0","timestamp":1574099686195},{"file_id":"1IWvFuBb0gqaJcUXhhfbcTWNh9cZEXW4S","timestamp":1573647131082},{"file_id":"1hFulBwI57YU6GoVc8sBt5KNIkCS7ynQ3","timestamp":1573579952409},{"file_id":"1Ba_Bu-PXN_2Mq5W6YHMgUYsJEfgbPtS-","timestamp":1573035984524},{"file_id":"1ePC44Qq_C2hSFGPM3PKyb0J6UBXSPddp","timestamp":1573032545399},{"file_id":"https://github.com/mpicbg-csbd/stardist/blob/master/examples/2D/2_training.ipynb","timestamp":1572984225873}],"collapsed_sections":[],"toc_visible":true},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"kiFRRolPa-Rb","colab_type":"text"},"source":["# **StarDist (3D)**\n","---\n","\n","**StarDist 3D** is a deep-learning method that can be used to segment cell nuclei from 3D bioimages and was first published by [Weigert *et al.* in 2019 on arXiv](https://arxiv.org/abs/1908.03636), extending to 3D the 2D appraoch from [Schmidt *et al.* in 2018](https://arxiv.org/abs/1806.03535). It uses a shape representation based on star-convex polygons for nuclei in an image to predict the presence and the shape of these nuclei. This StarDist 3D network is based on an adapted ResNet network architecture.\n","\n"," **This particular notebook enables nuclei segmentation of 2D dataset. If you are interested in 3D dataset, you should use the StarDist 3D notebook instead.**\n","\n","---\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the paper:\n","\n","**Cell Detection with Star-convex Polygons** from Schmidt *et al.*, International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), Granada, Spain, September 2018. 
(https://arxiv.org/abs/1806.03535)\n","\n","and the 3D extension of the approach:\n","\n","**Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy** from Weigert *et al.* published on arXiv in 2019 (https://arxiv.org/abs/1908.03636)\n","\n","**The Original code** is freely available in GitHub:\n","https://github.com/mpicbg-csbd/stardist\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"iSuNqQ2ZMVGM","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"4-oByBSdE6DE","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," For StarDist to train, **it needs to have access to a paired training dataset made of images of nuclei and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. 
These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n","The data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Images\" (Training_source) and \"Training - Masks\" (Training_target).\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Please note that you currently can **only use .tif files!**\n","\n","You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Images of nuclei (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - Masks (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Images of nuclei\n"," - img_1.tif, img_2.tif\n"," - **Masks** \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"t1sYuLChbRV3","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"CDxBu1-19OyC","colab_type":"text"},"source":["\n","\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"4waLStm0RPFo","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"ZLY4qhgj8w-R","colab_type":"text"},"source":["## **1.2. 
Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"Ukil4yuS8seC","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"bB0IaQMZmWYM","colab_type":"text"},"source":["# **2. Install StarDist and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"j0w7C8P5zPIp","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install StarDist and dependencies\n","\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","# Install packages which are not included in Google Colab\n","\n","!pip install tifffile # contains tools to operate tiff-files\n","!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). 
It uses Keras and Tensorflow.\n","!pip install stardist # contains tools to operate STARDIST.\n","!pip install gputools\n","!pip install edt\n","!pip install wget\n","\n","\n","# ------- Variable specific to Stardist -------\n","from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available\n","from stardist.models import Config3D, StarDist3D, StarDistData3D\n","from stardist import relabel_image_stardist3D, Rays_GoldenSpiral, calculate_extents\n","from stardist.matching import matching_dataset\n","from csbdeep.utils import Path, normalize, download_and_extract_zip_file, plot_history # for loss plot\n","from csbdeep.io import save_tiff_imagej_compatible\n","import numpy as np\n","np.random.seed(42)\n","lbl_cmap = random_label_cmap()\n","from __future__ import print_function, unicode_literals, absolute_import, division\n","import cv2\n","%matplotlib inline\n","%config InlineBackend.figure_format = 'retina'\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","from skimage.util import img_as_ubyte\n","from tqdm import tqdm \n","\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","print(\"Libraries installed\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DPWhXaltAYgH","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---\n","\n"]},{"cell_type":"markdown","metadata":{"id":"nAW3oU60htR_","colab_type":"text"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"HJKFAmuXc6d1"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (images of nuclei) and Training_target (masks) training data respecively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training parameters**\n","\n","**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. 
Preliminary results can already be observed after a 400 epochs, but a full training should run for more. Evaluate the performance after training (see 5.). **Default value: 400**\n","\n","**Advanced parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1** \n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`patch_size`:** and **`patch_height`:** Input the size of the patches use to train StarDist 3D (length of a side). The value should be smaller or equal to the dimensions of the image. Make patch size and patch_height as large as possible and divisible by 8 and 4, respectively. **Default value: dimension of the training images**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n","\n","**`n_rays`:** Set number of rays (corners) used for StarDist (for instance a cube has 8 corners). **Default value: 96** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0003**\n","\n","**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size and patch_height values until the OOM error disappear.**"]},{"cell_type":"code","metadata":{"colab_type":"code","cellView":"form","id":"CNJImzzVnr7h","colab":{}},"source":["\n","\n","#@markdown ###Path to training images: \n","Training_source = \"\" #@param {type:\"string\"}\n","training_images = Training_source\n","\n","\n","Training_target = \"\" #@param {type:\"string\"}\n","mask_images = Training_target \n","\n","\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","\n","model_path = \"\" #@param {type:\"string\"}\n","trained_model = model_path \n","\n","#@markdown ### Other parameters for training:\n","number_of_epochs = 400#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","#GPU_limit = 90 #@param {type:\"number\"}\n","batch_size = 1#@param {type:\"number\"}\n","number_of_steps = 100#@param {type:\"number\"}\n","patch_size = 64#@param {type:\"number\"} # pixels in\n","patch_height = 64#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","n_rays = 96 #@param {type:\"number\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 1\n"," n_rays = 96\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0003\n","\n","\n","percentage = percentage_validation/100\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: Folder already exists and has been removed !!\")\n"," shutil.rmtree(model_path+'/'+model_name)\n"," \n","\n","random_choice=random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","# Here we check that the input images are stacks\n","if len(x.shape) == 3:\n"," print(\"Image dimensions (z,y,x)\",x.shape)\n","\n","if not len(x.shape) == 3:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. Image dimension\",x.shape)\n","\n","\n","#Find image Z dimension and select the mid-plane\n","Image_Z = x.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[1]\n","Image_X = x.shape[2]\n","\n","# If default parameters, patch size is the same as image size\n","if (Use_Default_Advanced_Parameters): \n"," patch_size = min(Image_Y, Image_X) \n"," patch_height = Image_Z\n","\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_height is smaller than the z dimension of the image \n","\n","if patch_height > Image_Z :\n"," patch_height = Image_Z\n"," print (bcolors.WARNING + \" Your chosen patch_height is bigger than the z dimension of your image; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we check that patch_height is divisible by 4\n","if not patch_height % 4 == 0:\n"," patch_height = ((int(patch_height / 4)-1) * 4)\n"," if patch_height == 0:\n"," patch_height = 4\n"," print (bcolors.WARNING + \" Your chosen patch_height is not divisible by 4; therefore the patch_size chosen is now:\",patch_height)\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = False\n","\n","print(\"Parameters initiated.\")\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","#Here we use a simple normalisation strategy to visualise the image\n","from astropy.visualization import simple_norm\n","norm = simple_norm(x, percent = 99)\n","\n","mid_plane = int(Image_Z / 2)+1\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n","plt.axis('off')\n","plt.title('Training source (single Z plane)');\n","plt.subplot(1,2,2)\n","plt.imshow(y[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n","plt.axis('off')\n","plt.title('Training target (single Z plane)');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nbyf-RevQhDL","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"UQ2hultWQlT9","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. 
This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n"," **However, data augmentation is not a magic solution and may also introduce issues. Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n","\n","Data augmentation is performed here by rotating the training images in the XY-Plane and flipping them along X-Axis as well as performing elastic deformations\n","\n","**The flip option and the elastic deformation will double the size of your dataset, rotation will quadruple and all together will increase the dataset by a factor of 16.**\n","\n"," Elastic deformations performed by [Elasticdeform.](https://elasticdeform.readthedocs.io/en/latest/index.html).\n"]},{"cell_type":"code","metadata":{"id":"wYdTY6ULg01b","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###See Elasticdeform’s license\n","#Copyright (c) 2001, 2002 Enthought, Inc. All rights reserved.\n","\n","#Copyright (c) 2003-2017 SciPy Developers. All rights reserved.\n","\n","#Copyright (c) 2018 Gijs van Tulder. All rights reserved.\n","\n","#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n","\n","##Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n","#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n","#Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission.\n","#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n","\n","print(\"Double click to see elasticdeform’s license\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"kKLB47jgQrxr","colab_type":"code","cellView":"form","colab":{}},"source":["#Data augmentation\n","\n","Use_Data_augmentation = False #@param {type:\"boolean\"}\n","\n","#@markdown **Deform your images**\n","\n","Elastic_deformation = True #@param {type:\"boolean\"}\n","\n","Deformation_Sigma = 3 #@param {type:\"slider\", min:1, max:30, step:1}\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","Save_augmented_images = True #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Training_source_augmented+'/'+image,source_img)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Training_target_augmented+'/'+image,target_img)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," 
io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Training_source_augmented+'/'+image,source_img)\n"," io.imsave(Training_source_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Training_target_augmented+'/'+image,target_img)\n"," io.imsave(Training_target_augmented+'/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","\n","\n","if Use_Data_augmentation:\n","\n","\n"," if Elastic_deformation:\n"," !pip install elasticdeform\n"," import numpy, imageio, elasticdeform\n","\n"," if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n"," Augmented_folder = Saving_path+\"/Augmented_Folder\"\n","\n"," if os.path.exists(Augmented_folder):\n"," shutil.rmtree(Augmented_folder)\n"," os.makedirs(Augmented_folder)\n"," Training_source_augmented = Augmented_folder+\"/Training_source\"\n"," os.makedirs(Training_source_augmented)\n"," Training_target_augmented = Augmented_folder+\"/Training_target\"\n"," os.makedirs(Training_target_augmented)\n"," print(\"Data augmentation enabled\")\n"," print(\"Generation of the augmented dataset in progress\")\n","\n"," if Elastic_deformation:\n"," for filename in os.listdir(Training_source):\n"," X = imread(os.path.join(Training_source, filename))\n"," Y = imread(os.path.join(Training_target, filename))\n"," [X_deformed, Y_deformed] = elasticdeform.deform_random_grid([X, Y], sigma=Deformation_Sigma, order=0)\n","\n"," os.chdir(Augmented_folder+\"/Training_source\")\n"," imsave(filename, X)\n"," imsave(filename+\"_deformed.tif\", X_deformed)\n","\n"," os.chdir(Augmented_folder+\"/Training_target\")\n"," imsave(filename, Y)\n"," imsave(filename+\"_deformed.tif\", Y_deformed)\n","\n"," Training_source_rot = Training_source_augmented\n"," Training_target_rot = Training_target_augmented\n"," \n"," if not Elastic_deformation:\n"," Training_source_rot = Training_source\n"," Training_target_rot = Training_target\n","\n"," \n"," if Rotation == True:\n"," rotation_aug(Training_source_rot,Training_target_rot,flip=Flip)\n"," elif Rotation == False and Flip == True:\n"," flip(Training_source_rot,Training_target_rot)\n","\n"," print(\"Done\")\n","\n"," if Elastic_deformation:\n"," from astropy.visualization import simple_norm\n"," norm = simple_norm(x, percent = 99)\n","\n"," random_choice=random.choice(os.listdir(Training_source))\n"," x = imread(Augmented_folder+\"/Training_source/\"+random_choice)\n"," x_deformed = 
imread(Augmented_folder+\"/Training_source/\"+random_choice+\"_deformed.tif\")\n"," y = imread(Augmented_folder+\"/Training_target/\"+random_choice)\n"," y_deformed = imread(Augmented_folder+\"/Training_target/\"+random_choice+\"_deformed.tif\") \n","\n"," Image_Z = x.shape[0]\n"," mid_plane = int(Image_Z / 2)+1\n","\n"," f=plt.figure(figsize=(10,10))\n"," plt.subplot(2,2,1)\n"," plt.imshow(x[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n"," plt.axis('off')\n"," plt.title('Training source (single Z plane)');\n"," plt.subplot(2,2,2)\n"," plt.imshow(y[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n"," plt.axis('off')\n"," plt.title('Training target (single Z plane)');\n"," plt.subplot(2,2,3)\n"," plt.imshow(x_deformed[mid_plane], interpolation='nearest', norm=norm, cmap='magma')\n"," plt.axis('off')\n"," plt.title('Deformed training source (single Z plane)');\n"," plt.subplot(2,2,4)\n"," plt.imshow(y_deformed[mid_plane], interpolation='nearest', cmap=lbl_cmap)\n"," plt.axis('off')\n"," plt.title('Deformed training target (single Z plane)');\n","\n","if not Use_Data_augmentation:\n"," print(\"Data augmentation disabled\")\n","\n","\n","\n"," \n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"pjz-5bRVh1ja","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a StarDist model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
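In practice, the cell below boils down to pointing at a `weights_best.h5` (or `weights_last.h5`) file inside the chosen model folder and loading it into the freshly built model before training starts in section 4. A minimal sketch (the folder path is hypothetical, and `model` is the StarDist3D model created in section 4.1):

```python
import os

# Hypothetical folder of a previously trained StarDist model
pretrained_model_path = "/content/gdrive/My Drive/models/previous_stardist_model"
Weights_choice = "best"   # or "last"

# ZeroCostDL4Mic convention: weights_best.h5 / weights_last.h5 live inside the model folder
h5_file_path = os.path.join(pretrained_model_path, "weights_" + Weights_choice + ".h5")

if os.path.exists(h5_file_path):
    # "model" is the StarDist3D object built in section 4.1; loading these weights
    # means training continues from the pre-trained state instead of from scratch
    model.load_weights(h5_file_path)
```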
"]},{"cell_type":"code","metadata":{"id":"zeSUtd2Thw-O","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Demo_3D_Model_from_Stardist_3D_paper\" #@param [\"Model_from_file\", \"Demo_3D_Model_from_Stardist_3D_paper\"]\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the Demo 3D model provided in the Stardist 3D github ------------------------\n","\n"," if pretrained_model_choice == \"Demo_3D_Model_from_Stardist_3D_paper\":\n"," pretrained_model_name = \"Demo_3D\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the Demo 3D model from the Stardist_3D paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"https://raw.githubusercontent.com/mpicbg-csbd/stardist/master/models/examples/3D_demo/config.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/3D_demo/thresholds.json\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/3D_demo/weights_best.h5?raw=true\", pretrained_model_path)\n"," wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/3D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = 
csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print(bcolors.WARNING+'Weights found in:')\n"," print(h5_file_path)\n"," print(bcolors.WARNING+'will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained network will be used.')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"DECuc3HZDbwG","colab_type":"text"},"source":["#**4. Train your network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"NwV5LweiavgQ","colab_type":"text"},"source":["## **4.1. Prepare the training data and model for training**\n","---\n","\n","Here, we use the information from 3. to build the model and convert the training data into a suitable format for training."]},{"cell_type":"code","metadata":{"id":"uTM781rCKT8r","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Create the model and dataset objects\n","import warnings\n","warnings.simplefilter(\"ignore\")\n","\n","\n","# --------------------- Here we load the augmented data or the raw data ------------------------\n","\n","if Use_Data_augmentation:\n"," Training_source_dir = Training_source_augmented\n"," Training_target_dir = Training_target_augmented\n","\n","if not Use_Data_augmentation:\n"," Training_source_dir = Training_source\n"," Training_target_dir = Training_target\n","# --------------------- ------------------------------------------------\n","\n","training_images_tiff=Training_source_dir+\"/*.tif\"\n","mask_images_tiff=Training_target_dir+\"/*.tif\"\n","\n","\n","# this funtion imports training images and masks and sorts them suitable for the network\n","X = sorted(glob(training_images_tiff)) \n","Y = sorted(glob(mask_images_tiff)) \n","\n","# assert -funtion check that X and Y really have images. 
If not this cell raises an error\n","assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n","\n","# Here we map the training dataset (images and masks).\n","X = list(map(imread,X))\n","Y = list(map(imread,Y))\n","\n","n_channel = 1 if X[0].ndim == 3 else X[0].shape[-1]\n","\n","\n","\n","#Normalize images and fill small label holes.\n","axis_norm = (0,1,2) # normalize channels independently\n","# axis_norm = (0,1,2,3) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 3 in axis_norm else 'independently'))\n"," sys.stdout.flush()\n","\n","X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\n","Y = [fill_label_holes(y) for y in tqdm(Y)]\n","\n","#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n","\n","assert len(X) > 1, \"not enough training data\"\n","rng = np.random.RandomState(42)\n","ind = rng.permutation(len(X))\n","n_val = max(1, int(round(percentage * len(ind))))\n","ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n","X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n","X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n","print('number of images: %3d' % len(X))\n","print('- training: %3d' % len(X_trn))\n","print('- validation: %3d' % len(X_val))\n","\n","\n","\n","extents = calculate_extents(Y)\n","anisotropy = tuple(np.max(extents) / extents)\n","print('empirical anisotropy of labeled objects = %s' % str(anisotropy))\n","\n","\n","# Use OpenCL-based computations for data generator during training (requires 'gputools')\n","use_gpu = False and gputools_available()\n","\n","\n","#Here we ensure that our network has a minimal number of steps\n","if (Use_Default_Advanced_Parameters): \n"," number_of_steps= int(len(X)/batch_size)+1\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# Predict on subsampled grid for increased efficiency and larger field of view\n","grid = tuple(1 if a > 1.5 else 2 for a in anisotropy)\n","\n","# Use rays on a Fibonacci lattice adjusted for measured anisotropy of the training data\n","rays = Rays_GoldenSpiral(n_rays, anisotropy=anisotropy)\n","\n","conf = Config3D (\n"," rays = rays,\n"," grid = grid,\n"," anisotropy = anisotropy,\n"," use_gpu = use_gpu,\n"," n_channel_in = n_channel,\n"," train_learning_rate = initial_learning_rate,\n"," train_patch_size = (patch_height, patch_size, patch_size),\n"," train_batch_size = batch_size,\n",")\n","print(conf)\n","vars(conf)\n","\n","\n","# --------------------- This is currently disabled as it give an error ------------------------\n","#here we limit GPU to 80%\n","if use_gpu:\n"," from csbdeep.utils.tf import limit_gpu_memory\n"," # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations\n"," limit_gpu_memory(0.8)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# Here we create a model according to section 5.3.\n","model = StarDist3D(conf, name=model_name, basedir=trained_model)\n","\n","# --------------------- Using pretrained model 
------------------------\n","# Load the pretrained weights \n","if Use_pretrained_model:\n"," model.load_weights(h5_file_path)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","#Here we check the FOV of the network.\n","median_size = calculate_extents(Y, np.median)\n","fov = np.array(model._axes_tile_overlap('ZYX'))\n","if any(median_size > fov):\n"," print(\"WARNING: median object size larger than field of view of the neural network.\")\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"nnMCvu2PKT9W","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point."]},{"cell_type":"code","metadata":{"id":"XfCF-Q4lKT9e","colab_type":"code","cellView":"form","colab":{}},"source":["import time\n","start = time.time()\n","\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","#@markdown ##Start training\n","\n","augmenter = None\n","\n","# def augmenter(X_batch, Y_batch):\n","# \"\"\"Augmentation for data batch.\n","# X_batch is a list of input images (length at most batch_size)\n","# Y_batch is the corresponding list of ground-truth label images\n","# \"\"\"\n","# # ...\n","# return X_batch, Y_batch\n","\n","# Training the model. \n","# 'input_epochs' and 'steps' refers to your input data in section 5.1 \n","history = model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n"," epochs=number_of_epochs, steps_per_epoch=number_of_steps)\n","None;\n","\n","print(\"Training done\")\n","\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n"," shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n","\n","\n","print(\"Network optimization in progress\")\n","\n","#Here we optimize the network.\n","model.optimize_thresholds(X_val, Y_val)\n","print(\"Done\")\n","\n","\n","# Displaying the time elapsed for training\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"iYRrmh0dCrNs","colab_type":"text"},"source":["## **4.3. 
Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"LqH54fYhdbXU","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"RzAHUsi-78Ak","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"w3Z7Jkv8bPvq","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. 
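Because the notebook stores these values in `training_evaluation.csv` (written in section 4.2 and read by the plotting cell below), you can also check this numerically rather than only by eye; a minimal sketch using pandas, with `QC_model_path` and `QC_model_name` as defined in the cell above:

```python
import pandas as pd

# Same file as the one used by the loss-curve plot below
csv_path = QC_model_path + '/' + QC_model_name + '/Quality Control/training_evaluation.csv'
history = pd.read_csv(csv_path)   # columns: loss, val_loss, learning rate

best_epoch = history['val_loss'].idxmin() + 1
print('Lowest validation loss %.4f at epoch %d of %d'
      % (history['val_loss'].min(), best_epoch, len(history)))

# If val_loss has been rising for many epochs while loss keeps falling,
# that is the numerical signature of overfitting.
```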
In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"05dbg6UrGunj","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"mBkuXf5zhHUd","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder ! The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n","\n"," The results can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\"."]},{"cell_type":"code","metadata":{"id":"i9ek_kIHhK1R","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Give the paths to an image to test the performance of the model with.\n","\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. 
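The cell that follows implements this tiling choice. As a condensed sketch of the logic only, assuming `model` and `img` already exist as in that cell:

```python
# Sketch of the tiling options described above; 'model' and 'img' come from the cell below.
n_tilesZYX = None          # "Automatic_number_of_tiles" option described above
# n_tilesZYX = (2, 4, 4)   # manual (Z, Y, X) tiling: increase these values until the OOM error disappears

labels, polygons = model.predict_instances(img, n_tiles=n_tilesZYX)
```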
\n","\n","Automatic_number_of_tiles = True #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 1#@param {type:\"number\"}\n","n_tiles_Y = 1#@param {type:\"number\"}\n","n_tiles_X = 1#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","#Create a quality control Folder and check if the folder already exist\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\") == False:\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n","\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","\n","# Generate predictions from the Source_QC_folder and save them in the QC folder\n","\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","n_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n","\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n"," \n"," # axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","\n","model = StarDist3D(None, name=QC_model_name, basedir=QC_model_path)\n","\n","names = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n","\n"," \n","# modify the names to suitable form: path_images/image_numberX.tif\n"," \n","lenght_of_Z = len(Z)\n"," \n","for i in range(lenght_of_Z):\n"," img = normalize(Z[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img, n_tiles=n_tilesZYX)\n"," os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n"," imsave(names[i], labels, polygons)\n","\n","\n","# Here we start testing the differences between GT and predicted masks\n","\n","\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"image\",\"Prediction v. 
GT Intersection over Union\"]) \n","\n","# define the images\n","\n"," for n in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n"," print('Running QC on: '+n)\n"," \n"," test_input = io.imread(os.path.join(Source_QC_folder,n))\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",n))\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, n))\n","\n","#Convert pixel values to 0 or 255\n"," test_prediction_0_to_255 = test_prediction\n"," test_prediction_0_to_255[test_prediction_0_to_255>0] = 255\n","\n","#Convert pixel values to 0 or 255\n"," test_ground_truth_0_to_255 = test_ground_truth_image\n"," test_ground_truth_0_to_255[test_ground_truth_0_to_255>0] = 255\n","\n","# Intersection over Union metric\n","\n"," intersection = np.logical_and(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," union = np.logical_or(test_ground_truth_0_to_255, test_prediction_0_to_255)\n"," iou_score = np.sum(intersection) / np.sum(union)\n"," writer.writerow([n, str(iou_score)])\n","\n","\n","Image_Z = test_input.shape[0]\n","mid_plane = int(Image_Z / 2)+1\n","\n","\n","#Display the last image\n","\n","f=plt.figure(figsize=(25,25))\n","\n","from astropy.visualization import simple_norm\n","norm = simple_norm(test_input, percent = 99)\n","\n","#Input\n","plt.subplot(1,4,1)\n","plt.axis('off')\n","plt.imshow(test_input[mid_plane], aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n","plt.title('Input')\n","\n","#Ground-truth\n","plt.subplot(1,4,2)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255[mid_plane], aspect='equal', cmap='Greens')\n","plt.title('Ground Truth')\n","\n","#Prediction\n","plt.subplot(1,4,3)\n","plt.axis('off')\n","plt.imshow(test_prediction_0_to_255[mid_plane], aspect='equal', cmap='Purples')\n","plt.title('Prediction')\n","\n","#Overlay\n","plt.subplot(1,4,4)\n","plt.axis('off')\n","plt.imshow(test_ground_truth_0_to_255[mid_plane], cmap='Greens')\n","plt.imshow(test_prediction_0_to_255[mid_plane], alpha=0.5, cmap='Purples')\n","plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3)))\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"U8H7QRfKBzI8","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"btXwwnVpBEMB","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.3) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Prediction_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you trained.\n","\n","**`Result_folder`:** This folder will contain the predicted output ROI.\n","\n","**`Data_type`:** Please indicate if the images you want to predict are single images or stacks\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"x8UXP8S2eoo_","colab_type":"code","cellView":"form","colab":{}},"source":["from PIL import Image\n","\n","\n","\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","#test_dataset = Data_folder\n","\n","Results_folder = \"\" #@param {type:\"string\"}\n","#results = results_folder\n","\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","#Here we allow the user to choose the number of tile to be used when predicting the images\n","#@markdown #####To analyse large image, your images need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final image. \"Automatic_number_of_tiles\" will search for and use the smallest number of tiles that can be used, at the expanse of your runtime. Alternatively, manually input the number of tiles in each dimension to be used to process your images. \n","\n","Automatic_number_of_tiles = False #@param {type:\"boolean\"}\n","#@markdown #####If you get an Out of memory (OOM) error when using the \"Automatic_number_of_tiles\" option, disable it and manually input the values to be used to process your images. Progressively increases these numbers until the OOM error disappear.\n","n_tiles_Z = 2#@param {type:\"number\"}\n","n_tiles_Y = 2#@param {type:\"number\"}\n","n_tiles_X = 2#@param {type:\"number\"}\n","\n","if (Automatic_number_of_tiles): \n"," n_tilesZYX = None\n","\n","if not (Automatic_number_of_tiles):\n"," n_tilesZYX = (n_tiles_Z, n_tiles_Y, n_tiles_X)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","#single images\n","#testDATA = test_dataset\n","Dataset = Data_folder+\"/*.tif\"\n","\n","\n","np.random.seed(16)\n","lbl_cmap = random_label_cmap()\n","X = sorted(glob(Dataset))\n","X = list(map(imread,X))\n","n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n","axis_norm = (0,1) # normalize channels independently\n"," \n","# axis_norm = (0,1,2) # normalize channels jointly\n","if n_channel > 1:\n"," print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n","model = StarDist3D(None, name=Prediction_model_name, basedir=Prediction_model_path)\n"," \n","#Sorting and mapping original test dataset\n","X = sorted(glob(Dataset))\n","X = list(map(imread,X))\n","names = [os.path.basename(f) for f in sorted(glob(Dataset))]\n","\n","# modify the names to suitable form: path_images/image_numberX.tif\n","FILEnames=[]\n","for m in names:\n"," m=Results_folder+'/'+m\n"," FILEnames.append(m)\n","\n"," # Predictions folder\n","lenght_of_X = len(X)\n","for i in range(lenght_of_X):\n"," img = normalize(X[i], 1,99.8, axis=axis_norm)\n"," labels, polygons = model.predict_instances(img, n_tiles=n_tilesZYX)\n"," \n","# Save the predicted mask in the result folder\n"," os.chdir(Results_folder)\n"," imsave(FILEnames[i], labels, polygons)\n","\n"," # One example image \n","print(\"One example image is displayed bellow:\")\n","plt.figure(figsize=(13,10))\n","z = max(0, img.shape[0] // 2 - 5)\n","plt.subplot(121)\n","plt.imshow((img if img.ndim==3 else img[...,:3])[z], clim=(0,1), cmap='gray')\n","plt.title('Raw image (XY slice)')\n","plt.axis('off')\n","plt.subplot(122)\n","plt.imshow((img if img.ndim==3 else img[...,:3])[z], clim=(0,1), cmap='gray')\n","plt.imshow(labels[z], cmap=lbl_cmap, alpha=0.5)\n","plt.title('Image and predicted labels (XY slice)')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"SxJsrw3kTcFx","colab_type":"text"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"rH_J20ydXWRQ","colab_type":"text"},"source":["#**Thank you for using StarDist 3D!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/Template_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Template_ZeroCostDL4Mic.ipynb index 0ee33b25..7a791f7c 100755 --- a/Colab_notebooks/Template_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Template_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Template_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1owWtQQucUxUOZMaPh2x_mxe_qXKHCZhp","timestamp":1588074588514},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"5Yw8pstKuR7_","colab_type":"text"},"source":[" This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information.\n","\n"," Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work.\n","\n"," If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. "]},{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **Name of the Network**\n","\n"," Description of the network and link to publication with author reference. 
[author et al, etc.](URL).\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is inspired by the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**\n","\n","This notebook is based on the following paper: \n","\n","**Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)\n","\n","And source code found in: *provide github link or equivalent if applicable*\n","\n","Provide information on dataset availability and link for download if applicable.\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use ZeroCostDL4Mic notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cells: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. Once execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you will find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this tab when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. 
Before getting started**\n","---\n"," Give information on the required structure and dataype of the training dataset.\n","\n"," Provide information on quality control dataset, such as:\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime settings are correct then Google did not allocate GPU to your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n","\n","from tensorflow.python.client import device_lib \n","device_lib.list_local_devices()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. 
Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install Name of the network and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Network and Dependencies\n","\n","#Libraries contains information of certain topics. \n","\n","#Put the imported code and libraries here\n","\n","print(\"Depencies installed and imported.\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your paths and parameters**\n","\n","---\n","\n","The code below allows the user to enter the paths to where the training data is and to define the training parameters.\n"]},{"cell_type":"markdown","metadata":{"id":"da_R1mCG_PDX","colab_type":"text"},"source":["## **3.1. Setting Main Training Parameters**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n"," Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. 
**Default value:**\n","\n","**`other_parameters`:**Give other parameters or default values **Default value:**\n","\n","**If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** \n","\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** "]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","\n","# Ground truth images\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","\n","number_of_epochs = 50#@param {type:\"number\"}\n","\n","#@markdown Other parameters, add as necessary\n","other_parameters = 80#@param {type:\"number\"} # in pixels\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","number_of_steps = 400#@param {type:\"number\"}\n","batch_size = 16#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n","\n","#Here we define the percentage to use for validation\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," shutil.rmtree(model_path+'/'+model_name)\n","\n","\n","# The shape of the images.\n","x = imread(InputFile)\n","y = imread(OutputFile)\n","\n","print('Loaded Input images (number, width, length) =', x.shape)\n","print('Loaded Output images (number, width, length) =', y.shape)\n","print(\"Parameters initiated.\")\n","\n","# This will display a randomly chosen dataset input and output\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Training target')\n","plt.axis('off');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wA66DlgI_Bya","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"opQ2MwPy_HFC","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. 
This only works if the images are square in XY.\n","\n","Add any other information which is necessary to run augmentation with your notebook/data."]},{"cell_type":"code","metadata":{"id":"pcWXnWP0_WRn","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Add any further useful augmentations\n","Use_Data_augmentation = False #@param{type:\"boolean\"}\n","\n","#@markdown Select this option if you want to use augmentation to increase the size of your dataset\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","#@markdown **Would you like to save your augmented images?**\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," 
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path) \n","\n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","if Use_Data_augmentation:\n","\n"," if os.path.exists(Saving_path+'/augmented_source'):\n"," shutil.rmtree(Saving_path+'/augmented_source')\n"," os.mkdir(Saving_path+'/augmented_source')\n","\n"," if os.path.exists(Saving_path+'/augmented_target'):\n"," shutil.rmtree(Saving_path+'/augmented_target') \n"," os.mkdir(Saving_path+'/augmented_target')\n","\n"," print(\"Data augmentation enabled\")\n"," print(\"Data augmentation in progress....\")\n","\n"," if Rotation == True:\n"," rotation_aug(Training_source,Training_target,flip=Flip)\n"," \n"," elif Rotation == False and Flip == True:\n"," flip(Training_source,Training_target)\n"," print(\"Done\")\n","\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"lasWo8w6B5BM","colab_type":"text"},"source":["## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pret-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
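The next cell automates the weight and learning-rate recovery described above. Below is a condensed sketch of that idea with placeholder paths and a placeholder fallback learning rate (the real cell falls back to the notebook's `initial_learning_rate`): pick the saved weights file and read the matching learning rate from the model's `Quality Control/training_evaluation.csv`.

```python
# Sketch only: placeholder paths and fallback value, not the notebook's actual defaults.
import os
import pandas as pd

pretrained_model_path = '/content/gdrive/My Drive/my_previous_model'   # placeholder path
Weights_choice = 'best'                                                 # or 'last'
default_learning_rate = 0.0004                                          # placeholder fallback

h5_file_path = os.path.join(pretrained_model_path, 'weights_' + Weights_choice + '.h5')
csv_path = os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')

learning_rate = default_learning_rate
if os.path.exists(csv_path):
    log = pd.read_csv(csv_path)
    if 'learning rate' in log.columns:
        if Weights_choice == 'best':
            learning_rate = log.loc[log['val_loss'].idxmin(), 'learning rate']
        else:
            learning_rate = log['learning rate'].iloc[-1]

print('Will load', h5_file_path, 'and resume training with learning rate', learning_rate)
# The weights themselves are loaded later with model.load_weights(h5_file_path),
# once the model object has been built in section 4.
```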
"]},{"cell_type":"code","metadata":{"id":"Wr5O55VuB6t5","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate 
= initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained nerwork will be used.')\n","\n","\n","#@markdown ### You will need to add or replace the code that loads any previously trained weights to the notebook here."],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.1. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"EZnoS3rb8BSR","colab_type":"code","cellView":"form","colab":{}},"source":["import time\n","import csv\n","\n","start = time.time()\n","\n","#@markdown ##Start Training\n","\n","# Start Training\n","\n","#Insert the code necessary to initiate training of your model\n","\n","#Note that the notebook should load weights either from the model that is \n","#trained from scratch or if the pretrained weights are used (3.3.)\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.2. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the name of the model and path to model folder:\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below. \n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RZOPCVN0qcYb","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n"," Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"Nh8MlX3sqd_7","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Insert code to activate the pretrained model if necessary. \n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Insert code to perform predictions on all datasets in the Source_QC folder\n","\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", 
newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data 
is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT)\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source)\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction)\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. 
Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","Fill the below code to perform predictions using your model.\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"9ZmST3JRq-Ho","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, provide the name of the model and path to model folder:\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. 
Provide the path to this folder below.\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","# Activate the (pre-)trained model\n","\n","\n","# Provide the code for performing predictions and saving them\n","\n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","plt.figure(figsize=(16,8))\n","\n","plt.subplot(1,2,1)\n","plt.axis('off')\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","\n","plt.subplot(1,2,2)\n","plt.axis('off')\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Predicted output');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using YOUR MODEL!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Template_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1owWtQQucUxUOZMaPh2x_mxe_qXKHCZhp","timestamp":1588074588514},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"5Yw8pstKuR7_","colab_type":"text"},"source":[" This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information.\n","\n"," Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work.\n","\n"," If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. "]},{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **Name of the Network**\n","\n","---\n","\n"," Description of the network and link to publication with author reference. 
[author et al, etc.](URL).\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is inspired from the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**\n","\n","This notebook is based on the following paper: \n","\n","**Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)\n","\n","And source code found in: *provide github link or equivalent if applicable*\n","\n","Provide information on dataset availability and link for download if applicable.\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use ZeroCostDL4Mic notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. 
Before getting started**\n","---\n"," Give information on the required structure and dataype of the training dataset.\n","\n"," Provide information on quality control dataset, such as:\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .tif files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Low SNR images (Training_source)\n"," - img_1.tif, img_2.tif, ...\n"," - High SNR images (Training_target)\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Low SNR images\n"," - img_1.tif, img_2.tif\n"," - High SNR images\n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime settings are correct then Google did not allocate GPU to your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n","\n","from tensorflow.python.client import device_lib \n","device_lib.list_local_devices()\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. 
Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. Install Name of the network and dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Network and dependencies\n","\n","#Libraries contains information of certain topics. \n","\n","#Put the imported code and libraries here\n","\n","print(\"Depencies installed and imported.\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your paths and parameters**\n","\n","---\n","\n","The code below allows the user to enter the paths to where the training data is and to define the training parameters.\n"]},{"cell_type":"markdown","metadata":{"id":"da_R1mCG_PDX","colab_type":"text"},"source":["## **3.1. Setting the main training parameters**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":[" **Paths for training, predictions and results**\n","\n"," Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training parameters**\n","\n","**`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. 
**Default value:**\n","\n","**`other_parameters`:**Give other parameters or default values **Default value:**\n","\n","**If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** \n","\n","\n","**Advanced parameters - experienced users only**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** "]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","\n","# Ground truth images\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","\n","number_of_epochs = 50#@param {type:\"number\"}\n","\n","#@markdown Other parameters, add as necessary\n","other_parameters = 80#@param {type:\"number\"} # in pixels\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","#@markdown ###If not, please input:\n","\n","number_of_steps = 400#@param {type:\"number\"}\n","batch_size = 16#@param {type:\"number\"}\n","percentage_validation = 10 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 16\n"," percentage_validation = 10\n","\n","#Here we define the percentage to use for validation\n","percentage = percentage_validation/100\n","\n","\n","#here we check that no model with the same name already exist, if so delete\n","if os.path.exists(model_path+'/'+model_name):\n"," shutil.rmtree(model_path+'/'+model_name)\n","\n","\n","# The shape of the images.\n","x = imread(InputFile)\n","y = imread(OutputFile)\n","\n","print('Loaded Input images (number, width, length) =', x.shape)\n","print('Loaded Output images (number, width, length) =', y.shape)\n","print(\"Parameters initiated.\")\n","\n","# This will display a randomly chosen dataset input and output\n","random_choice = random.choice(os.listdir(Training_source))\n","x = imread(Training_source+\"/\"+random_choice)\n","\n","\n","# Here we check that the input images contains the expected dimensions\n","if len(x.shape) == 2:\n"," print(\"Image dimensions (y,x)\",x.shape)\n","\n","if not len(x.shape) == 2:\n"," print(bcolors.WARNING +\"Your images appear to have the wrong dimensions. 
Image dimension\",x.shape)\n","\n","\n","#Find image XY dimension\n","Image_Y = x.shape[0]\n","Image_X = x.shape[1]\n","\n","#Hyperparameters failsafes\n","\n","# Here we check that patch_size is smaller than the smallest xy dimension of the image \n","\n","if patch_size > min(Image_Y, Image_X):\n"," patch_size = min(Image_Y, Image_X)\n"," print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n","\n","# Here we check that patch_size is divisible by 8\n","if not patch_size % 8 == 0:\n"," patch_size = ((int(patch_size / 8)-1) * 8)\n"," print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:\",patch_size)\n","\n","\n","\n","os.chdir(Training_target)\n","y = imread(Training_target+\"/\"+random_choice)\n","\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Training source')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Training target')\n","plt.axis('off');\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wA66DlgI_Bya","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"opQ2MwPy_HFC","colab_type":"text"},"source":["Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n","\n","Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. 
This only works if the images are square in XY.\n","\n","Add any other information which is necessary to run augmentation with your notebook/data."]},{"cell_type":"code","metadata":{"id":"pcWXnWP0_WRn","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Add any further useful augmentations\n","Use_Data_augmentation = False #@param{type:\"boolean\"}\n","\n","#@markdown Select this option if you want to use augmentation to increase the size of your dataset\n","\n","#@markdown **Rotate each image 3 times by 90 degrees.**\n","Rotation = True #@param{type:\"boolean\"}\n","\n","#@markdown **Flip each image once around the x axis of the stack.**\n","Flip = True #@param{type:\"boolean\"}\n","\n","\n","#@markdown **Would you like to save your augmented images?**\n","\n","Save_augmented_images = False #@param {type:\"boolean\"}\n","\n","Saving_path = \"\" #@param {type:\"string\"}\n","\n","\n","if not Save_augmented_images:\n"," Saving_path= \"/content\"\n","\n","\n","def rotation_aug(Source_path, Target_path, flip=False):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path)\n"," \n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," # Source Rotation\n"," source_img_90 = np.rot90(source_img,axes=(1,2))\n"," source_img_180 = np.rot90(source_img_90,axes=(1,2))\n"," source_img_270 = np.rot90(source_img_180,axes=(1,2))\n","\n"," # Target Rotation\n"," target_img_90 = np.rot90(target_img,axes=(1,2))\n"," target_img_180 = np.rot90(target_img_90,axes=(1,2))\n"," target_img_270 = np.rot90(target_img_180,axes=(1,2))\n","\n"," # Add a flip to the rotation\n"," \n"," if flip == True:\n"," source_img_lr = np.fliplr(source_img)\n"," source_img_90_lr = np.fliplr(source_img_90)\n"," source_img_180_lr = np.fliplr(source_img_180)\n"," source_img_270_lr = np.fliplr(source_img_270)\n","\n"," target_img_lr = np.fliplr(target_img)\n"," target_img_90_lr = np.fliplr(target_img_90)\n"," target_img_180_lr = np.fliplr(target_img_180)\n"," target_img_270_lr = np.fliplr(target_img_270)\n","\n"," #source_img_90_ud = np.flipud(source_img_90)\n"," \n"," # Save the augmented files\n"," # Source images\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)\n"," # Target images\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)\n","\n"," if flip == True:\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)\n","\n"," 
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)\n","\n","def flip(Source_path, Target_path):\n"," Source_images = os.listdir(Source_path)\n"," Target_images = os.listdir(Target_path) \n","\n"," for image in Source_images:\n"," source_img = io.imread(os.path.join(Source_path,image))\n"," target_img = io.imread(os.path.join(Target_path,image))\n"," \n"," source_img_lr = np.fliplr(source_img)\n"," target_img_lr = np.fliplr(target_img)\n","\n"," io.imsave(Saving_path+'/augmented_source/'+image,source_img)\n"," io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)\n","\n"," io.imsave(Saving_path+'/augmented_target/'+image,target_img)\n"," io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)\n","\n","\n","if Use_Data_augmentation:\n","\n"," if os.path.exists(Saving_path+'/augmented_source'):\n"," shutil.rmtree(Saving_path+'/augmented_source')\n"," os.mkdir(Saving_path+'/augmented_source')\n","\n"," if os.path.exists(Saving_path+'/augmented_target'):\n"," shutil.rmtree(Saving_path+'/augmented_target') \n"," os.mkdir(Saving_path+'/augmented_target')\n","\n"," print(\"Data augmentation enabled\")\n"," print(\"Data augmentation in progress....\")\n","\n"," if Rotation == True:\n"," rotation_aug(Training_source,Training_target,flip=Flip)\n"," \n"," elif Rotation == False and Flip == True:\n"," flip(Training_source,Training_target)\n"," print(\"Done\")\n","\n","\n","if not Use_Data_augmentation:\n"," print(bcolors.WARNING+\"Data augmentation disabled\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"lasWo8w6B5BM","colab_type":"text"},"source":["## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pret-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
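[Editor's sketch for the weight-loading placeholder in section 3.3] The code cell that follows locates h5_file_path and the stored learning rate but leaves the actual weight loading to the notebook author. A minimal, hypothetical sketch of that step for a Keras-based network is given below; `model` is a placeholder for whatever network object your implementation builds, and the other names come from the cell that follows. Adapt or discard as needed for your framework.

# Hypothetical sketch -- assumes a compiled Keras `model` plus the variables
# defined in the following code cell (h5_file_path, Weights_choice,
# bestLearningRate, lastLearningRate).
from tensorflow.keras import backend as K

if Use_pretrained_model and os.path.exists(h5_file_path):
    model.load_weights(h5_file_path)  # restore the pre-trained weights
    new_lr = bestLearningRate if Weights_choice == "best" else lastLearningRate
    K.set_value(model.optimizer.lr, new_lr)  # resume with the stored learning rate
    print("Pre-trained weights and learning rate loaded.")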
"]},{"cell_type":"code","metadata":{"id":"Wr5O55VuB6t5","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n"," \n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate 
= initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(bcolors.WARNING+'No pretrained nerwork will be used.')\n","\n","\n","#@markdown ### You will need to add or replace the code that loads any previously trained weights to the notebook here."],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.1. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"EZnoS3rb8BSR","colab_type":"code","cellView":"form","colab":{}},"source":["import time\n","import csv\n","\n","start = time.time()\n","\n","#@markdown ##Start training\n","\n","# Start Training\n","\n","#Insert the code necessary to initiate training of your model\n","\n","#Note that the notebook should load weights either from the model that is \n","#trained from scratch or if the pretrained weights are used (3.3.)\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.2. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
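[Editor's sketch for the training placeholder in section 4.1 above] The template only says "Insert the code necessary to initiate training of your model". A generic, hypothetical Keras sketch is shown below; it is not the method of any particular network, but it illustrates how a contributor could produce the files the rest of the template expects (weights_best.h5, weights_last.h5 and Quality Control/training_evaluation.csv). Note that CSVLogger alone does not record the "learning rate" column that section 3.3 reads back, so a small custom callback or manual append would be needed, and the CSV column order should match what section 5.1 plots.

# Hypothetical training sketch -- `model`, `X_train`, `Y_train` are placeholders
# for whatever your network and data-loading code define.
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger

qc_dir = os.path.join(model_path, model_name, 'Quality Control')
os.makedirs(qc_dir, exist_ok=True)

callbacks = [
    # keep the weights with the lowest validation loss
    ModelCheckpoint(os.path.join(model_path, model_name, 'weights_best.h5'),
                    monitor='val_loss', save_best_only=True, save_weights_only=True),
    # always keep the most recent weights
    ModelCheckpoint(os.path.join(model_path, model_name, 'weights_last.h5'),
                    save_best_only=False, save_weights_only=True),
    # per-epoch loss log; adjust columns to what section 5.1 reads back
    CSVLogger(os.path.join(qc_dir, 'training_evaluation.csv'))
]

history = model.fit(X_train, Y_train,
                    validation_split=percentage,
                    epochs=number_of_epochs,
                    batch_size=batch_size,
                    callbacks=callbacks)

print("Training done. Total time:", round(time.time() - start), "seconds")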
\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the name of the model and path to model folder:\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below. \n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RZOPCVN0qcYb","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n"," Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.\n","\n","This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" !\n","\n","**1. The SSIM (structural similarity) map** \n","\n","The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n","\n","**mSSIM** is the SSIM value calculated across the entire window of both images.\n","\n","**The output below shows the SSIM maps with the mSSIM**\n","\n","**2. The RSE (Root Squared Error) map** \n","\n","This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n","\n","\n","**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n","\n","**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
The higher the score the better the agreement.\n","\n","**The output below shows the RSE maps with the NRMSE and PSNR values.**\n","\n","\n","\n"]},{"cell_type":"code","metadata":{"id":"Nh8MlX3sqd_7","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","# Insert code to activate the pretrained model if necessary. \n","\n","# List Tif images in Source_QC_folder\n","Source_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n","Z = sorted(glob(Source_QC_folder_tif))\n","Z = list(map(imread,Z))\n","print('Number of test dataset found in the folder: '+str(len(Z)))\n","\n","\n","# Insert code to perform predictions on all datasets in the Source_QC folder\n","\n","\n","def ssim(img1, img2):\n"," return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)\n","\n","\n","def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","def norm_minmse(gt, x, normalize_gt=True):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n","\n"," \"\"\"\n"," normalizes and affinely scales an image pair such that the MSE is minimized \n"," \n"," Parameters\n"," ----------\n"," gt: ndarray\n"," the ground truth image \n"," x: ndarray\n"," the image that will be affinely scaled \n"," normalize_gt: bool\n"," set to True of gt image should be normalized (default)\n"," Returns\n"," -------\n"," gt_scaled, x_scaled \n"," \"\"\"\n"," if normalize_gt:\n"," gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n"," x = x.astype(np.float32, copy=False) - np.mean(x)\n"," #x = x - np.mean(x)\n"," gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n"," #gt = gt - np.mean(gt)\n"," scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n"," return gt, scale * x\n","\n","# Open and create the csv file that will contain all the QC metrics\n","with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/QC_metrics_\"+QC_model_name+\".csv\", \"w\", 
newline='') as file:\n"," writer = csv.writer(file)\n","\n"," # Write the header in the csv file\n"," writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"Input v. GT mSSIM\", \"Prediction v. GT NRMSE\", \"Input v. GT NRMSE\", \"Prediction v. GT PSNR\", \"Input v. GT PSNR\"]) \n","\n"," # Let's loop through the provided dataset in the QC folders\n","\n","\n"," for i in os.listdir(Source_QC_folder):\n"," if not os.path.isdir(os.path.join(Source_QC_folder,i)):\n"," print('Running QC on: '+i)\n"," # -------------------------------- Target test data (Ground truth) --------------------------------\n"," test_GT = io.imread(os.path.join(Target_QC_folder, i))\n","\n"," # -------------------------------- Source test data --------------------------------\n"," test_source = io.imread(os.path.join(Source_QC_folder,i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n"," test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)\n","\n"," # -------------------------------- Prediction --------------------------------\n"," test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",i))\n","\n"," # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n"," test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True) \n","\n","\n"," # -------------------------------- Calculate the metric maps and save them --------------------------------\n","\n"," # Calculate the SSIM maps\n"," index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)\n"," index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)\n","\n"," #Save ssim_maps\n"," img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)\n"," img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)\n"," \n"," # Calculate the Root Squared Error (RSE) maps\n"," img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n"," img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))\n","\n"," # Save SE maps\n"," img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n"," img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)\n"," io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)\n","\n","\n"," # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n","\n"," # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n"," NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n"," NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))\n"," \n"," # We can also measure the peak signal to noise ratio between the images\n"," PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n"," PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)\n","\n"," writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])\n","\n","\n","# All data 
is now processed saved\n","Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same\n","\n","plt.figure(figsize=(15,15))\n","# Currently only displays the last computed set, from memory\n","# Target (Ground-truth)\n","plt.subplot(3,3,1)\n","plt.axis('off')\n","img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_GT)\n","plt.title('Target',fontsize=15)\n","\n","# Source\n","plt.subplot(3,3,2)\n","plt.axis('off')\n","img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))\n","plt.imshow(img_Source)\n","plt.title('Source',fontsize=15)\n","\n","#Prediction\n","plt.subplot(3,3,3)\n","plt.axis('off')\n","img_Prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\", Test_FileList[-1]))\n","plt.imshow(img_Prediction)\n","plt.title('Prediction',fontsize=15)\n","\n","#Setting up colours\n","cmap = plt.cm.CMRmap\n","\n","#SSIM between GT and Source\n","plt.subplot(3,3,5)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)\n","plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#SSIM between GT and Prediction\n","plt.subplot(3,3,6)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n","plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)\n","\n","#Root Squared Error between GT and Source\n","plt.subplot(3,3,8)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)\n","plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Source',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)\n","#plt.title('Target vs. 
Source PSNR: '+str(round(PSNR_GTvsSource,3)))\n","plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n","\n","#Root Squared Error between GT and Prediction\n","plt.subplot(3,3,9)\n","#plt.axis('off')\n","plt.tick_params(\n"," axis='both', # changes apply to the x-axis and y-axis\n"," which='both', # both major and minor ticks are affected\n"," bottom=False, # ticks along the bottom edge are off\n"," top=False, # ticks along the top edge are off\n"," left=False, # ticks along the left edge are off\n"," right=False, # ticks along the right edge are off\n"," labelbottom=False,\n"," labelleft=False) \n","imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n","plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n","plt.title('Target vs. Prediction',fontsize=15)\n","plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","Fill the below code to perform predictions using your model.\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","metadata":{"id":"9ZmST3JRq-Ho","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, provide the name of the model and path to model folder:\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. 
Provide the path to this folder below.\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","# Activate the (pre-)trained model\n","\n","\n","# Provide the code for performing predictions and saving them\n","\n","\n","print(\"Images saved into folder:\", Result_folder)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+random_choice)\n","\n","plt.figure(figsize=(16,8))\n","\n","plt.subplot(1,2,1)\n","plt.axis('off')\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","\n","plt.subplot(1,2,2)\n","plt.axis('off')\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Predicted output');\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using YOUR NETWORK!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/U-Net_3D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/U-Net_3D_ZeroCostDL4Mic.ipynb new file mode 100755 index 00000000..81148a30 --- /dev/null +++ b/Colab_notebooks/U-Net_3D_ZeroCostDL4Mic.ipynb @@ -0,0 +1 @@ +{"nbformat":4,"nbformat_minor":0,"metadata":{"accelerator":"GPU","colab":{"name":"U-Net_3D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1bhXCDvEgu4MumzQ_CEZsfReOqHRt3bsY","timestamp":1595252090910},{"file_id":"1owWtQQucUxUOZMaPh2x_mxe_qXKHCZhp","timestamp":1588074588514},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[],"toc_visible":true,"machine_shape":"hm"},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.7.3"}},"cells":[{"cell_type":"markdown","metadata":{"colab_type":"text","id":"V9zNGvape2-I"},"source":["# **U-Net (3D)**\n"," ---\n","\n"," The 3D U-Net was first introduced by [Çiçek et al](https://arxiv.org/abs/1606.06650) for learning dense volumetric segmentations from sparsely annotated ground-truth data, building upon the original U-Net architecture by [Ronneberger et al](https://arxiv.org/abs/1505.04597). \n","\n","**This particular implementation allows supervised learning between any two types of 3D image data. If you are interested in image segmentation of 2D datasets, you should use the 2D U-Net notebook instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project ([ZeroCostDL4Mic](https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki)) jointly developed by the [Jacquemet](https://cellmig.org/) and [Henriques](https://henriqueslab.github.io/) laboratories and created by Daniel Krentzel.\n","\n","This notebook is largely based on the following paper: \n","\n","[**3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation**](https://arxiv.org/pdf/1606.06650.pdf) by Özgün Çiçek *et al.* published on arXiv in 2016\n","\n","The following two Python libraries play an important role in the notebook: \n","\n","1. [**Elasticdeform**](https://github.com/gvtulder/elasticdeform)\n"," by Gijs van Tulder was used to augment the 3D training data using elastic grid-based deformations as described in the original 3D U-Net paper. \n","\n","2. [**Tifffile**](https://github.com/cgohlke/tifffile) by Christoph Gohlke is a great library for reading and writing TIFF files. 
\n","\n","The [example dataset](https://www.epfl.ch/labs/cvlab/data/data-em/) represents a 5x5x5µm section taken from the CA1 hippocampus region of the brain with annotated mitochondria and was acquired by Graham Knott and Marco Cantoni at EPFL.\n","\n","\n","**Please also cite the original paper and relevant Python libraries when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use ZeroCostDL4Mic notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cells: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new one by clicking `+ Text`.\n","\n","**Code cells** contain code which can be modified by selecting the cell. To execute the cell, move your cursor to the `[]`-symbol on the left side of the cell (a play button should appear). Click it to execute the cell. Once the cell is fully executed, the animation stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","Three tabs are located on the upper left side of the notebook:\n","\n","1. *Table of contents* contains the structure of the notebook. Click the headers to move quickly between sections.\n","\n","2. *Code snippets* provides a wide array of example code specific to Google Colab. You can ignore this when using this notebook.\n","\n","3. *Files* displays the current working directory. We will mount your Google Drive in Section 1.2. so that you can access your files and save them permanently.\n","\n","**Important:** All uploaded files are purged once the runtime ends.\n","\n","**Note:** The directory *sample data* in *Files* contains default files. Do not upload anything there!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive by clicking *File* -> *Save a copy in Drive*.\n","\n","To **edit a cell**, double click on the text. This will either display the source code (in code cells) or the [markdown](https://colab.research.google.com/notebooks/markdown_guide.ipynb#scrollTo=70pYkR9LiOV0) (in text cells).\n","You can use `#` in code cells to comment out parts of the code. This allows you to keep the original piece of code while not executing it."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"vNMDQHm0Ah-Z"},"source":["#**0. Before getting started**\n","---\n","\n","As the network operates in three dimensions, certain considerations should be given to correctly pre-processing the data. Ensure that the structure of interest does not substantially change between slices - image volumes with isotropic pixel sizes are ideal for this architecture.\n","\n","Each image volume must be provided as an **8-bit** or **binary multipage TIFF file** to maintain the correct ordering of individual image slices. 
If more than one image volume has been annotated, source and target files must be named identically and placed in separate directories. In case only one image volume has been annotated, source and target files do not have to be placed in separate directories and can be named differently, as long as their paths are explicitly provided in Section 3. \n","\n","**Prepare two datasets** (*training* and *testing*) for quality control purposes. Make sure that the *testing* dataset does not overlap with the *training* dataset and is ideally sourced from a different acquisition and sample to ensure robustness of the trained model. \n","\n","\n","---\n","\n","\n","### **Directory structure**\n","\n","Make sure to adhere to one of the following directory structures. If only one annotated training volume exists, choose the first structure. In case more than one training volume is available, choose the second structure.\n","\n","**Structure 1:** Only one training volume\n","```\n","path/to/directory/with/one/training/volume\n","│--training_source.tif\n","│--training_target.tif\n","| \n","│--testing_source.tif\n","|--testing_target.tif \n","|\n","|--data_to_predict_on.tif\n","|--prediction_results.tif\n","\n","```\n","**Structure 2:** Various training volumes\n","```\n","path/to/directory/with/various/training/volumes\n","│--testing_source.tif\n","|--testing_target.tif \n","|\n","└───training\n","| └───source\n","| | |--training_volume_one.tif\n","| | |--training_volume_two.tif\n","| | |--...\n","| | |--training_volume_n.tif\n","| |\n","| └───target\n","| |--training_volume_one.tif\n","| |--training_volume_two.tif\n","| |--...\n","| |--training_volume_n.tif\n","|\n","|--data_to_predict_on.tif\n","|--prediction_results.tif\n","```\n","**Note:** Naming directories is completely up to you, as long as the paths are correctly specified throughout the notebook.\n","\n","\n","---\n","\n","\n","### **Important note**\n","\n","* If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do so), you will need to run **Sections 1 - 4**, then use **Section 5** to assess the quality of your model and **Section 6** to run predictions using the model that you trained.\n","\n","* If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **Sections 1 and 2** to set up the notebook, then use **Section 5** to assess the quality of your model.\n","\n","* If you only wish to **Run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **Sections 1 and 2** to set up the notebook, then use **Section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"fFdz-rHnQxld","colab":{}},"source":["#@markdown ##**Download example dataset**\n","\n","#@markdown This usually takes a few minutes. 
The images are saved in *example_dataset*.\n","\n","import requests \n","import os\n","from tqdm.notebook import tqdm \n","\n","def make_directory(dir):\n"," if not os.path.exists(dir):\n"," os.makedirs(dir)\n","\n","def download_from_url(url, save_as):\n"," file_url = url\n"," r = requests.get(file_url, stream=True) \n"," \n"," with open(save_as, 'wb') as file: \n"," for block in tqdm(r.iter_content(chunk_size = 1024), desc = 'Downloading ' + os.path.basename(save_as), total=126875, ncols=1000):\n"," if block:\n"," file.write(block) \n","\n","\n","make_directory('example_dataset')\n","\n","download_from_url('https://documents.epfl.ch/groups/c/cv/cvlab-unit/www/data/%20ElectronMicroscopy_Hippocampus/training.tif', 'example_dataset/training.tif')\n","download_from_url('https://documents.epfl.ch/groups/c/cv/cvlab-unit/www/data/%20ElectronMicroscopy_Hippocampus/training_groundtruth.tif', 'example_dataset/training_groundtruth.tif')\n","download_from_url('https://documents.epfl.ch/groups/c/cv/cvlab-unit/www/data/%20ElectronMicroscopy_Hippocampus/testing.tif', 'example_dataset/testing.tif')\n","download_from_url('https://documents.epfl.ch/groups/c/cv/cvlab-unit/www/data/%20ElectronMicroscopy_Hippocampus/testing_groundtruth.tif', 'example_dataset/testing_groundtruth.tif')\n","\n","print('Example dataset successfully downloaded!')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"DMNHVZfHmbKb"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"BCPhV-pe-syw"},"source":["\n","## **1.1. Check GPU access and Python version**\n","---\n","\n","By default, Colab sessions run Python 3 with GPU acceleration. You can manually set this by:\n","\n","1. Going to **Runtime -> Change runtime type**\n","\n","2. **Runtime type: Python 3** *(This notebook uses Python 3)*\n","\n","3. **Accelator: GPU** *(Graphics Processing Unit)*\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"r9eqe5TazD5o","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"UBrnApIUBgxv"},"source":["## **1.2. Mount Google Drive**\n","---\n"," To use this notebook with your **own data**, place it in a folder on **Google Drive** following one of the directory structures outlined in **Section 0**.\n","\n","1. **Run** the **cell** below to mount your Google Drive and follow the link. \n","\n","2. **Sign in** to your Google account and press 'Allow'. \n","\n","3. Next, copy the **authorization code**, paste it into the cell and press enter. This will allow Colab to read and write data from and to your Google Drive. \n","\n","4. 
Once this is done, your data can be viewed in the **Files tab** on the top left of the notebook after hitting 'Refresh'."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"01Djr8v-5pPk","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"Vspvj5Q2ijd4","colab":{}},"source":["#@markdown ##Unzip pre-trained model directory\n","\n","#@markdown 1. Upload a zipped model directory using the *Files* tab\n","#@markdown 2. Run this cell to unzip your model file\n","#@markdown 3. The model directory will appear in the *Files* tab \n","\n","from google.colab import files\n","\n","zipped_model_file = \"\" #@param {type:\"string\"}\n","\n","!unzip \"$zipped_model_file\""],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"n4yWFoJNnoin"},"source":["# **2. Install 3D U-Net dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"3u2mXn3XsWzd","colab":{}},"source":["#@markdown ##Install dependencies and instantiate network\n","\n","#Put the imported code and libraries here\n","from __future__ import absolute_import, division, print_function, unicode_literals\n","\n","try:\n"," import elasticdeform\n","except:\n"," !pip install elasticdeform\n"," import elasticdeform\n","\n","try:\n"," import tifffile\n","except:\n"," !pip install tifffile\n"," import tifffile\n","\n","import os\n","import csv\n","import random\n","import h5py\n","import imageio\n","import math\n","import shutil\n","\n","import pandas as pd\n","from glob import glob\n","from tqdm import tqdm\n","\n","from skimage import transform\n","from skimage import exposure\n","from skimage import color\n","\n","from scipy.ndimage import zoom\n","\n","import matplotlib.pyplot as plt\n","\n","import numpy as np\n","import tensorflow as tf\n","\n","from keras import backend as K\n","\n","from keras.layers import Conv3D\n","from keras.layers import BatchNormalization\n","from keras.layers import ReLU\n","from keras.layers import MaxPooling3D\n","from keras.layers import Conv3DTranspose\n","from keras.layers import Input\n","from keras.layers import Concatenate\n","\n","from keras.models import Model\n","\n","from keras.utils import Sequence\n","\n","from keras.callbacks import ModelCheckpoint\n","from keras.callbacks import CSVLogger\n","from keras.callbacks import Callback\n","\n","from keras.metrics import RootMeanSquaredError\n","\n","from ipywidgets import interact\n","from ipywidgets import interactive\n","from ipywidgets import fixed\n","from ipywidgets import interact_manual \n","import ipywidgets as widgets\n","\n","print(\"Depencies installed and imported.\")\n","\n","# Define MultiPageTiffGenerator class\n","class MultiPageTiffGenerator(Sequence):\n","\n"," def __init__(self,\n"," source_path,\n"," target_path,\n"," batch_size=1,\n"," shape=(128,128,32,1),\n"," augment=True,\n"," val_split=0.2,\n"," is_val=False,\n"," random_crop=True,\n"," downscale=1,\n"," binary_target=False):\n","\n"," # If directory with various multi-page tiffiles is provided read as list\n"," if os.path.isfile(source_path):\n"," self.dir_flag = False\n"," self.source = tifffile.imread(source_path)\n"," if binary_target:\n"," self.target = tifffile.imread(target_path).astype(np.bool)\n"," else:\n"," self.target = 
tifffile.imread(target_path)\n","\n"," elif os.path.isdir(source_path):\n"," self.dir_flag = True\n"," self.source_dir_list = glob(os.path.join(source_path, '*'))\n"," self.target_dir_list = glob(os.path.join(target_path, '*'))\n","\n"," self.source_dir_list.sort()\n"," self.target_dir_list.sort()\n","\n"," self.shape = shape\n"," self.batch_size = batch_size\n"," self.augment = augment\n"," self.val_split = val_split\n"," self.is_val = is_val\n"," self.random_crop = random_crop\n"," self.downscale = downscale\n"," self.binary_target = binary_target\n"," self.on_epoch_end()\n","\n"," def __len__(self):\n"," # If various multi-page tiff files provided sum all images within each\n"," # Expected number of non-augmented images is 1/3 of entire training set, hence multiply lenght by 3\n"," if self.augment:\n"," augment_factor = 3\n"," else:\n"," augment_factor = 1\n"," \n"," if self.dir_flag:\n"," num_of_imgs = 0\n"," for tiff_path in self.source_dir_list:\n"," num_of_imgs += tifffile.imread(tiff_path).shape[0]\n"," xy_shape = tifffile.imread(self.source_dir_list[0]).shape[1:]\n","\n"," if self.is_val:\n"," if self.random_crop:\n"," crop_volume = self.shape[0] * self.shape[1] * self.shape[2]\n"," volume = xy_shape[0] * xy_shape[1] * self.val_split * num_of_imgs\n"," return math.floor(augment_factor * volume / (crop_volume * self.batch_size * self.downscale))\n"," else:\n"," return math.floor(self.val_split * num_of_imgs / self.batch_size)\n"," else:\n"," if self.random_crop:\n"," crop_volume = self.shape[0] * self.shape[1] * self.shape[2]\n"," volume = xy_shape[0] * xy_shape[1] * (1 - self.val_split) * num_of_imgs\n"," return math.floor(augment_factor * volume / (crop_volume * self.batch_size * self.downscale))\n","\n"," else:\n"," return math.floor(augment_factor*(1 - self.val_split) * num_of_imgs/self.batch_size)\n"," else:\n"," if self.is_val:\n"," if self.random_crop:\n"," crop_volume = self.shape[0] * self.shape[1] * self.shape[2]\n"," volume = self.source.shape[0] * self.source.shape[1] * self.val_split * self.source.shape[2]\n"," return math.floor(augment_factor * volume / (crop_volume * self.batch_size * self.downscale))\n"," else:\n"," return math.floor((self.val_split * self.source.shape[0] / self.batch_size))\n"," else:\n"," if self.random_crop:\n"," crop_volume = self.shape[0] * self.shape[1] * self.shape[2]\n"," volume = self.source.shape[0] * self.source.shape[1] * (1 - self.val_split) * self.source.shape[2]\n"," return math.floor(augment_factor * volume / (crop_volume * self.batch_size * self.downscale))\n"," else:\n"," return math.floor(augment_factor * (1 - self.val_split) * self.source.shape[0] / self.batch_size)\n","\n"," def __getitem__(self, idx):\n","\n"," source_batch = np.empty((self.batch_size,\n"," self.shape[0],\n"," self.shape[1],\n"," self.shape[2],\n"," self.shape[3]))\n"," target_batch = np.empty((self.batch_size,\n"," self.shape[0],\n"," self.shape[1],\n"," self.shape[2],\n"," self.shape[3]))\n","\n"," for batch in range(self.batch_size):\n"," # Modulo operator ensures IndexError is avoided\n"," stack_start = self.batch_list[(idx+batch*self.shape[2])%len(self.batch_list)]\n","\n"," if self.dir_flag:\n"," self.source = tifffile.imread(self.source_dir_list[stack_start[0]])\n"," if self.binary_target:\n"," self.target = tifffile.imread(self.target_dir_list[stack_start[0]]).astype(np.bool)\n"," else:\n"," self.target = tifffile.imread(self.target_dir_list[stack_start[0]])\n","\n"," src_list = []\n"," tgt_list = []\n"," for i in range(stack_start[1], 
stack_start[1]+self.shape[2]):\n"," src = self.source[i]\n"," src = transform.downscale_local_mean(src, (self.downscale, self.downscale))\n"," if not self.random_crop:\n"," src = transform.resize(src, (self.shape[0], self.shape[1]), mode='constant', preserve_range=True)\n"," src = self._min_max_scaling(src)\n"," src_list.append(src)\n","\n"," tgt = self.target[i]\n"," tgt = transform.downscale_local_mean(tgt, (self.downscale, self.downscale))\n"," if not self.random_crop:\n"," tgt = transform.resize(tgt, (self.shape[0], self.shape[1]), mode='constant', preserve_range=True)\n"," if not self.binary_target:\n"," tgt = self._min_max_scaling(tgt)\n"," tgt_list.append(tgt)\n","\n"," if self.random_crop:\n"," if src.shape[0] == self.shape[0]:\n"," x_rand = 0\n"," if src.shape[1] == self.shape[1]:\n"," y_rand = 0\n"," if src.shape[0] > self.shape[0]:\n"," x_rand = np.random.randint(src.shape[0] - self.shape[0])\n"," if src.shape[1] > self.shape[1]:\n"," y_rand = np.random.randint(src.shape[1] - self.shape[1])\n"," if src.shape[0] < self.shape[0] or src.shape[1] < self.shape[1]:\n"," raise ValueError('Patch shape larger than (downscaled) source shape')\n"," \n"," for i in range(self.shape[2]):\n"," if self.random_crop:\n"," src = src_list[i]\n"," tgt = tgt_list[i]\n"," src_crop = src[x_rand:self.shape[0]+x_rand, y_rand:self.shape[1]+y_rand]\n"," tgt_crop = tgt[x_rand:self.shape[0]+x_rand, y_rand:self.shape[1]+y_rand]\n"," else:\n"," src_crop = src_list[i]\n"," tgt_crop = tgt_list[i]\n","\n"," source_batch[batch,:,:,i,0] = src_crop\n"," target_batch[batch,:,:,i,0] = tgt_crop\n","\n"," if self.augment:\n"," # On-the-fly data augmentation\n"," rand = np.random.random()\n"," # Data augmentation by reversing stack\n"," if rand > 2/3:\n"," source_batch_rev = source_batch[::-1]\n"," target_batch_rev = target_batch[::-1]\n","\n"," return source_batch_rev, target_batch_rev\n"," # Data augmentation by elastic deformation\n"," elif rand < 2/3 and rand > 1/3:\n"," [source_batch_deform, target_batch_deform] = elasticdeform.deform_random_grid([source_batch, target_batch],\n"," axis=(1, 2, 3),\n"," sigma=5,\n"," points=3,\n"," order=4) # points=2 is better imo\n"," if self.binary_target:\n"," target_batch_deform = target_batch_deform > 0.25\n"," \n"," return source_batch_deform, target_batch_deform\n"," else:\n"," return source_batch, target_batch\n"," else:\n"," return source_batch, target_batch\n","\n"," def on_epoch_end(self):\n"," # Validation split performed here\n"," self.batch_list = []\n"," # Create batch_list of all combinations of tifffile and stack position\n"," if self.dir_flag:\n"," for i in range(len(self.source_dir_list)):\n"," num_of_pages = tifffile.imread(self.source_dir_list[i]).shape[0]\n"," if self.is_val:\n"," start_page = num_of_pages-math.floor(self.val_split*num_of_pages)\n"," for j in range(start_page, num_of_pages-self.shape[2]):\n"," self.batch_list.append([i, j])\n"," else:\n"," last_page = math.floor((1-self.val_split)*num_of_pages)\n"," for j in range(last_page-self.shape[2]):\n"," self.batch_list.append([i, j])\n"," else:\n"," num_of_pages = self.source.shape[0]\n"," if self.is_val:\n"," start_page = num_of_pages-math.floor(self.val_split*num_of_pages)\n"," for j in range(start_page, num_of_pages-self.shape[2]):\n"," self.batch_list.append([0, j])\n","\n"," else:\n"," last_page = math.floor((1-self.val_split)*num_of_pages)\n"," for j in range(last_page-self.shape[2]):\n"," self.batch_list.append([0, j])\n"," \n"," if self.is_val and (len(self.batch_list) <= 0):\n"," raise 
ValueError('validation_split too small! Increase val_split or decrease z-depth')\n"," random.shuffle(self.batch_list)\n"," \n"," def _min_max_scaling(self, data):\n"," n = data - np.min(data)\n"," d = np.max(data) - np.min(data) \n"," \n"," return n/d\n"," \n"," def class_weights(self):\n","\n"," ones = 0\n"," pixels = 0\n","\n"," if self.dir_flag:\n"," for i in range(len(self.target_dir_list)):\n"," tgt = tifffile.imread(self.target_dir_list[i]).astype(np.bool)\n"," ones += np.sum(tgt)\n"," pixels += tgt.shape[0]*tgt.shape[1]*tgt.shape[2]\n"," else:\n"," ones = np.sum(self.target)\n"," pixels = self.target.shape[0]*self.target.shape[1]*self.target.shape[2]\n"," p_ones = ones/pixels\n"," p_zeros = 1-p_ones\n","\n"," # Return swapped probability to increase weight of unlikely class\n"," return p_ones, p_zeros\n","\n","# Define custom loss and dice coefficient\n","def dice_coefficient(y_true, y_pred):\n","\n"," eps = 1e-6\n"," y_true_f = K.flatten(y_true)\n"," y_pred_f = K.flatten(y_pred)\n"," intersection = K.sum(y_true_f*y_pred_f)\n","\n"," return (2.*intersection)/(K.sum(y_true_f*y_true_f)+K.sum(y_pred_f*y_pred_f)+eps)\n","\n","def weighted_binary_crossentropy(zero_weight, one_weight):\n","\n"," def _weighted_binary_crossentropy(y_true, y_pred):\n","\n"," binary_crossentropy = K.binary_crossentropy(y_true, y_pred)\n","\n"," weight_vector = y_true*one_weight+(1.-y_true)*zero_weight\n"," weighted_binary_crossentropy = weight_vector*binary_crossentropy\n","\n"," return K.mean(weighted_binary_crossentropy)\n","\n"," return _weighted_binary_crossentropy\n","\n","# Custom callback showing sample prediction\n","class SampleImageCallback(Callback):\n","\n"," def __init__(self, model, sample_data, model_path, save=False):\n"," self.model = model\n"," self.sample_data = sample_data\n"," self.model_path = model_path\n"," self.save = save\n","\n"," def on_epoch_end(self, epoch, logs={}):\n","\n"," sample_predict = self.model.predict_on_batch(self.sample_data)\n","\n"," f=plt.figure(figsize=(16,8))\n"," plt.subplot(1,2,1)\n"," plt.imshow(self.sample_data[0,:,:,0,0], interpolation='nearest', cmap='gray')\n"," plt.title('Sample source')\n"," plt.axis('off');\n","\n"," plt.subplot(1,2,2)\n"," plt.imshow(sample_predict[0,:,:,0,0], interpolation='nearest', cmap='magma')\n"," plt.title('Predicted target')\n"," plt.axis('off');\n","\n"," plt.show()\n","\n"," if self.save:\n"," plt.savefig(self.model_path + '/epoch_' + str(epoch+1) + '.png')\n","\n","\n","# Define Unet3D class\n","class Unet3D:\n","\n"," def __init__(self,\n"," shape=(256,256,16,1)):\n","\n"," if isinstance(shape, str):\n"," shape = eval(shape)\n","\n"," self.shape = shape\n"," \n"," input_tensor = Input(self.shape, name='input')\n","\n"," self.model = self.unet_3D(input_tensor)\n","\n"," def down_block_3D(self, input_tensor, filters):\n","\n"," x = Conv3D(filters=filters, kernel_size=(3,3,3), padding='same')(input_tensor)\n"," x = BatchNormalization()(x)\n"," x = ReLU()(x)\n","\n"," x = Conv3D(filters=filters*2, kernel_size=(3,3,3), padding='same')(x)\n"," x = BatchNormalization()(x)\n"," x = ReLU()(x)\n","\n"," return x\n","\n"," def up_block_3D(self, input_tensor, concat_layer, filters):\n","\n"," x = Conv3DTranspose(filters, kernel_size=(2,2,2), strides=(2,2,2))(input_tensor)\n","\n"," x = Concatenate()([x, concat_layer])\n","\n"," x = Conv3D(filters=filters, kernel_size=(3,3,3), padding='same')(x)\n"," x = BatchNormalization()(x)\n"," x = ReLU()(x)\n","\n"," x = Conv3D(filters=filters*2, kernel_size=(3,3,3), padding='same')(x)\n"," x = 
BatchNormalization()(x)\n"," x = ReLU()(x)\n","\n"," return x\n","\n"," def unet_3D(self, input_tensor, filters=32):\n","\n"," d1 = self.down_block_3D(input_tensor, filters=filters)\n"," p1 = MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), data_format='channels_last')(d1)\n"," d2 = self.down_block_3D(p1, filters=filters*2)\n"," p2 = MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), data_format='channels_last')(d2)\n"," d3 = self.down_block_3D(p2, filters=filters*4)\n"," p3 = MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), data_format='channels_last')(d3)\n","\n"," d4 = self.down_block_3D(p3, filters=filters*8)\n","\n"," u1 = self.up_block_3D(d4, d3, filters=filters*4)\n"," u2 = self.up_block_3D(u1, d2, filters=filters*2)\n"," u3 = self.up_block_3D(u2, d1, filters=filters)\n","\n"," output_tensor = Conv3D(filters=1, kernel_size=(1,1,1), activation='sigmoid')(u3)\n","\n"," return Model(inputs=[input_tensor], outputs=[output_tensor])\n","\n"," def summary(self):\n"," return self.model.summary()\n","\n"," def train(self, \n"," epochs, \n"," batch_size, \n"," train_source, \n"," train_target, \n"," model_path, \n"," model_name,\n"," optimizer='adam',\n"," loss='weighted_binary_crossentropy',\n"," metrics='dice',\n"," val_split=0.2, \n"," augment=True, \n"," ckpt_period=1, \n"," save_best_ckpt_only=False, \n"," ckpt_path=None,\n"," random_crop=True,\n"," downscaling=1,\n"," binary_target=True):\n"," \n","\n"," train_generator = MultiPageTiffGenerator(train_source,\n"," train_target,\n"," batch_size=batch_size,\n"," shape=self.shape,\n"," augment=augment,\n"," val_split=val_split,\n"," random_crop=random_crop,\n"," downscale=downscaling,\n"," binary_target=binary_target)\n","\n"," val_generator = MultiPageTiffGenerator(train_source,\n"," train_target,\n"," batch_size=batch_size,\n"," shape=self.shape,\n"," augment=False,\n"," val_split=val_split,\n"," is_val=True,\n"," random_crop=random_crop,\n"," downscale=downscaling,\n"," binary_target=binary_target)\n","\n"," class_weight_zero, class_weight_one = train_generator.class_weights()\n"," \n"," if loss == 'weighted_binary_crossentropy':\n"," loss = weighted_binary_crossentropy(class_weight_zero, class_weight_one)\n"," \n"," if metrics == 'dice':\n"," metrics = dice_coefficient\n","\n"," self.model.compile(optimizer=optimizer,\n"," loss=loss,\n"," metrics=[metrics])\n","\n"," if ckpt_path is not None:\n"," self.model.load_weights(ckpt_path)\n","\n"," full_model_path = os.path.join(model_path, model_name)\n","\n"," if not os.path.exists(full_model_path):\n"," os.makedirs(full_model_path)\n"," \n"," log_dir = full_model_path + '/Quality Control'\n","\n"," if not os.path.exists(log_dir):\n"," os.makedirs(log_dir)\n"," \n"," ckpt_dir = full_model_path + '/ckpt'\n","\n"," if not os.path.exists(ckpt_dir):\n"," os.makedirs(ckpt_dir)\n","\n"," csv_out_name = log_dir + '/training_evaluation.csv'\n"," if ckpt_path is None:\n"," csv_logger = CSVLogger(csv_out_name)\n"," else:\n"," csv_logger = CSVLogger(csv_out_name, append=True)\n","\n"," if save_best_ckpt_only:\n"," ckpt_name = ckpt_dir + '/' + model_name + '.hdf5'\n"," else:\n"," ckpt_name = ckpt_dir + '/' + model_name + '_epoch_{epoch:02d}_val_loss_{val_loss:.4f}.hdf5'\n"," \n"," model_ckpt = ModelCheckpoint(ckpt_name,\n"," verbose=1,\n"," period=ckpt_period,\n"," save_best_only=save_best_ckpt_only,\n"," save_weights_only=True)\n","\n"," sample_batch = val_generator[0][0]\n"," sample_img = SampleImageCallback(self.model, \n"," sample_batch, \n"," model_path)\n","\n"," 
self.model.fit_generator(generator=train_generator,\n"," validation_data=val_generator,\n"," validation_steps=math.floor(len(val_generator)/batch_size),\n"," epochs=epochs,\n"," callbacks=[csv_logger,\n"," model_ckpt,\n"," sample_img])\n","\n"," last_ckpt_name = ckpt_dir + '/' + model_name + '_last.hdf5'\n"," self.model.save_weights(last_ckpt_name)\n","\n"," def _min_max_scaling(self, data):\n"," n = data - np.min(data)\n"," d = np.max(data) - np.min(data) \n"," \n"," return n/d\n","\n"," def predict(self, input, ckpt_path, z_range=None, downscaling=None, true_patch_size=None):\n","\n"," self.model.load_weights(ckpt_path)\n","\n"," if isinstance(downscaling, str):\n"," downscaling = eval(downscaling)\n","\n"," if math.isnan(downscaling):\n"," downscaling = None\n","\n"," if isinstance(true_patch_size, str):\n"," true_patch_size = eval(true_patch_size)\n"," \n"," if not isinstance(true_patch_size, tuple): \n"," if math.isnan(true_patch_size):\n"," true_patch_size = None\n","\n"," if isinstance(input, str):\n"," src_volume = tifffile.imread(input)\n"," elif isinstance(input, np.ndarray):\n"," src_volume = input\n"," else:\n"," raise TypeError('Input is not path or numpy array!')\n"," \n"," in_size = src_volume.shape\n","\n"," if downscaling or true_patch_size is not None:\n"," x_scaling = 0\n"," y_scaling = 0\n","\n"," if true_patch_size is not None:\n"," x_scaling += true_patch_size[0]/self.shape[0]\n"," y_scaling += true_patch_size[1]/self.shape[1]\n"," if downscaling is not None:\n"," x_scaling += downscaling\n"," y_scaling += downscaling\n","\n"," src_list = []\n"," for i in range(src_volume.shape[0]):\n"," src_list.append(transform.downscale_local_mean(src_volume[i], (int(x_scaling), int(y_scaling))))\n"," src_volume = np.array(src_list) \n","\n"," if z_range is not None:\n"," src_volume = src_volume[z_range[0]:z_range[1]]\n","\n"," src_volume = self._min_max_scaling(src_volume) \n","\n"," src_array = np.zeros((1,\n"," math.ceil(src_volume.shape[1]/self.shape[0])*self.shape[0], \n"," math.ceil(src_volume.shape[2]/self.shape[1])*self.shape[1],\n"," math.ceil(src_volume.shape[0]/self.shape[2])*self.shape[2], \n"," self.shape[3]))\n","\n"," for i in range(src_volume.shape[0]):\n"," src_array[0,:src_volume.shape[1],:src_volume.shape[2],i,0] = src_volume[i]\n","\n"," pred_array = np.empty(src_array.shape)\n","\n"," for i in range(math.ceil(src_volume.shape[1]/self.shape[0])):\n"," for j in range(math.ceil(src_volume.shape[2]/self.shape[1])):\n"," for k in range(math.ceil(src_volume.shape[0]/self.shape[2])):\n"," pred_temp = self.model.predict(src_array[:,\n"," i*self.shape[0]:i*self.shape[0]+self.shape[0],\n"," j*self.shape[1]:j*self.shape[1]+self.shape[1],\n"," k*self.shape[2]:k*self.shape[2]+self.shape[2]])\n"," pred_array[:,\n"," i*self.shape[0]:i*self.shape[0]+self.shape[0],\n"," j*self.shape[1]:j*self.shape[1]+self.shape[1],\n"," k*self.shape[2]:k*self.shape[2]+self.shape[2]] = pred_temp\n"," \n"," pred_volume = np.rollaxis(np.squeeze(pred_array), -1)[:src_volume.shape[0],:src_volume.shape[1],:src_volume.shape[2]] \n","\n"," if downscaling is not None:\n"," pred_list = []\n"," for i in range(pred_volume.shape[0]):\n"," pred_list.append(transform.resize(pred_volume[i], (in_size[1], in_size[2]), preserve_range=True))\n"," pred_volume = np.array(pred_list)\n","\n"," return pred_volume\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Fw0kkTU6CsU4"},"source":["# **3. 
Select your model parameters**\n","\n","---\n"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"CB6acvUFtWqd"},"source":["## **Paths to training data and model**\n","\n","* **`training_source`** and **`training_target`** specify the paths to the training data. They can either be a single multipage TIFF file each or directories containing various multipage TIFF files in which case target and source files must be named identically within the respective directories. See Section 0 for a detailed description of the necessary directory structure.\n","\n","* **`model_name`** will be used when naming checkpoints. Adhere to a `lower_case_with_underscores` naming convention and beware of using the name of an existing model within the same folder, as it will be overwritten.\n","\n","* **`model_path`** specifies the directory where the model checkpoints and quality control logs will be saved.\n","\n","\n","**Note:** You can copy paths from the 'Files' tab by right-clicking any folder or file and selecting 'Copy path'. \n","\n","## **Training parameters**\n","\n","* **`number_of_epochs`** is the number of times the entire training data will be seen by the model. *Default: >100*\n","\n","* **`batch_size`** is the number of training patches of size `patch_size` that will be bundled together at each training step. *Default: 1*\n","\n","* **`patch_size`** specifies the size of the three-dimensional training patches in (x, y, z) that will be fed to the model. In order to avoid errors, preferably use a square aspect ratio or stick to the advanced parameters. *Default: <(512, 512, 16)*\n","\n","* **`validation_split_in_percent`** is the relative amount of training data that will be set aside for validation. *Default: 20* \n","\n","* **`downscaling_in_xy`** downscales the training images by the specified amount in x and y. This is useful to enforce isotropic pixel-size if the z resolution is lower than the xy resolution in the training volume or to capture a larger field-of-view while decreasing the memory requirements. *Default: 1*\n","\n","* **`image_pre_processing`** selects whether the training images are randomly cropped during training or resized to `patch_size`. Choose `randomly crop to patch_size` to shrink the field-of-view of the training images to the `patch_size`. *Default: randomly crop to patch_size* \n","\n","* **`binary_target`** forces the target image to be binary. Choose this if your model is trained to perform binary segmentation tasks *Default: True* \n","\n","* **`loss_function`** defines the loss. Read more [here](https://keras.io/api/losses/). *Default: weighted_binary_crossentropy* \n","\n","* **`metrics`** defines the metric. Read more [here](https://keras.io/api/metrics/). *Default: dice* \n","\n","* **`optimizer`** defines the optimizer. Read more [here](https://keras.io/api/optimizers/). *Default: adam* \n","\n","**Note:** If a *ResourceExhaustedError* is raised in Section 4.1. during training, decrease `batch_size` and `patch_size`. Decrease `batch_size` first and if the error persists at `batch_size = 1`, reduce the `patch_size`. \n","\n","\n","## **Data augmentation**\n"," \n","* **`apply_data_augmentation`** ensures that data augmentation is randomly applied to the training data at each training step. This includes inverting the order of the slices within a training patch, as well as applying elastic grid-based deformations as described in the original 3D U-Net paper. 
Augmenting the training data increases robustness of the model by simulating possible variations within the training data, which helps prevent it from overfitting on small datasets. We therefore strongly recommend selecting data augmentation. *Default: True*\n","\n","**Note:** The number of steps per epoch is calculated as `floor(augment_factor * (1 - validation_split) * num_of_slices / batch_size)` if `image_pre_processing` is `resize to patch_size`, where `augment_factor` is three if `apply_data_augmentation` is `True` and one otherwise. The `num_of_slices` is the overall number of slices (z-depth) in the training set across all provided image volumes. If `image_pre_processing` is `randomly crop to patch_size`, the number of steps per epoch is calculated as `floor(augment_factor * volume / (crop_volume * batch_size))`, where `volume` is the overall volume of the training data in pixels accounting for the validation split and `crop_volume` is defined as the volume in pixels based on the specified `patch_size`."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"ewpNJ_I0Mv47","colab":{}},"source":["class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#@markdown ###Path to training data:\n","training_source = \"\" #@param {type:\"string\"}\n","training_target = \"\" #@param {type:\"string\"}\n","\n","#@markdown ---\n","\n","#@markdown ###Model name and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","full_model_path = os.path.join(model_path, model_name)\n","\n","#@markdown ---\n","\n","#@markdown ###Training parameters\n","number_of_epochs = 100#@param {type:\"number\"}\n","\n","#@markdown ###Default advanced parameters\n","use_default_advanced_parameters = False #@param {type:\"boolean\"}\n","\n","#@markdown If not, please change:\n","\n","batch_size = 1#@param {type:\"number\"}\n","patch_size = (256,256,16) #@param {type:\"number\"} # in pixels\n","training_shape = patch_size + (1,)\n","image_pre_processing = 'randomly crop to patch_size' #@param [\"randomly crop to patch_size\", \"resize to patch_size\"]\n","\n","validation_split_in_percent = 20 #@param{type:\"number\"}\n","downscaling_in_xy = 1#@param {type:\"number\"} # in pixels\n","\n","binary_target = True #@param {type:\"boolean\"}\n","\n","loss_function = 'weighted_binary_crossentropy' #@param [\"weighted_binary_crossentropy\", \"binary_crossentropy\", \"categorical_crossentropy\", \"sparse_categorical_crossentropy\", \"mean_squared_error\", \"mean_absolute_error\"]\n","\n","metrics = 'dice' #@param [\"dice\", \"accuracy\"]\n","\n","optimizer = 'adam' #@param [\"adam\", \"sgd\", \"rmsprop\"]\n","\n","\n","if image_pre_processing == \"randomly crop to patch_size\":\n"," random_crop = True\n","else:\n"," random_crop = False\n","\n","#@markdown ---\n","\n","#@markdown ###Checkpointing\n","\n","checkpointing_period = 1 #@param {type:\"number\"}\n","\n","#@markdown If chosen, only the best checkpoint is saved, otherwise a checkpoint is saved every checkpoint_period epochs:\n","save_best_only = True #@param {type:\"boolean\"}\n","\n","#@markdown Choose if training was interrupted:\n","resume_training = False #@param {type:\"boolean\"}\n","\n","#@markdown For transfer learning, do not select resume_training and specify a checkpoint_path below:\n","checkpoint_path = \"\" #@param {type:\"string\"}\n","\n","if resume_training and checkpoint_path != \"\":\n"," print('If resume_training is True while checkpoint_path is specified, resume_training 
will be set to False!')\n"," resume_training = False\n","\n","#@markdown ---\n","\n","#@markdown ###Data Augmentation\n","\n","apply_data_augmentation = True #@param {type:\"boolean\"}\n"," \n","\n","# Retrieve last checkpoint\n","if resume_training:\n"," try:\n"," ckpt_dir_list = glob(full_model_path + '/ckpt/*')\n"," ckpt_dir_list.sort()\n"," last_ckpt_path = ckpt_dir_list[-1]\n"," print('Training will resume from checkpoint:', os.path.basename(last_ckpt_path))\n"," except IndexError:\n"," last_ckpt_path=None\n"," print('CheckpointError: No previous checkpoints were found, training from scratch.')\n","elif not resume_training and checkpoint_path != \"\":\n"," last_ckpt_path = checkpoint_path\n"," assert os.path.isfile(last_ckpt_path), 'checkpoint_path does not exist!'\n","else:\n"," last_ckpt_path=None\n","\n","\n","if use_default_advanced_parameters: \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 1\n"," training_shape = (256,256,8,1)\n"," validation_split_in_percent = 20\n"," downscaling_in_xy = 1\n"," random_crop = True\n"," binary_target = True\n"," loss_function = 'weighted_binary_crossentropy'\n"," metrics = 'dice'\n"," optimizer = 'adam'\n","\n","# Instantiate Unet3D \n","model = Unet3D(shape=training_shape)\n","\n","#here we check that no model with the same name already exist, if so delete\n","if not resume_training and os.path.exists(full_model_path):\n"," print('!! WARNING: Folder already exists and will be overwritten !!') \n"," shutil.rmtree(full_model_path)\n","\n","if not os.path.exists(full_model_path):\n"," os.makedirs(full_model_path)\n","\n","# Show sample image\n","# Show sample image\n","if os.path.isdir(training_source):\n"," training_source_sample = sorted(glob(os.path.join(training_source, '*')))[0]\n"," training_target_sample = sorted(glob(os.path.join(training_target, '*')))[0]\n","else:\n"," training_source_sample = training_source\n"," training_target_sample = training_target\n","\n","src_sample = tifffile.imread(training_source_sample)\n","src_sample = model._min_max_scaling(src_sample)\n","if binary_target:\n"," tgt_sample = tifffile.imread(training_target_sample).astype(np.bool)\n","else:\n"," tgt_sample = tifffile.imread(training_target_sample)\n","\n","src_down = transform.downscale_local_mean(src_sample[0], (downscaling_in_xy, downscaling_in_xy))\n","tgt_down = transform.downscale_local_mean(tgt_sample[0], (downscaling_in_xy, downscaling_in_xy)) \n","\n","if random_crop:\n"," true_patch_size = None\n","\n"," if src_down.shape[0] == training_shape[0]:\n"," x_rand = 0\n"," if src_down.shape[1] == training_shape[1]:\n"," y_rand = 0\n"," if src_down.shape[0] > training_shape[0]:\n"," x_rand = np.random.randint(src_down.shape[0] - training_shape[0])\n"," if src_down.shape[1] > training_shape[1]:\n"," y_rand = np.random.randint(src_down.shape[1] - training_shape[1])\n"," if src_down.shape[0] < training_shape[0] or src_down.shape[1] < training_shape[1]:\n"," raise ValueError('Patch shape larger than (downscaled) source shape')\n","else:\n"," true_patch_size = src_down.shape\n","\n","def scroll_in_z(z):\n"," src_down = transform.downscale_local_mean(src_sample[z-1], (downscaling_in_xy,downscaling_in_xy))\n"," tgt_down = transform.downscale_local_mean(tgt_sample[z-1], (downscaling_in_xy,downscaling_in_xy)) \n"," if random_crop:\n"," src_slice = src_down[x_rand:training_shape[0]+x_rand, y_rand:training_shape[1]+y_rand]\n"," tgt_slice = tgt_down[x_rand:training_shape[0]+x_rand, y_rand:training_shape[1]+y_rand]\n"," else:\n"," \n"," src_slice 
= transform.resize(src_down, (training_shape[0], training_shape[1]), mode='constant', preserve_range=True)\n"," tgt_slice = transform.resize(tgt_down, (training_shape[0], training_shape[1]), mode='constant', preserve_range=True)\n","\n"," f=plt.figure(figsize=(16,8))\n"," plt.subplot(1,2,1)\n"," plt.imshow(src_slice, cmap='gray')\n"," plt.title('Training source (z = ' + str(z) + ')', fontsize=15)\n"," plt.axis('off')\n","\n"," plt.subplot(1,2,2)\n"," plt.imshow(tgt_slice, cmap='magma')\n"," plt.title('Training target (z = ' + str(z) + ')', fontsize=15)\n"," plt.axis('off')\n","\n","print('This is what the training images will look like with the chosen settings')\n","interact(scroll_in_z, z=widgets.IntSlider(min=1, max=src_sample.shape[0], step=1, value=0));\n","\n","# Save model parameters\n","params = {'training_source': training_source,\n"," 'training_target': training_target,\n"," 'model_name': model_name,\n"," 'model_path': model_path,\n"," 'number_of_epochs': number_of_epochs,\n"," 'batch_size': batch_size,\n"," 'training_shape': training_shape,\n"," 'downscaling': downscaling_in_xy,\n"," 'true_patch_size': true_patch_size,\n"," 'val_split': validation_split_in_percent/100,\n"," 'random_crop': random_crop,\n"," 'data_augmentation': apply_data_augmentation}\n","\n","params_df = pd.DataFrame.from_dict(params, orient='index')\n","# Check if file is actually made\n","params_df.to_csv(os.path.join(full_model_path, 'params.csv'))"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"rQndJj70FzfL"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"wQPz0F6JlvJR"},"source":["## **4.1. Train the network**\n","---\n","\n","\n","**CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training times must be less than 12 hours! If training takes longer than 12 hours, please decrease `number_of_epochs`."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"opWPgUl7erct","colab":{}},"source":["#@markdown ##Show model summary\n","model.summary()"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"EZnoS3rb8BSR","scrolled":false,"colab":{}},"source":["#@markdown ##Start Training\n","\n","# Start Training\n","model.train(epochs=number_of_epochs,\n"," batch_size=batch_size,\n"," train_source=training_source,\n"," train_target=training_target,\n"," model_path=model_path,\n"," model_name=model_name,\n"," loss=loss_function,\n"," metrics=metrics,\n"," optimizer=optimizer,\n"," val_split=validation_split_in_percent/100,\n"," augment=apply_data_augmentation,\n"," ckpt_period=checkpointing_period,\n"," save_best_ckpt_only=save_best_only,\n"," ckpt_path=last_ckpt_path,\n"," random_crop=random_crop,\n"," downscaling=downscaling_in_xy,\n"," binary_target=binary_target)\n","\n","print('Training successfully completed!')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"XQjQb_J_Qyku"},"source":["##**4.3. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved to your Google Drive, in the **`model_path`** folder that was specified in Section 3. 
Download the folder to avoid any unwanted surprises, since the data can be erased if you train another model using the same `model_path`."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"2HbZd7rFqAad"},"source":["# **5. Evaluate your model**\n","---\n","\n","In this section the newly trained model can be assessed for performance. This involves inspecting the loss function in Section 5.1. and employing more advanced metrics in Section 5.2.\n","\n","**We highly recommend performing quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"EdcnkCr9Nbl8","colab":{}},"source":["#@markdown ###Model to be evaluated:\n","#@markdown If left blank, the latest model defined in Section 3 will be evaluated:\n","\n","qc_model_name = \"\" #@param {type:\"string\"}\n","qc_model_path = \"\" #@param {type:\"string\"}\n","\n","if len(qc_model_path) == 0 and len(qc_model_name) == 0:\n"," qc_model_name = model_name\n"," qc_model_path = model_path\n","\n","full_qc_model_path = os.path.join(qc_model_path, qc_model_name)\n","\n","if os.path.exists(full_qc_model_path):\n"," print(qc_model_name + ' will be evaluated')\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"yDY9dtzdUTLh"},"source":["## **5.1. Inspecting loss function**\n","---\n","\n","**The training loss** is the error between prediction and target after each epoch calculated across the training data while the **validation loss** calculates the error on the (unseen) validation data. During training these values should decrease until converging at which point the model has been sufficiently trained. If the validation loss starts increasing while the training loss has plateaued, the model has overfit on the training data which reduces its ability to generalise. Aim to halt training before this point.\n","\n","**Note:** For a more in-depth explanation please refer to [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols et al.\n","\n","\n","The accuracy is another performance metric that is calculated after each epoch. We use the [Sørensen–Dice coefficient](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient) to score the prediction accuracy. 
\n","\n"]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"vMzSP50kMv5p","colab":{}},"source":["#@markdown ##Visualise loss and accuracy\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","accuracyDataFromCSV = []\n","valaccuracyDataFromCSV = []\n","\n","with open(full_qc_model_path + '/Quality Control/training_evaluation.csv', 'r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[2]))\n"," vallossDataFromCSV.append(float(row[4]))\n"," accuracyDataFromCSV.append(float(row[1]))\n"," valaccuracyDataFromCSV.append(float(row[3]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training and validation loss', fontsize=14)\n","plt.ylabel('Loss', fontsize=12)\n","plt.xlabel('Epochs', fontsize=12)\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.plot(epochNumber,accuracyDataFromCSV, label='Training accuracy')\n","plt.plot(epochNumber,valaccuracyDataFromCSV, label='Validation accuracy')\n","plt.title('Training and validation accuracy', fontsize=14)\n","plt.ylabel('Dice', fontsize=12)\n","plt.xlabel('Epochs', fontsize=12)\n","plt.legend()\n","plt.savefig(full_qc_model_path + '/Quality Control/lossCurvePlots.png')\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"RZOPCVN0qcYb"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will provide both a visual indication of the model performance by comparing the overlay of the predicted and source volume."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"XbL7T9bw98Ja","colab":{}},"source":["#@markdown ##Compare prediction and ground-truth on testing data\n","\n","#@markdown Provide an unseen annotated dataset to determine the performance of the model:\n","\n","testing_source = \"\" #@param{type:\"string\"}\n","testing_target = \"\" #@param{type:\"string\"}\n","\n","qc_dir = full_qc_model_path + '/Quality Control'\n","predict_dir = qc_dir + '/Prediction'\n","if os.path.exists(predict_dir):\n"," shutil.rmtree(predict_dir)\n","\n","os.makedirs(predict_dir)\n","\n","# predict_dir + '/' + \n","predict_path = os.path.splitext(os.path.basename(testing_source))[0] + '_prediction.tif'\n","\n","def last_chars(x):\n"," return(x[-11:])\n","\n","try:\n"," ckpt_dir_list = glob(full_qc_model_path + '/ckpt/*')\n"," ckpt_dir_list.sort(key=last_chars)\n"," last_ckpt_path = ckpt_dir_list[0]\n"," print('Predicting from checkpoint:', os.path.basename(last_ckpt_path))\n","except IndexError:\n"," raise CheckpointError('No previous checkpoints were found, please retrain model.')\n","\n","# Load parameters\n","params = pd.read_csv(os.path.join(full_qc_model_path, 'params.csv'), names=['val'], header=0, index_col=0) \n","\n","model = Unet3D(shape=params.loc['training_shape', 'val'])\n","\n","prediction = model.predict(testing_source, last_ckpt_path, downscaling=params.loc['downscaling', 'val'], true_patch_size=params.loc['true_patch_size', 'val'])\n","\n","tifffile.imwrite(predict_path, prediction.astype('float32'), imagej=True)\n","\n","print('Predicted images!')\n","\n","qc_metrics_path = full_qc_model_path + '/Quality Control/QC_metrics_' + qc_model_name + '.csv'\n","\n","test_target = 
tifffile.imread(testing_target)\n","test_source = tifffile.imread(testing_source)\n","test_prediction = tifffile.imread(predict_path)\n","\n","def scroll_in_z(z):\n","\n"," plt.figure(figsize=(25,5))\n"," # Source\n"," plt.subplot(1,4,1)\n"," plt.axis('off')\n"," plt.imshow(test_source[z-1], cmap='gray')\n"," plt.title('Source (z = ' + str(z) + ')', fontsize=15)\n","\n"," # Target (Ground-truth)\n"," plt.subplot(1,4,2)\n"," plt.axis('off')\n"," plt.imshow(test_target[z-1], cmap='magma')\n"," plt.title('Target (z = ' + str(z) + ')', fontsize=15)\n","\n"," # Prediction\n"," plt.subplot(1,4,3)\n"," plt.axis('off')\n"," plt.imshow(test_prediction[z-1], cmap='magma')\n"," plt.title('Prediction (z = ' + str(z) + ')', fontsize=15)\n"," \n"," # Overlay\n"," plt.subplot(1,4,4)\n"," plt.axis('off')\n"," plt.imshow(test_target[z-1], cmap='Greens')\n"," plt.imshow(test_prediction[z-1], alpha=0.5, cmap='Purples')\n"," plt.title('Overlay (z = ' + str(z) + ')', fontsize=15)\n","\n","interact(scroll_in_z, z=widgets.IntSlider(min=1, max=test_source.shape[0], step=1, value=0));"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"lIP7AOvkg5pT"},"source":["## **5.3. Determine best Intersection over Union and threshold**\n","---\n","\n","**Note:** This section is only relevant if the target image is a binary mask and `binary_target` is selected in Section 3! \n","\n","This section will provide both a visual and a quantitative indication of the model performance by comparing the overlay of the predicted and source volume, as well as computing the highest [**Intersection over Union**](https://en.wikipedia.org/wiki/Jaccard_index) (IoU) score. The IoU is also known as the Jaccard Index. \n","\n","The best threshold is calculated using the IoU. Each threshold value from 0 to 255 is tested and the threshold with the highest score is deemed the best. 
The IoU is calculated for the entire volume in 3D."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"1hXoooMbYvxl","colab":{}},"source":["#@markdown ##Calculate Intersection over Union and best threshold \n","prediction = tifffile.imread(predict_path)\n","prediction = np.interp(prediction, (prediction.min(), prediction.max()), (0, 255))\n","\n","target = tifffile.imread(testing_target).astype(np.bool)\n","\n","def iou_vs_threshold(prediction, target):\n"," threshold_list = []\n"," IoU_scores_list = []\n","\n"," for threshold in range(0,256): \n"," mask = prediction > threshold\n","\n"," intersection = np.logical_and(target, mask)\n"," union = np.logical_or(target, mask)\n"," iou_score = np.sum(intersection) / np.sum(union)\n","\n"," threshold_list.append(threshold)\n"," IoU_scores_list.append(iou_score)\n","\n"," return threshold_list, IoU_scores_list\n","\n","threshold_list, IoU_scores_list = iou_vs_threshold(prediction, target)\n","thresh_arr = np.array(list(zip(threshold_list, IoU_scores_list)))\n","best_thresh = int(np.where(thresh_arr == np.max(thresh_arr[:,1]))[0])\n","best_iou = IoU_scores_list[best_thresh]\n","\n","print('Highest IoU is {:.4f} with a threshold of {}'.format(best_iou, best_thresh))\n","\n","def adjust_threshold(threshold, z):\n","\n"," f=plt.figure(figsize=(25,5))\n"," plt.subplot(1,4,1)\n"," plt.imshow((prediction[z-1] > threshold).astype('uint8'), cmap='magma')\n"," plt.title('Prediction (Threshold = ' + str(threshold) + ')', fontsize=15)\n"," plt.axis('off')\n","\n"," plt.subplot(1,4,2)\n"," plt.imshow(target[z-1], cmap='magma')\n"," plt.title('Target (z = ' + str(z) + ')', fontsize=15)\n"," plt.axis('off')\n","\n"," plt.subplot(1,4,3)\n"," plt.axis('off')\n"," plt.imshow(test_source[z-1], cmap='gray')\n"," plt.imshow((prediction[z-1] > threshold).astype('uint8'), alpha=0.4, cmap='Reds')\n"," plt.title('Overlay (z = ' + str(z) + ')', fontsize=15)\n","\n"," plt.subplot(1,4,4)\n"," plt.title('Threshold vs. IoU', fontsize=15)\n"," plt.plot(threshold_list, IoU_scores_list)\n"," plt.plot(threshold, IoU_scores_list[threshold], 'ro') \n"," plt.ylabel('IoU score')\n"," plt.xlabel('Threshold')\n"," plt.show()\n","\n","interact(adjust_threshold, \n"," threshold=widgets.IntSlider(min=0, max=255, step=1, value=best_thresh),\n"," z=widgets.IntSlider(min=1, max=prediction.shape[0], step=1, value=0));"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","Once sufficient performance of the trained model has been established using Section 5, the network can be used to segment unseen volumetric data."]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"d8wuQGjoq6eN"},"source":["## **6.1. Generate predictions from unseen dataset**\n","---\n","\n","The most recently trained model can now be used to predict segmentation masks on unseen images. If you want to use an older model, leave `model_path` blank. 
Predicted output images are saved in `output_path` as Image-J compatible TIFF files.\n","\n","## **Prediction parameters**\n","\n","* **`source_path`** specifies the location of the source \n","image volume.\n","\n","* **`output_directory`** specified the directory where the output predictions are stored.\n","\n","* **`binary_target`** should be chosen if the network is trained to predict binary segmentation masks.\n","\n","* **`threshold`** can be calculated in Section 5 and is used to generate binary masks from the predictions.\n","\n","* **`big_tiff`** should be chosen if the expected prediction exceeds 4GB. The predictions will be saved using the BigTIFF format. Beware that this might substantially reduce the prediction speed. *Default: False* \n","\n","* **`prediction_depth`** is only relevant if the prediction is saved as a BigTIFF. The prediction will not be performed in one go to not deplete the memory resources. Instead, the prediction is iteratively performed on a subset of the entire volume with shape `(source.shape[0], source.shape[1], prediction_depth)`. *Default: 32*\n","\n","* **`model_path`** specifies the path to a model other than the most recently trained."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"Ps4bbZgkmV8V","colab":{}},"source":["#@markdown ## Download example volume\n","\n","#@markdown This can take up to an hour\n","\n","import requests \n","import os\n","from tqdm.notebook import tqdm \n","\n","\n","def download_from_url(url, save_as):\n"," file_url = url\n"," r = requests.get(file_url, stream=True) \n"," \n"," with open(save_as, 'wb') as file: \n"," for block in tqdm(r.iter_content(chunk_size = 1024), desc = 'Downloading ' + os.path.basename(save_as), total=3275073, ncols=1000):\n"," if block:\n"," file.write(block) \n","\n","download_from_url('https://documents.epfl.ch/groups/c/cv/cvlab-unit/www/data/%20ElectronMicroscopy_Hippocampus/volumedata.tif', 'example_dataset/volumedata.tif')"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"8oQr1yKyBwZS","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then run the cell to predict outputs from your unseen images.\n","\n","source_path = \"\" #@param {type:\"string\"}\n","output_directory = \"\" #@param {type:\"string\"}\n","\n","if not os.path.exists(output_directory):\n"," os.makedirs(output_directory)\n","\n","output_path = os.path.join(output_directory, os.path.splitext(os.path.basename(source_path))[0] + '_predicted.tif')\n","#@markdown ###Prediction parameters:\n","\n","binary_target = True #@param {type:\"boolean\"}\n","\n","save_probability_map = False #@param {type:\"boolean\"}\n","\n","#@markdown Determine best threshold in Section 5.2.\n","\n","use_calculated_threshold = True #@param {type:\"boolean\"}\n","threshold = 100#@param {type:\"number\"}\n","\n","# Tifffile library issues means that images cannot be appended to \n","#@markdown Choose if prediction file exceeds 4GB or if input file is very large (above 2GB). Image volume saved as BigTIFF.\n","big_tiff = False #@param {type:\"boolean\"}\n","\n","#@markdown Reduce `prediction_depth` if runtime runs out of memory during prediction. 
Only relevant if prediction saved as BigTIFF\n","\n","prediction_depth = 32#@param {type:\"number\"}\n","\n","#@markdown ###Model to be evaluated\n","#@markdown If left blank, the latest model defined in Section 3 will be evaluated\n","\n","full_model_path_ = \"\" #@param {type:\"string\"}\n","\n","if len(full_model_path_) == 0:\n"," full_model_path_ = os.path.join(model_path, model_name) \n","\n","\n","\n","# Load parameters\n","params = pd.read_csv(os.path.join(full_model_path_, 'params.csv'), names=['val'], header=0, index_col=0) \n","model = Unet3D(shape=params.loc['training_shape', 'val'])\n","\n","if use_calculated_threshold:\n"," threshold = best_thresh\n","\n","def last_chars(x):\n"," return(x[-11:])\n","\n","try:\n"," ckpt_dir_list = glob(full_model_path_ + '/ckpt/*')\n"," ckpt_dir_list.sort(key=last_chars)\n"," last_ckpt_path = ckpt_dir_list[0]\n"," print('Predicting from checkpoint:', os.path.basename(last_ckpt_path))\n","except IndexError:\n"," raise CheckpointError('No previous checkpoints were found, please retrain model.')\n","\n","src = tifffile.imread(source_path)\n","\n","if src.nbytes >= 4e9:\n"," big_tiff = True\n"," print('The source file exceeds 4GB in memory, prediction will be saved as BigTIFF!')\n","\n","if binary_target:\n"," if not big_tiff:\n"," prediction = model.predict(src, last_ckpt_path, downscaling=params.loc['downscaling', 'val'], true_patch_size=params.loc['true_patch_size', 'val'])\n"," prediction = np.interp(prediction, (prediction.min(), prediction.max()), (0, 255))\n"," prediction = (prediction > threshold).astype('float32')\n","\n"," tifffile.imwrite(output_path, prediction, imagej=True)\n","\n"," else:\n"," with tifffile.TiffWriter(output_path, bigtiff=True) as tif:\n"," for i in tqdm(range(0, src.shape[0], prediction_depth)):\n"," prediction = model.predict(src, last_ckpt_path, z_range=(i,i+prediction_depth), downscaling=params.loc['downscaling', 'val'], true_patch_size=params.loc['true_patch_size', 'val'])\n"," prediction = np.interp(prediction, (prediction.min(), prediction.max()), (0, 255))\n"," prediction = (prediction > threshold).astype('float32')\n"," \n"," for j in range(prediction.shape[0]):\n"," tif.save(prediction[j])\n","\n","if not binary_target or save_probability_map:\n"," if not binary_target:\n"," prob_map_path = output_path\n"," else:\n"," prob_map_path = os.path.splitext(output_path)[0] + '_prob_map.tif'\n"," \n"," if not big_tiff:\n"," prediction = model.predict(src, last_ckpt_path, downscaling=params.loc['downscaling', 'val'], true_patch_size=params.loc['true_patch_size', 'val'])\n"," prediction = np.interp(prediction, (prediction.min(), prediction.max()), (0, 255))\n"," tifffile.imwrite(prob_map_path, prediction.astype('float32'), imagej=True)\n","\n"," else:\n"," with tifffile.TiffWriter(prob_map_path, bigtiff=True) as tif:\n"," for i in tqdm(range(0, src.shape[0], prediction_depth)):\n"," prediction = model.predict(src, last_ckpt_path, z_range=(i,i+prediction_depth), downscaling=params.loc['downscaling', 'val'], true_patch_size=params.loc['true_patch_size', 'val'])\n"," prediction = np.interp(prediction, (prediction.min(), prediction.max()), (0, 255))\n"," \n"," for j in range(prediction.shape[0]):\n"," tif.save(prediction[j])\n","\n","print('Predictions saved as', output_path)\n","\n","src_volume = tifffile.imread(source_path)\n","pred_volume = tifffile.imread(output_path)\n","\n","def scroll_in_z(z):\n"," \n"," f=plt.figure(figsize=(25,5))\n"," plt.subplot(1,2,1)\n"," plt.imshow(src_volume[z-1], cmap='gray')\n"," 
plt.title('Source (z = ' + str(z) + ')', fontsize=15)\n"," plt.axis('off')\n","\n"," plt.subplot(1,2,2)\n"," plt.imshow(pred_volume[z-1], cmap='magma')\n"," plt.title('Prediction (z = ' + str(z) + ')', fontsize=15)\n"," plt.axis('off')\n","\n","interact(scroll_in_z, z=widgets.IntSlider(min=1, max=src_volume.shape[0], step=1, value=0));\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"hvkd66PldsXB"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"code","metadata":{"cellView":"form","colab_type":"code","id":"anzh9w6x_sGO","colab":{}},"source":["#@markdown ##Download model directory\n","#@markdown 1. Specify the model_path\n","#@markdown 2. Run this cell to zip the model directory\n","#@markdown 3. Download the zipped file from the *Files* tab on the left\n","\n","from google.colab import files\n","\n","model_path_download = \"\" #@param {type:\"string\"}\n","\n","if len(model_path_download) == 0:\n"," model_path_download = model_path\n","\n","model_name_download = os.path.basename(model_path_download)\n","\n","print('Zipping', model_name_download)\n","\n","zip_model_path = model_name_download + '.zip'\n","\n","!zip -r \"$zip_model_path\" \"$model_path_download\"\n","\n","print('Successfully saved zipped model directory as', zip_model_path)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Rn9zpWpo0xNw"},"source":["\n","#**Thank you for using 3D U-Net!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb index d467333c..a60dc73b 100755 --- a/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"U-net_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1VcTsLOL28ntbr23gYrhY3upxkztZeUvn","timestamp":1591024690909},{"file_id":"19jT_GoHGN-UTM1aEgkgrOjB8pcFz5AW4","timestamp":1591017297795},{"file_id":"1UkoWB27ZWh5j_qivSZIOeOJP1h2EqrVz","timestamp":1589363183397},{"file_id":"1ofNqOc7lz-m6NL4B-m4BIheaU5N0GMln","timestamp":1588873191434},{"file_id":"1rJnsgIKyL6vuneydIfjCKMtMhV3XlQ6o","timestamp":1588583580765},{"file_id":"1RUYrp8beEgDKL1kOWw5LgR1QQb4yHQtG","timestamp":1587061416704},{"file_id":"1FVax0eY3-m8DbJHx0B8Dnep-uGlp30Zt","timestamp":1586601038120},{"file_id":"1TTqmCf2mFQ_PNIZEXX9sRAhoixjYP_AB","timestamp":1585842446113},{"file_id":"1cWwS-jbLYTDOpPp_hhKOLGFXfu06ccpG","timestamp":1585821375983},{"file_id":"1TPEE_AtGTLedawgVBwwXofEJEcJUCgo3","timestamp":1585137343783},{"file_id":"1SxFRb38aC_kmKzKVQfkwWzkK9n7YFxVv","timestamp":1585053829456},{"file_id":"15iw9IOwHNF_GhiHxkh_rWbJG8JnW14Wh","timestamp":1584375074441},{"file_id":"15oMbXnMa4LDEMhPHBr3ga0xhJomMLhDo","timestamp":1584105762670},{"file_id":"1__NtYFNA3DxNB7LrUY13Bt8_frye3iWl","timestamp":1583445015203},{"file_id":"11jsQfqKeDU1Zk3nPykjWKwYhFmvJ1zJ-","timestamp":1575289898486}],"collapsed_sections":[],"toc_visible":true},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"WDrFAwpFIpE0","colab_type":"text"},"source":["# 
**U-net**\n","\n","U-net is an encoder-decoder architecture originally used for image segmentation. The first half of the U-net architecture is a downsampling convolutional neural network which acts as a feature extractor from input images. The other half upsamples these results and restores an image by combining results from downsampling with the upsampled images.\n","\n","U-net has become a commonly used architecture for image-to-image tasks and is also used in [CARE](https://www.nature.com/articles/s41592-018-0216-7).\n","\n","This notebook represents a basic U-net architecture which can be used by users to get acquainted with the functionality of image-to-image networks in microscopy. It should not be expected to provide results as good as networks built for specific image-to-image tasks.\n","\n","---\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the papers: \n","\n","**U-net: Convolutional Networks for Biomedical Image Segmentation** by *Olaf Ronneberger, Philipp Fischer, Thomas Brox* (https://arxiv.org/abs/1505.04597)\n","\n","and \n","\n","**U-net: deep learning for cell counting, detection, and morphometry** by *Thorsten Falk et al.* Nature Methods 2019\n","(https://www.nature.com/articles/s41592-018-0261-2)\n","And source code found in: https://github.com/zhixuhao/unet by *Zhixuhao*\n","\n","**Please also cite these original papers when using or developing this notebook.** "]},{"cell_type":"markdown","metadata":{"id":"ABNu2p4stHeB","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cells: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code, and the code can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. After execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you will find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. 
\n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"HVwncY_NvlYi","colab_type":"text"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For U-net to train, **it needs to have access to a paired training dataset corresponding to images and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Training_source\n"," - img_1.tif, img_2.tif, ...\n"," - Training_target\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Training_source\n"," - img_1.tif, img_2.tif\n"," - Training_target \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"JrGNzgEyxzGQ","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"wYoajeT54sQM","colab_type":"text"},"source":["\n","## **1.1. 
Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"TpT6gbwURzrV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n","  print('You do not have GPU access.') \n","  print('Did you change your runtime?') \n","  print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n","  print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n","  print('You have GPU access')\n","  !nvidia-smi\n","\n","# from tensorflow.python.client import device_lib \n","# device_lib.list_local_devices()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"quzkzlRD45HF","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste it into the cell and press Enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of the notebook."]},{"cell_type":"code","metadata":{"id":"eLwDxBnp4-bc","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in to your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on the \"Files\" tab on the left. Refresh it. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"leK5kmgD5Ism","colab_type":"text"},"source":["# **2. 
Install U-net dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"vOeLpQfT0QF1","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play to install U-net dependencies\n","\n","#As this notebokk depends mostly on keras which runs a tensorflow backend (which in turn is pre-installed in colab)\n","#only the data library needs to be additionally installed.\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","#We enforce the keras==2.2.5 release to ensure that the notebook continues working even if keras is updated.\n","\n","!pip install keras==2.2.5\n","!pip install data\n","\n","# Keras imports\n","from keras import models\n","from keras.models import Model, load_model\n","from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n","from keras.optimizers import Adam\n","# from keras.callbacks import ModelCheckpoint, LearningRateScheduler, CSVLogger # we currently don't use any other callbacks from ModelCheckpoints\n","from keras.callbacks import ModelCheckpoint\n","from keras.callbacks import ReduceLROnPlateau\n","from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\n","from keras import backend as keras\n","\n","# General import\n","from __future__ import print_function\n","import numpy as np\n","import pandas as pd\n","import os\n","import glob\n","from skimage import img_as_ubyte, io, transform\n","import matplotlib as mpl\n","from matplotlib import pyplot as plt\n","from matplotlib.pyplot import imread\n","from pathlib import Path\n","import shutil\n","import random\n","import time\n","import csv\n","import sys\n","from math import ceil\n","\n","# Imports for QC\n","from PIL import Image\n","from scipy import signal\n","from scipy import ndimage\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","# from tqdm import tqdm\n","from tqdm.notebook import tqdm\n","\n","from sklearn.feature_extraction import image\n","from skimage import img_as_ubyte, io, transform\n","from skimage.util.shape import view_as_windows\n","\n","# Suppressing some warnings\n","import warnings\n","warnings.filterwarnings('ignore')\n","\n","\n","# def create_patches(Training_source, Training_target, patch_width, patch_height):\n","# \"\"\"\n","# Function creates patches from the Training_source and Training_target images. 
\n","# The steps parameter indicates the offset between patches and, if integer, is the same in x and y.\n"," \n","# Returns: - Two arrays containing all patches without overlap\n","# - saves all created patches in two new directories in the /content folder.\n","# \"\"\"\n","# all_patches_img = []\n","# all_patches_mask = []\n","# for i in os.listdir(Training_source):\n","# img = io.imread(Training_source+'/'+i)\n","# mask = io.imread(Training_target+'/'+i,as_gray=True)\n","\n","# # Using view_as_windows with step size equal to the patch size to ensure there is no overlap\n","# patches_img = view_as_windows(img, (patch_width, patch_height), (patch_width, patch_height))\n","# patches_mask = view_as_windows(mask, (patch_width, patch_height), (patch_width, patch_height))\n","# #the shape of patches_img and patches_mask will be (number of patches along x, number of patches along y,patch_width,patch_height)\n"," \n","# all_patches_img.append(patches_img)\n","# all_patches_mask.append(patches_mask)\n","\n","# #Convert the list of patches to array to reshape\n","# img_array = np.array(all_patches_img)\n","# mask_array = np.array(all_patches_mask)\n","# #the shape of all_patches will be (number of images, number of patches along x, number of patches along y, patch_width, patch_height)\n","\n","# #Here we get the number of patches per image\n","# num_x = img_array.shape[1]\n","# num_y = img_array.shape[2]\n","\n","# #reshape the windows to give only a list of patches\n","# img_array = img_array.reshape(len(os.listdir(Training_source))*num_x*num_y,patch_width,patch_height)\n","# mask_array = mask_array.reshape(len(os.listdir(Training_source))*num_x*num_y,patch_width,patch_height)\n","\n","# #Here we save the patches, in the /content directory as they will not usually be needed after training\n","# if os.path.exists(os.path.join('/content','img_patches')):\n","# shutil.rmtree(os.path.join('/content','img_patches'))\n","# if os.path.exists(os.path.join('/content','mask_patches')):\n","# shutil.rmtree(os.path.join('/content','mask_patches'))\n","# if os.path.exists(os.path.join('/content','rejected')):\n","# shutil.rmtree(os.path.join('/content','rejected'))\n","\n","# os.mkdir(os.path.join('/content','img_patches'))\n","# os.mkdir(os.path.join('/content','mask_patches'))\n","# os.mkdir(os.path.join('/content','rejected')) #This directory will contain the images that have too little signal.\n"," \n","# for file in range(0,img_array.shape[0]):\n","# img_save_path = os.path.join('/content/img_patches','patch_'+str(file)+'.tif')\n","# mask_save_path = os.path.join('/content/mask_patches','patch_'+str(file)+'.tif')\n","\n","# # if the mask conatins at least 2% of its total number pixels as mask, then go ahead and save the images\n","# pixel_threshold_array = sorted(mask_array[file].flatten())\n","# if pixel_threshold_array[int(round(len(pixel_threshold_array)*0.98))]>0:\n","# io.imsave(img_save_path, img_as_ubyte(normalizeMinMax(img_array[file])))\n","# io.imsave(mask_save_path, convert2Mask(normalizeMinMax(mask_array[file]),0))\n","# else:\n","# io.imsave('/content/rejected'+'/patch_'+str(file)+'.tif', img_as_ubyte(img_array[file]))\n","\n","# return img_array, mask_array\n","\n","\n","def create_patches(Training_source, Training_target, patch_width, patch_height):\n"," \"\"\"\n"," Function creates patches from the Training_source and Training_target images. 
\n"," The steps parameter indicates the offset between patches and, if integer, is the same in x and y.\n"," Saves all created patches in two new directories in the /content folder.\n","\n"," Returns: - Two paths to where the patches are now saved\n"," \"\"\"\n"," DEBUG = False\n","\n"," Patch_source = os.path.join('/content','img_patches')\n"," Patch_target = os.path.join('/content','mask_patches')\n"," Patch_rejected = os.path.join('/content','rejected')\n"," \n","\n"," #Here we save the patches, in the /content directory as they will not usually be needed after training\n"," if os.path.exists(Patch_source):\n"," shutil.rmtree(Patch_source)\n"," if os.path.exists(Patch_target):\n"," shutil.rmtree(Patch_target)\n"," if os.path.exists(Patch_rejected):\n"," shutil.rmtree(Patch_rejected)\n","\n"," os.mkdir(Patch_source)\n"," os.mkdir(Patch_target)\n"," os.mkdir(Patch_rejected) #This directory will contain the images that have too little signal.\n"," \n","\n"," all_patches_img = np.empty([0,patch_width, patch_height])\n"," all_patches_mask = np.empty([0,patch_width, patch_height])\n","\n"," for file in os.listdir(Training_source):\n","\n"," img = io.imread(os.path.join(Training_source, file))\n"," mask = io.imread(os.path.join(Training_target, file),as_gray=True)\n","\n"," if DEBUG:\n"," print(file)\n"," print(img.dtype)\n","\n"," # Using view_as_windows with step size equal to the patch size to ensure there is no overlap\n"," patches_img = view_as_windows(img, (patch_width, patch_height), (patch_width, patch_height))\n"," patches_mask = view_as_windows(mask, (patch_width, patch_height), (patch_width, patch_height))\n"," #the shape of patches_img and patches_mask will be (number of patches along x, number of patches along y,patch_width,patch_height)\n","\n"," all_patches_img = np.concatenate((all_patches_img, patches_img.reshape(patches_img.shape[0]*patches_img.shape[1], patch_width,patch_height)), axis = 0)\n"," all_patches_mask = np.concatenate((all_patches_mask, patches_mask.reshape(patches_mask.shape[0]*patches_mask.shape[1], patch_width,patch_height)), axis = 0)\n","\n"," number_of_patches = all_patches_img.shape[0]\n"," print('number of patches: '+str(number_of_patches))\n","\n"," if DEBUG:\n"," print(all_patches_img.shape)\n"," print(all_patches_img.dtype)\n","\n"," for i in range(number_of_patches):\n"," img_save_path = os.path.join(Patch_source,'patch_'+str(i)+'.tif')\n"," mask_save_path = os.path.join(Patch_target,'patch_'+str(i)+'.tif')\n","\n"," # if the mask conatins at least 2% of its total number pixels as mask, then go ahead and save the images\n"," pixel_threshold_array = sorted(all_patches_mask[i].flatten())\n"," if pixel_threshold_array[int(round(len(pixel_threshold_array)*0.98))]>0:\n"," io.imsave(img_save_path, img_as_ubyte(normalizeMinMax(all_patches_img[i])))\n"," io.imsave(mask_save_path, convert2Mask(normalizeMinMax(all_patches_mask[i]),0))\n"," else:\n"," io.imsave(Patch_rejected+'/patch_'+str(i)+'_image.tif', img_as_ubyte(normalizeMinMax(all_patches_img[i])))\n"," io.imsave(Patch_rejected+'/patch_'+str(i)+'_mask.tif', convert2Mask(normalizeMinMax(all_patches_mask[i])))\n","\n"," return Patch_source, Patch_target\n","\n","\n","def estimatePatchSize(data_path, max_width = 512, max_height = 512):\n","\n"," files = os.listdir(data_path)\n"," \n"," # Get the size of the first image found in the folder and initialise the variables to that\n"," n = 0 \n"," while os.path.isdir(os.path.join(data_path, files[n])):\n"," n += 1\n"," (height_min, width_min) = 
Image.open(os.path.join(data_path, files[n])).size\n","\n"," # Screen the size of all dataset to find the minimum image size\n"," for file in files:\n"," if not os.path.isdir(os.path.join(data_path, file)):\n"," (height, width) = Image.open(os.path.join(data_path, file)).size\n"," if width < width_min:\n"," width_min = width\n"," if height < height_min:\n"," height_min = height\n"," \n"," # Find the power of patches that will fit within the smallest dataset\n"," width_min, height_min = (fittingPowerOfTwo(width_min), fittingPowerOfTwo(height_min))\n","\n"," # Clip values at maximum permissible values\n"," if width_min > max_width:\n"," width_min = max_width\n","\n"," if height_min > max_height:\n"," height_min = max_height\n"," \n"," return (width_min, height_min)\n","\n","def fittingPowerOfTwo(number):\n"," n = 0\n"," while 2**n <= number:\n"," n += 1 \n"," return 2**(n-1)\n","\n","\n","def getClassWeights(Training_target_path):\n","\n"," Mask_dir_list = os.listdir(Training_target_path)\n"," number_of_dataset = len(Mask_dir_list)\n","\n"," class_count = np.zeros(2, dtype=int)\n"," for i in tqdm(range(number_of_dataset)):\n"," mask = io.imread(os.path.join(Training_target_path, Mask_dir_list[i]))\n"," mask = normalizeMinMax(mask)\n"," class_count[0] += mask.shape[0]*mask.shape[1] - mask.sum()\n"," class_count[1] += mask.sum()\n","\n"," n_samples = class_count.sum()\n"," n_classes = 2\n","\n"," class_weights = n_samples / (n_classes * class_count)\n"," return class_weights\n","\n","def weighted_binary_crossentropy(class_weights):\n","\n"," def _weighted_binary_crossentropy(y_true, y_pred):\n"," binary_crossentropy = keras.binary_crossentropy(y_true, y_pred)\n"," weight_vector = y_true * class_weights[1] + (1. - y_true) * class_weights[0]\n"," weighted_binary_crossentropy = weight_vector * binary_crossentropy\n","\n"," return keras.mean(weighted_binary_crossentropy)\n","\n"," return _weighted_binary_crossentropy\n","\n","\n","def save_augment(datagen,orig_img,dir_augmented_data=\"/content/augment\"):\n"," \"\"\"\n"," Saves a subset of the augmented data for visualisation, by default in /content.\n","\n"," This is adapted from: https://fairyonice.github.io/Learn-about-ImageDataGenerator.html\n"," \n"," \"\"\"\n"," try:\n"," os.mkdir(dir_augmented_data)\n"," except:\n"," ## if the preview folder exists, then remove\n"," ## the contents (pictures) in the folder\n"," for item in os.listdir(dir_augmented_data):\n"," os.remove(dir_augmented_data + \"/\" + item)\n","\n"," ## convert the original image to array\n"," x = img_to_array(orig_img)\n"," ## reshape (Sampke, Nrow, Ncol, 3) 3 = R, G or B\n"," #print(x.shape)\n"," x = x.reshape((1,) + x.shape)\n"," #print(x.shape)\n"," ## -------------------------- ##\n"," ## randomly generate pictures\n"," ## -------------------------- ##\n"," i = 0\n"," #We will just save 5 images,\n"," #but this can be changed, but note the visualisation in 3. 
currently uses 5.\n"," Nplot = 5\n"," for batch in datagen.flow(x,batch_size=1,\n"," save_to_dir=dir_augmented_data,\n"," save_format='tif',\n"," seed=42):\n"," i += 1\n"," if i > Nplot - 1:\n"," break\n","\n","# Generators\n","def buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, subset, batch_size, target_size):\n"," '''\n"," Can generate image and mask at the same time use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same\n"," \n"," datagen: ImageDataGenerator \n"," subset: can take either 'training' or 'validation'\n"," '''\n"," seed = 1\n"," image_generator = image_datagen.flow_from_directory(\n"," os.path.dirname(image_folder_path),\n"," classes = [os.path.basename(image_folder_path)],\n"," class_mode = None,\n"," color_mode = \"grayscale\",\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," subset = subset,\n"," interpolation = \"bicubic\",\n"," seed = seed)\n"," \n"," mask_generator = mask_datagen.flow_from_directory(\n"," os.path.dirname(mask_folder_path),\n"," classes = [os.path.basename(mask_folder_path)],\n"," class_mode = None,\n"," color_mode = \"grayscale\",\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," subset = subset,\n"," interpolation = \"nearest\",\n"," seed = seed)\n"," \n"," this_generator = zip(image_generator, mask_generator)\n"," for (img,mask) in this_generator:\n"," # img,mask = adjustData(img,mask)\n"," yield (img,mask)\n","\n","\n","def prepareGenerators(image_folder_path, mask_folder_path, datagen_parameters, batch_size = 4, target_size = (512, 512)):\n"," image_datagen = ImageDataGenerator(**datagen_parameters, preprocessing_function = normalizePercentile)\n"," mask_datagen = ImageDataGenerator(**datagen_parameters, preprocessing_function = normalizeMinMax)\n","\n"," train_datagen = buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, 'training', batch_size, target_size)\n"," validation_datagen = buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, 'validation', batch_size, target_size)\n","\n"," return (train_datagen, validation_datagen)\n","\n","\n","# Normalization functions from Martin Weigert\n","def normalizePercentile(x, pmin=1, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","\n","\n","# Simple normalization to min/max fir the Mask\n","def normalizeMinMax(x, dtype=np.float32):\n"," x = x.astype(dtype,copy=False)\n"," x = (x - np.amin(x)) / (np.amax(x) - np.amin(x))\n"," return x\n","\n","\n","# def 
predictionGenerator(Data_path, target_size = (256,256), as_gray = True):\n","# for filename in os.listdir(Data_path):\n","# if not os.path.isdir(os.path.join(Data_path, filename)):\n","# img = io.imread(os.path.join(Data_path, filename), as_gray = as_gray)\n","# img = normalizePercentile(img)\n","# # img = img/255 # WARNING: this is expecting 8bit images\n","# img = transform.resize(img,target_size, preserve_range=True, anti_aliasing=True, order = 1) # liner interpolation\n","# img = np.reshape(img,img.shape+(1,))\n","# img = np.reshape(img,(1,)+img.shape)\n","# yield img\n","\n","\n","# def predictionResize(Data_path, predictions):\n","# resized_predictions = []\n","# for (i, filename) in enumerate(os.listdir(Data_path)):\n","# if not os.path.isdir(os.path.join(Data_path, filename)):\n","# img = Image.open(os.path.join(Data_path, filename))\n","# (width, height) = img.size\n","# resized_predictions.append(transform.resize(predictions[i], (height, width), preserve_range=True, anti_aliasing=True, order = 1))\n","# return resized_predictions\n","\n","\n","# This is code outlines the architecture of U-net. The choice of pooling steps decides the depth of the network. \n","def unet(pretrained_weights = None, input_size = (256,256,1), pooling_steps = 4, learning_rate = 1e-4, verbose=True, class_weights=np.ones(2)):\n"," inputs = Input(input_size)\n"," conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n"," conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n"," # Downsampling steps\n"," pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n"," conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n"," conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n"," \n"," if pooling_steps > 1:\n"," pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n"," conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n"," conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n","\n"," if pooling_steps > 2:\n"," pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n"," conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n"," conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n"," drop4 = Dropout(0.5)(conv4)\n"," \n"," if pooling_steps > 3:\n"," pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n"," conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n"," conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n"," drop5 = Dropout(0.5)(conv5)\n","\n"," #Upsampling steps\n"," up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n"," merge6 = concatenate([drop4,up6], axis = 3)\n"," conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n"," conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n"," \n"," if pooling_steps > 2:\n"," up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop4))\n"," if pooling_steps > 3:\n"," up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(UpSampling2D(size = (2,2))(conv6))\n"," merge7 = concatenate([conv3,up7], axis = 3)\n"," conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n"," \n"," if pooling_steps > 1:\n"," up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv3))\n"," if pooling_steps > 2:\n"," up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n"," merge8 = concatenate([conv2,up8], axis = 3)\n"," conv8 = Conv2D(128, 3, activation= 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n"," \n"," if pooling_steps == 1:\n"," up9 = Conv2D(64, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2))\n"," else:\n"," up9 = Conv2D(64, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8)) #activation = 'relu'\n"," \n"," merge9 = concatenate([conv1,up9], axis = 3)\n"," conv9 = Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal')(merge9) #activation = 'relu'\n"," conv9 = Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal')(conv9) #activation = 'relu'\n"," conv9 = Conv2D(2, 3, padding = 'same', kernel_initializer = 'he_normal')(conv9) #activation = 'relu'\n"," conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)\n","\n"," model = Model(inputs = inputs, outputs = conv10)\n","\n"," # model.compile(optimizer = Adam(lr = learning_rate), loss = 'binary_crossentropy', metrics = ['acc'])\n"," model.compile(optimizer = Adam(lr = learning_rate), loss = weighted_binary_crossentropy(class_weights))\n","\n","\n"," if verbose:\n"," model.summary()\n","\n"," if(pretrained_weights):\n"," \tmodel.load_weights(pretrained_weights);\n","\n"," return model\n","\n","\n","\n","# def prediction(Image_path, model_path):\n","\n","# # Read the data in and normalize\n","# Image = io.imread(Image_path, as_gray = True)\n","# Image = normalizePercentile(Image)\n","\n","# h5_file_path = os.path.join(model_path, 'weights_best.hdf5')\n","\n","# # Getting the number of pooling layers from the total number of layers\n","# n_layers = len(load_model(h5_file_path, custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))}).layers)\n","# if n_layers == 13:\n","# pooling_steps = 1\n","# elif n_layers == 20:\n","# pooling_steps = 2\n","# elif n_layers == 28:\n","# pooling_steps = 3\n","# else:\n","# pooling_steps = 4\n","\n","# # Define the model\n","# pooling_steps = 2 # currently hard-coded --> needs to get it from the loaded model\n","# model = unet(pretrained_weights = h5_file_path, input_size = (Image.shape[0],Image.shape[1],1), pooling_steps = pooling_steps, verbose=False)\n","\n","# # Reshape the data\n","# Image = np.reshape(Image,Image.shape+(1,))\n","# Image = np.reshape(Image,(1,)+Image.shape)\n","\n","# return model.predict(Image, batch_size = 1)\n","\n","\n","def predict_as_tiles(Image_path, model):\n","\n"," # Read the data in and normalize\n"," Image_raw = io.imread(Image_path, as_gray = True)\n"," Image_raw = normalizePercentile(Image_raw)\n","\n"," # Get the patch size from the input layer of the model\n"," patch_size = model.layers[0].output_shape[1:3]\n","\n"," # Pad the image with zeros if any of its dimensions is smaller than the patch size\n"," if Image_raw.shape[0] < patch_size[0] or Image_raw.shape[1] < patch_size[1]:\n"," Image = np.zeros((max(Image_raw.shape[0], patch_size[0]), 
max(Image_raw.shape[1], patch_size[1])))\n"," Image[0:Image_raw.shape[0], 0: Image_raw.shape[1]] = Image_raw\n"," else:\n"," Image = Image_raw\n","\n"," # Calculate the number of patches in each dimension\n"," n_patch_in_width = ceil(Image.shape[0]/patch_size[0])\n"," n_patch_in_height = ceil(Image.shape[1]/patch_size[1])\n","\n"," prediction = np.zeros(Image.shape)\n","\n"," for x in range(n_patch_in_width):\n"," for y in range(n_patch_in_height):\n"," xi = patch_size[0]*x\n"," yi = patch_size[1]*y\n","\n"," # If the patch exceeds the edge of the image shift it back \n"," if xi+patch_size[0] >= Image.shape[0]:\n"," xi = Image.shape[0]-patch_size[0]\n","\n"," if yi+patch_size[1] >= Image.shape[1]:\n"," yi = Image.shape[1]-patch_size[1]\n"," \n"," # Extract and reshape the patch\n"," patch = Image[xi:xi+patch_size[0], yi:yi+patch_size[1]]\n"," patch = np.reshape(patch,patch.shape+(1,))\n"," patch = np.reshape(patch,(1,)+patch.shape)\n","\n"," # Get the prediction from the patch and paste it in the prediction in the right place\n"," predicted_patch = model.predict(patch, batch_size = 1)\n"," prediction[xi:xi+patch_size[0], yi:yi+patch_size[1]] = np.squeeze(predicted_patch)\n","\n","\n"," return prediction[0:Image_raw.shape[0], 0: Image_raw.shape[1]]\n"," \n","\n","\n","\n","def saveResult(save_path, nparray, source_dir_list, prefix='', threshold=None):\n"," for (filename, image) in zip(source_dir_list, nparray):\n"," io.imsave(os.path.join(save_path, prefix+os.path.splitext(filename)[0]+'.tif'), img_as_ubyte(image)) # saving as unsigned 8-bit image\n"," \n"," # For masks, threshold the images and return 8 bit image\n"," if threshold is not None:\n"," mask = convert2Mask(image, threshold)\n"," io.imsave(os.path.join(save_path, prefix+'mask_'+os.path.splitext(filename)[0]+'.tif'), mask)\n","\n","\n","def convert2Mask(image, threshold):\n"," mask = img_as_ubyte(image, force_copy=True)\n"," mask[mask > threshold] = 255\n"," mask[mask <= threshold] = 0\n"," return mask\n","\n","\n","def getIoUvsThreshold(prediction_filepath, groud_truth_filepath):\n"," prediction = io.imread(prediction_filepath)\n"," ground_truth_image = img_as_ubyte(io.imread(groud_truth_filepath, as_gray=True), force_copy=True)\n","\n"," threshold_list = []\n"," IoU_scores_list = []\n","\n"," for threshold in range(0,256): \n"," # Convert to 8-bit for calculating the IoU\n"," mask = img_as_ubyte(prediction, force_copy=True)\n"," mask[mask > threshold] = 255\n"," mask[mask <= threshold] = 0\n","\n"," # Intersection over Union metric\n"," intersection = np.logical_and(ground_truth_image, np.squeeze(mask))\n"," union = np.logical_or(ground_truth_image, np.squeeze(mask))\n"," iou_score = np.sum(intersection) / np.sum(union)\n","\n"," threshold_list.append(threshold)\n"," IoU_scores_list.append(iou_score)\n","\n"," return (threshold_list, IoU_scores_list)\n","\n","\n","\n","# -------------- Other definitions -----------\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","prediction_prefix = 'Predicted_'\n","\n","\n","print('-------------------')\n","print('U-net and dependencies installed.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"7hTKImff6Est","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---"]},{"cell_type":"markdown","metadata":{"id":"S74FbqV6PNNv","colab_type":"text"},"source":["##**3.1. 
Parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"3np5EpJF8_q2","colab_type":"text"},"source":[" **Paths for training data and models**\n","\n","**`Training_source`, `Training_target`:** These are the folders containing your source (e.g. EM images) and target files (segmentation masks). Enter the path to the source and target images for training. **These should be located in the same parent folder.**\n","\n","**`model_name`:** Use only my_model -style, not my-model. If you want to use a previously trained model, enter the name of the pretrained model (which should be contained in the trained_model -folder after training).\n","\n","**`model_path`**: Enter the path of the folder where you want to save your model.\n","\n","**`visual_validation_after_training`**: If you select this option, a random image pair will be set aside from your training set and will be used to display a predicted image of the trained network next to the input and the ground-truth. This can aid in visually assessing the performance of your network after training. **Note: Your training set size will decrease by 1 if you select this option.**\n","\n","**Make sure the directories exist before entering them!**\n","\n"," **Select training parameters**\n","\n","**`epochs`**: Choose more epochs for larger training sets. Observing how much the loss reduces between epochs during training may help determine the optimal value. **Default: 200**\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`steps`**: This number should be equivalent to the number of samples in the training set divided by the batch size, to ensure the training iterates through the entire training set. Smaller values can be used for testing. **Default: 6**\n","\n","**`batch_size`**: This parameter describes the amount of images that are loaded into the network per step. Smaller batchsizes may improve training performance slightly but may increase training time. If the notebook crashes while loading the dataset this can be due to a too large batch size. Decrease the number in this case. **Default: 4**\n","\n"," **`pooling_steps`**: Choosing a different number of pooling layers can affect the performance of the network. Each additional pooling step will also two additional convolutions. The network can learn more complex information but is also more likely to overfit. Achieving best performance may require testing different values here. **Default: 2**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0003**\n","\n","**`patch_width` and `patch_height`:** The notebook crops the data in patches of fixed size prior to training. The dimensions of the patches can be defined here. When `Use_Default_Advanced_Parameters` is selected, the largest 2^n x 2^n patch that fits in the smallest dataset is chosen. 
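As an illustration of that default (a sketch only, mirroring the `fittingPowerOfTwo` and `estimatePatchSize` helpers defined in Section 2; the 512-pixel cap is explained in the next sentence):

```python
# Sketch of the default patch-size choice: the largest power of two that fits
# each dimension of the smallest image in the dataset, capped at 512 pixels.
def largest_power_of_two(n):
    p = 1
    while p * 2 <= n:
        p *= 2
    return p

smallest_width, smallest_height = 600, 380  # example: smallest training image is 600 x 380 pixels
patch_width  = min(largest_power_of_two(smallest_width), 512)   # -> 512
patch_height = min(largest_power_of_two(smallest_height), 512)  # -> 256
print(patch_width, patch_height)
```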
Larger patches than 512x512 should **NOT** be selected for network stability.\n","\n"]},{"cell_type":"code","metadata":{"id":"7deNuPZd5d-B","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- Initial user input ------------\n","#@markdown ###Path to training images:\n","Training_source = '' #@param {type:\"string\"}\n","Training_target = '' #@param {type:\"string\"}\n","\n","model_name = '' #@param {type:\"string\"}\n","model_path = '' #@param {type:\"string\"}\n","\n","#@markdown ###Training parameters\n","#@markdown Number of epochs\n","epochs = 200#@param {type:\"number\"}\n","\n","#@markdown ###Advanced Parameters\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","number_of_steps = 6#@param {type:\"number\"}\n","batch_size = 4#@param {type:\"integer\"}\n","pooling_steps = 2 #@param [1,2,3,4]{type:\"raw\"}\n","percentage_validation = 10#@param{type:\"number\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","patch_width = 256#@param{type:\"number\"}\n","patch_height = 256#@param{type:\"number\"}\n","\n","\n","# ------------- Initialising folder, variables and failsafes ------------\n","# Create the folders where to save the model and the QC\n","full_model_path = os.path.join(model_path, model_name)\n","if os.path.exists(full_model_path):\n"," print(R+'!! WARNING: Folder already exists and will be overwritten !!'+W)\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 4\n"," pooling_steps = 2\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0003\n"," patch_width, patch_height = estimatePatchSize(Training_source)\n","\n","\n","#The create_patches function will create the two folders below\n","# Patch_source = '/content/img_patches'\n","# Patch_target = '/content/mask_patches'\n","print('Training on patches of size (x,y): ('+str(patch_width)+','+str(patch_height)+')')\n","\n","#Create patches\n","print('Creating patches...')\n","Patch_source, Patch_target = create_patches(Training_source, Training_target, patch_width, patch_height)\n","\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","Use_Data_augmentation = False\n","\n","# ------------- Display ------------\n","\n","#if not os.path.exists('/content/img_patches/'):\n","random_choice = random.choice(os.listdir(Patch_source))\n","x = io.imread(os.path.join(Patch_source, random_choice))\n","\n","#os.chdir(Training_target)\n","y = io.imread(os.path.join(Patch_target, random_choice), as_gray=True)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest',cmap='gray')\n","plt.title('Training image patch')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest',cmap='gray')\n","plt.title('Training mask patch')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"V9UCjlLJ5Rfc","colab_type":"text"},"source":["##**3.2. Data Augmentation**\n","\n","---\n","\n"," Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. 
Augmentation is not necessary for training and if the dataset is large the values can be set to 0.\n","\n"," The augmentation options below are to be used as follows:\n","\n","* **shift**: a translation of the image by a fraction of the image size (width or height), **default: 10%**\n","* **zoom_range**: Increasing or decreasing the field of view. E.g. 10% will result in a zoom range of (0.9 to 1.1), with pixels added or interpolated, depending on the transformation, **default: 10%**\n","* **shear_range**: Shear angle in counter-clockwise direction, **default: 10%**\n","* **flip**: creating a mirror image along specified axis (horizontal or vertical), **default: True**\n","* **rotation_range**: range of allowed rotation angles in degrees (from 0 to *value*), **default: 180**"]},{"cell_type":"code","metadata":{"id":"i-PahNX94-pl","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##**Augmentation Options**\n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," if Use_Default_Augmentation_Parameters:\n"," horizontal_shift = 10 \n"," vertical_shift = 20 \n"," zoom_range = 10\n"," shear_range = 10\n"," horizontal_flip = True\n"," vertical_flip = True\n"," rotation_range = 180\n","#@markdown ###If you are not using the default settings, please provide the values below:\n","\n","#@markdown ###**Image shift, zoom, shear and flip (%)**\n"," else:\n"," horizontal_shift = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," vertical_shift = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," zoom_range = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," shear_range = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," horizontal_flip = True #@param {type:\"boolean\"}\n"," vertical_flip = True #@param {type:\"boolean\"}\n","\n","#@markdown ###**Rotate image within angle range (degrees):**\n"," rotation_range = 180 #@param {type:\"slider\", min:0, max:180, step:1}\n","\n","#given behind the # are the default values for each parameter.\n","\n","else:\n"," horizontal_shift = 0 \n"," vertical_shift = 0 \n"," zoom_range = 0\n"," shear_range = 0\n"," horizontal_flip = False\n"," vertical_flip = False\n"," rotation_range = 0\n","\n","\n","# Build the dict for the ImageDataGenerator\n","data_gen_args = dict(width_shift_range = horizontal_shift/100.,\n"," height_shift_range = vertical_shift/100.,\n"," rotation_range = rotation_range, #90\n"," zoom_range = zoom_range/100.,\n"," shear_range = shear_range/100.,\n"," horizontal_flip = horizontal_flip,\n"," vertical_flip = vertical_flip,\n"," validation_split = percentage_validation/100,\n"," fill_mode = 'reflect')\n","\n","\n","\n","# ------------- Display ------------\n","dir_augmented_data_imgs=\"/content/augment_img\"\n","dir_augmented_data_masks=\"/content/augment_mask\"\n","random_choice = random.choice(os.listdir(Patch_source))\n","orig_img = load_img(os.path.join(Patch_source,random_choice))\n","orig_mask = load_img(os.path.join(Patch_target,random_choice))\n","\n","augment_view = ImageDataGenerator(**data_gen_args)\n","\n","if Use_Data_augmentation:\n"," print(\"Parameters enabled\")\n"," print(\"Here is what a subset of your augmentations looks like:\")\n"," save_augment(augment_view, orig_img, dir_augmented_data=dir_augmented_data_imgs)\n"," save_augment(augment_view, orig_mask, dir_augmented_data=dir_augmented_data_masks)\n","\n"," fig = plt.figure(figsize=(15, 7))\n"," 
fig.subplots_adjust(hspace=0.0,wspace=0.1,left=0,right=1.1,bottom=0, top=0.8)\n","\n"," \n"," ax = fig.add_subplot(2, 6, 1,xticks=[],yticks=[]) \n"," new_img=img_as_ubyte(normalizeMinMax(img_to_array(orig_img)))\n"," ax.imshow(new_img)\n"," ax.set_title('Original Image')\n"," i = 2\n"," for imgnm in os.listdir(dir_augmented_data_imgs):\n"," ax = fig.add_subplot(2, 6, i,xticks=[],yticks=[]) \n"," img = load_img(dir_augmented_data_imgs + \"/\" + imgnm)\n"," ax.imshow(img)\n"," i += 1\n","\n"," ax = fig.add_subplot(2, 6, 7,xticks=[],yticks=[]) \n"," new_mask=img_as_ubyte(normalizeMinMax(img_to_array(orig_mask)))\n"," ax.imshow(new_mask)\n"," ax.set_title('Original Mask')\n"," j=2\n"," for imgnm in os.listdir(dir_augmented_data_masks):\n"," ax = fig.add_subplot(2, 6, j+6,xticks=[],yticks=[]) \n"," mask = load_img(dir_augmented_data_masks + \"/\" + imgnm)\n"," ax.imshow(mask)\n"," j += 1\n"," plt.show()\n","\n","else:\n"," print(\"No augmentation will be used\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"7vFEIHbNAuOs","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Unet model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
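As an illustration of how the saved learning rate is recovered, the sketch below reads it back from the `training_evaluation.csv` file that ZeroCostDL4Mic stores in the model's 'Quality Control' folder. The model path is a placeholder; the cell below performs this step for you automatically.

```python
# Minimal sketch, assuming a model folder previously produced by this notebook:
# read back the learning rate saved in Quality Control/training_evaluation.csv.
# 'pretrained_model_path' is a placeholder path used only for illustration.
import os
import pandas as pd

pretrained_model_path = '/content/gdrive/My Drive/my_model'   # placeholder
csv_path = os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')

if os.path.exists(csv_path):
    history = pd.read_csv(csv_path)
    last_lr = history['learning rate'].iloc[-1]                           # learning rate at the last epoch
    best_lr = history.loc[history['val_loss'].idxmin(), 'learning rate']  # learning rate at the lowest val_loss
    print('last:', last_lr, ' best:', best_lr)
else:
    print('No training_evaluation.csv found; the default learning rate will be used.')
```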
"]},{"cell_type":"code","metadata":{"id":"RfR9UyKAAulw","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the UNET_Model_from_\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(R+'WARNING: pretrained model does not exist')\n"," Use_pretrained_model = False\n"," \n","\n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate 
= initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(R+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"94FX4wzE8w1W","colab_type":"text"},"source":["# **4. Train the network**\n","---\n","####**Troubleshooting:** If you receive a time-out or exhausted error, try reducing the batchsize of your training set. This reduces the amount of data loaded into the model at one point in time. "]},{"cell_type":"markdown","metadata":{"id":"tlTDGcmDDHDe","colab_type":"text"},"source":["## **4.1. Prepare model for training**\n","---"]},{"cell_type":"code","metadata":{"id":"ezFy_mpz_op4","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play this cell to prepare the model for training\n","\n","\n","# ------------------ Set the generators, model and logger ------------------\n","# This will take the image size and set that as a patch size (arguable...)\n","# Read image size (without actuall reading the data)\n","\n","\n","# n = 0 \n","# while os.path.isdir(os.path.join(Training_source, source_images[n])):\n","# n += 1\n","\n","# (width, height) = Image.open(os.path.join(Training_target, source_images[n])).size\n","# ImageSize = (height, width) # np.shape different from PIL image.size return !\n","\n","# !!! WARNING !!! 
Check potential issues with resizing at the ImageDataGenerator level\n","# (train_datagen, validation_datagen) = prepareGenerators(Training_source, Training_target, data_gen_args, batch_size, target_size = ImageSize)\n","(train_datagen, validation_datagen) = prepareGenerators(Patch_source, Patch_target, data_gen_args, batch_size, target_size = (patch_width, patch_height))\n","\n","\n","# This modelcheckpoint will only save the best model from the validation loss point of view\n","model_checkpoint = ModelCheckpoint(os.path.join(full_model_path, 'weights_best.hdf5'), monitor='val_loss',verbose=1, save_best_only=True)\n","\n","print('Getting class weights...')\n","class_weights = getClassWeights(Training_target)\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we make sure this is properly defined\n","if not Use_pretrained_model:\n"," h5_file_path = None\n","# --------------------- ---------------------- ------------------------\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# --------------------- Reduce learning rate on plateau ------------------------\n","\n","reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, verbose=1, mode='auto',\n"," patience=10, min_lr=0)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# Define the model\n","model = unet(pretrained_weights = h5_file_path, \n"," input_size = (patch_width,patch_height,1), \n"," pooling_steps = pooling_steps, \n"," learning_rate = initial_learning_rate, \n"," class_weights = class_weights)\n","\n","# Dfine CSV logger that will create the loss file (we're not using this anylonger)\n","# csv_log = CSVLogger(os.path.join(full_model_path, 'Quality Control', 'training_evaluation.csv'), separator=',', append=False)\n","\n","number_of_training_dataset = len(os.listdir(Patch_source))\n","\n","if Use_Default_Advanced_Parameters:\n"," number_of_steps = ceil((100-percentage_validation)/100*number_of_training_dataset/batch_size)\n","\n","# Calculate the number of steps to use for validation\n","validation_steps = max(1, ceil(percentage_validation/100*number_of_training_dataset/batch_size))\n","\n","config_model= model.optimizer.get_config()\n","print(config_model)\n","\n","\n","# ------------------ Failsafes ------------------\n","if os.path.exists(full_model_path):\n"," print(R+'!! 
WARNING: Model folder already existed and has been removed !!'+W)\n"," shutil.rmtree(full_model_path)\n","\n","os.makedirs(full_model_path)\n","os.makedirs(os.path.join(full_model_path,'Quality Control'))\n","\n","\n","# ------------------ Display ------------------\n","print('---------------------------- Main training parameters ----------------------------')\n","print('Number of epochs: '+str(epochs))\n","print('Batch size: '+str(batch_size))\n","print('Number of training dataset: '+str(number_of_training_dataset))\n","print('Number of training steps: '+str(number_of_steps))\n","print('Number of validation steps: '+str(validation_steps))\n","print('---------------------------- ------------------------ ----------------------------')\n","\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"urpQ9UM-6NBE","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","\n","####**Be patient**. Please be patient, this may take a while. But the verbose allow you to estimate how fast it's training and how long it'll take. While it's training, please make sure that the computer is not powering down due to inactivity, otherwise this will interupt the runtime."]},{"cell_type":"code","metadata":{"id":"sMyCENd29TKz","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start Training\n","\n","start = time.time()\n","# history = model.fit_generator(train_datagen, steps_per_epoch = number_of_steps, epochs=epochs, callbacks=[model_checkpoint,csv_log], validation_data = validation_datagen, validation_steps = validation_steps, shuffle=True, verbose=1)\n","history = model.fit_generator(train_datagen, steps_per_epoch = number_of_steps, epochs=epochs, callbacks=[model_checkpoint, reduce_lr], validation_data = validation_datagen, validation_steps = validation_steps, shuffle=True, verbose=1)\n","\n","# Save the last model\n","model.save(os.path.join(full_model_path, 'weights_last.hdf5'))\n","\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = os.path.join(full_model_path,'Quality Control/training_evaluation.csv')\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n"," \n","\n","\n","# Displaying the time elapsed for training\n","print(\"------------------------------------------\")\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\", hour, \"hour(s)\", mins,\"min(s)\",round(sec),\"sec(s)\")\n","print(\"------------------------------------------\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"LWaFk0JNda-N","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"mEMcFNHZdmTz","colab_type":"text"},"source":["# **5. 
Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend performing quality control on all newly trained models.**"]},{"cell_type":"code","metadata":{"id":"X11zGW0Ldu-z","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Do you want to assess the model you just trained?\n","\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","\n","full_QC_model_path = os.path.join(QC_model_path, QC_model_name)\n","if os.path.exists(os.path.join(full_QC_model_path, 'weights_best.hdf5')):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"pkJyRzWJCrKG","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data that is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value, but measured between the model's prediction on a validation image and its target.\n","\n","During training, both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","If both the **Training loss** and the **Validation loss** are still decreasing, further training is beneficial and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case, the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"qul6BpaX1GqS","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","epochNumber = []\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(os.path.join(full_QC_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(os.path.join(full_QC_model_path, 'Quality Control', 'lossCurvePlots.png'))\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"h33P0C2geqZu","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder. The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n","\n","The Input, Ground Truth, Prediction and IoU maps are shown below for the last example in the QC set.\n","\n"," The results for all QC examples can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\".\n","\n","### **Thresholds for image masks**\n","\n"," Since the output from Unet is not a binary mask, the output images are converted to binary masks using thresholding. This section will test different thresholds (from 0 to 255) to find the one yielding the best IoU score compared with the ground truth. The best threshold for each image and the average of these thresholds will be displayed below. 
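To make the threshold sweep explicit, the sketch below shows the principle behind `getIoUvsThreshold` (defined in section 2) on two synthetic placeholder arrays: the prediction is binarised at every threshold from 0 to 255 and the IoU against the ground-truth mask is recorded.

```python
# Minimal sketch of the IoU-vs-threshold idea: binarise an 8-bit prediction at
# every possible threshold and compare it with the ground-truth mask.
# The two arrays below are synthetic placeholders, not real data.
import numpy as np

prediction = np.random.randint(0, 256, (256, 256)).astype(np.uint8)  # placeholder prediction
ground_truth = np.random.randint(0, 2, (256, 256)) > 0               # placeholder binary mask

best_iou, best_threshold = 0.0, 0
for threshold in range(256):
    mask = prediction > threshold
    intersection = np.logical_and(mask, ground_truth).sum()
    union = np.logical_or(mask, ground_truth).sum()
    iou = intersection / union if union > 0 else 0.0
    if iou > best_iou:
        best_iou, best_threshold = iou, threshold

print('Best IoU %.3f at threshold %d' % (best_iou, best_threshold))
```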
**These values can be a guideline when creating masks for unseen data in section 6.**"]},{"cell_type":"code","metadata":{"id":"Tpqjvwv2zug-","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- User input ------------\n","#@markdown ##Choose the folders that contain your Quality Control dataset\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","\n","# ------------- Initialise folders ------------\n","# Create a quality control/Prediction Folder\n","prediction_QC_folder = os.path.join(full_QC_model_path, 'Quality Control', 'Prediction')\n","if os.path.exists(prediction_QC_folder):\n"," shutil.rmtree(prediction_QC_folder)\n","\n","os.makedirs(prediction_QC_folder)\n","\n","\n","# ------------- Prepare the model and run predictions ------------\n","\n","# Load the model\n","unet = load_model(os.path.join(full_QC_model_path, 'weights_best.hdf5'), custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))})\n","Input_size = unet.layers[0].output_shape[1:3]\n","print('Model input size: '+str(Input_size[0])+'x'+str(Input_size[1]))\n","\n","# Create a list of sources\n","source_dir_list = os.listdir(Source_QC_folder)\n","number_of_dataset = len(source_dir_list)\n","print('Number of dataset found in the folder: '+str(number_of_dataset))\n","\n","predictions = []\n","for i in tqdm(range(number_of_dataset)):\n"," predictions.append(predict_as_tiles(os.path.join(Source_QC_folder, source_dir_list[i]), unet))\n","\n","\n","# Save the results in the folder along with the masks according to the set threshold\n","saveResult(prediction_QC_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=None)\n","\n","#-----------------------------Calculate Metrics----------------------------------------#\n","\n","f = plt.figure(figsize=((5,5)))\n","\n","with open(os.path.join(full_QC_model_path,'Quality Control', 'QC_metrics_'+QC_model_name+'.csv'), \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"File name\",\"IoU\", \"IoU-optimised threshold\"]) \n","\n"," # Initialise the lists \n"," filename_list = []\n"," best_threshold_list = []\n"," best_IoU_score_list = []\n","\n"," for filename in os.listdir(Source_QC_folder):\n","\n"," if not os.path.isdir(os.path.join(Source_QC_folder, filename)):\n"," print('Running QC on: '+filename)\n"," test_input = io.imread(os.path.join(Source_QC_folder, filename), as_gray=True)\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, filename), as_gray=True)\n","\n"," (threshold_list, iou_scores_per_threshold) = getIoUvsThreshold(os.path.join(prediction_QC_folder, prediction_prefix+filename), os.path.join(Target_QC_folder, filename))\n"," plt.plot(threshold_list,iou_scores_per_threshold, label=filename)\n","\n"," # Here we find which threshold yielded the highest IoU score for image n.\n"," best_IoU_score = max(iou_scores_per_threshold)\n"," best_threshold = iou_scores_per_threshold.index(best_IoU_score)\n","\n"," # Write the results in the CSV file\n"," writer.writerow([filename, str(best_IoU_score), str(best_threshold)])\n","\n"," # Here we append the best threshold and score to the lists\n"," filename_list.append(filename)\n"," best_IoU_score_list.append(best_IoU_score)\n"," best_threshold_list.append(best_threshold)\n","\n","# Display the IoV vs Threshold plot\n","plt.title('IoU vs. 
Threshold')\n","plt.ylabel('IoU')\n","plt.xlabel('Threshold value')\n","plt.legend()\n","plt.show()\n","\n","\n","# Table with metrics as dataframe output\n","pdResults = pd.DataFrame(index = filename_list)\n","pdResults[\"IoU\"] = best_IoU_score_list\n","pdResults[\"IoU-optimised threshold\"] = best_threshold_list\n","\n","\n","\n","average_best_threshold = sum(best_threshold_list)/len(best_threshold_list)\n","\n","\n","# ------------- For display ------------\n","print('--------------------------------------------------------------')\n","@interact\n","def show_QC_results(file=os.listdir(Source_QC_folder)):\n"," \n"," plt.figure(figsize=(25,5))\n"," #Input\n"," plt.subplot(1,4,1)\n"," plt.axis('off')\n"," plt.imshow(plt.imread(os.path.join(Source_QC_folder, file)), aspect='equal', cmap='gray', interpolation='nearest')\n"," plt.title('Input')\n","\n"," #Ground-truth\n"," plt.subplot(1,4,2)\n"," plt.axis('off')\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, file),as_gray=True)\n"," plt.imshow(test_ground_truth_image, aspect='equal', cmap='Greens')\n"," plt.title('Ground Truth')\n","\n"," #Prediction\n"," plt.subplot(1,4,3)\n"," plt.axis('off')\n"," test_prediction = plt.imread(os.path.join(prediction_QC_folder, prediction_prefix+file))\n"," test_prediction_mask = np.empty_like(test_prediction)\n"," test_prediction_mask[test_prediction > average_best_threshold] = 255\n"," test_prediction_mask[test_prediction <= average_best_threshold] = 0\n"," plt.imshow(test_prediction_mask, aspect='equal', cmap='Purples')\n"," plt.title('Prediction')\n","\n"," #Overlay\n"," plt.subplot(1,4,4)\n"," plt.axis('off')\n"," plt.imshow(test_ground_truth_image, cmap='Greens')\n"," plt.imshow(test_prediction_mask, alpha=0.5, cmap='Purples')\n"," metrics_title = 'Overlay (IoU: ' + str(round(pdResults.loc[file][\"IoU\"],3)) + ' T: ' + str(round(pdResults.loc[file][\"IoU-optimised threshold\"])) + ')'\n"," plt.title(metrics_title)\n","\n","\n","\n","print('--------------------------------------------------------------')\n","print('Best average threshold is: '+str(round(average_best_threshold)))\n","print('--------------------------------------------------------------')\n","\n","pdResults.head()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"gofmRsLP96O8","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","In this section, the unseen data is processed using the trained model (from section 4). First, your unseen images are uploaded and prepared for prediction. Your trained model from section 4 is then applied to these images and the resulting predictions are saved to your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"Pv_v1Ru2OJkU","colab_type":"text"},"source":["## **6.1 Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.1) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
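For orientation, the snippet below condenses what the prediction cell of this section does for a single image, using helpers defined in section 2 of this notebook (`weighted_binary_crossentropy`, `predict_as_tiles` and `convert2Mask`). Both paths are placeholders; running the cell below remains the recommended way to process a full folder.

```python
# Minimal sketch only: apply a trained model to one image using the helpers
# defined in section 2 of this notebook. Both paths below are placeholders.
import os
import numpy as np
from keras.models import load_model

model_folder = '/content/gdrive/My Drive/my_model'       # placeholder
image_path = '/content/gdrive/My Drive/data/img_1.tif'   # placeholder

# The custom loss must be passed so that Keras can deserialise the model.
unet = load_model(os.path.join(model_folder, 'weights_best.hdf5'),
                  custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))})

prediction = predict_as_tiles(image_path, unet)  # tile-wise prediction, same size as the input image
mask = convert2Mask(prediction, 125)             # 8-bit binary mask at an example threshold of 125
```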
Predicted output images are saved in your **Result_folder** folder.\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images.\n","\n"," Once the predictions are complete the cell will display a random example prediction beside the input image and the calculated mask for visual inspection.\n","\n"," **Troubleshooting:** If there is a low contrast image warning when saving the images, this may be due to overfitting of the model to the data. It may result in images containing only a single colour. Train the network again with different network hyperparameters."]},{"cell_type":"code","metadata":{"id":"FJAe55ZoOJGs","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- Initial user input ------------\n","#@markdown ###Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","Data_folder = '' #@param {type:\"string\"}\n","Results_folder = '' #@param {type:\"string\"}\n","\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","\n","# ------------- Failsafes ------------\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = os.path.join(Prediction_model_path, Prediction_model_name)\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(R+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","# ------------- Prepare the model and run predictions ------------\n","\n","# Load the model and prepare generator\n","\n","unet = load_model(os.path.join(Prediction_model_path, Prediction_model_name, 'weights_best.hdf5'), custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))})\n","Input_size = unet.layers[0].output_shape[1:3]\n","print('Model input size: '+str(Input_size[0])+'x'+str(Input_size[1]))\n","\n","# Create a list of sources\n","source_dir_list = os.listdir(Data_folder)\n","number_of_dataset = len(source_dir_list)\n","print('Number of dataset found in the folder: '+str(number_of_dataset))\n","\n","predictions = []\n","for i in tqdm(range(number_of_dataset)):\n"," predictions.append(predict_as_tiles(os.path.join(Data_folder, source_dir_list[i]), unet))\n"," # predictions.append(prediction(os.path.join(Data_folder, source_dir_list[i]), os.path.join(Prediction_model_path, Prediction_model_name)))\n","\n","\n","# Save the results in the folder along with the masks according to the set threshold\n","saveResult(Results_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=None)\n","\n","\n","# ------------- For display ------------\n","print('--------------------------------------------------------------')\n","\n","def show_prediction_mask(file=os.listdir(Data_folder), threshold=(0,255,1)):\n","\n"," plt.figure(figsize=(18,6))\n"," # Wide-field\n"," plt.subplot(1,3,1)\n"," plt.axis('off')\n"," img_Source = plt.imread(os.path.join(Data_folder, file))\n"," plt.imshow(img_Source, cmap='gray')\n"," plt.title('Source image',fontsize=15)\n"," # Prediction\n"," plt.subplot(1,3,2)\n"," plt.axis('off')\n"," img_Prediction = plt.imread(os.path.join(Results_folder, prediction_prefix+file))\n"," plt.imshow(img_Prediction, cmap='gray')\n"," plt.title('Prediction',fontsize=15)\n","\n"," # Thresholded mask\n"," plt.subplot(1,3,3)\n"," plt.axis('off')\n"," img_Mask = convert2Mask(img_Prediction, threshold)\n"," plt.imshow(img_Mask, cmap='gray')\n"," plt.title('Mask (Threshold: '+str(round(threshold))+')',fontsize=15)\n","\n","\n","interact(show_prediction_mask, continuous_update=False);\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"su-Mo2POVpja","colab_type":"text"},"source":["## **6.2. Export results as masks**\n","---\n"]},{"cell_type":"code","metadata":{"id":"iC_B_9lxNUny","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# @markdown #Play this cell to save results as masks with the chosen threshold\n","threshold = 125 #@param {type:\"number\"}\n","\n","saveResult(Results_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=threshold)\n","print('-------------------')\n","print('Masks were saved in: '+Results_folder)\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wYmwCQKjYsJ7","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"sCXzzvnh2_rc","colab_type":"text"},"source":["#**Thank you for using U-net!**"]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"U-Net_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1VcTsLOL28ntbr23gYrhY3upxkztZeUvn","timestamp":1591024690909},{"file_id":"19jT_GoHGN-UTM1aEgkgrOjB8pcFz5AW4","timestamp":1591017297795},{"file_id":"1UkoWB27ZWh5j_qivSZIOeOJP1h2EqrVz","timestamp":1589363183397},{"file_id":"1ofNqOc7lz-m6NL4B-m4BIheaU5N0GMln","timestamp":1588873191434},{"file_id":"1rJnsgIKyL6vuneydIfjCKMtMhV3XlQ6o","timestamp":1588583580765},{"file_id":"1RUYrp8beEgDKL1kOWw5LgR1QQb4yHQtG","timestamp":1587061416704},{"file_id":"1FVax0eY3-m8DbJHx0B8Dnep-uGlp30Zt","timestamp":1586601038120},{"file_id":"1TTqmCf2mFQ_PNIZEXX9sRAhoixjYP_AB","timestamp":1585842446113},{"file_id":"1cWwS-jbLYTDOpPp_hhKOLGFXfu06ccpG","timestamp":1585821375983},{"file_id":"1TPEE_AtGTLedawgVBwwXofEJEcJUCgo3","timestamp":1585137343783},{"file_id":"1SxFRb38aC_kmKzKVQfkwWzkK9n7YFxVv","timestamp":1585053829456},{"file_id":"15iw9IOwHNF_GhiHxkh_rWbJG8JnW14Wh","timestamp":1584375074441},{"file_id":"15oMbXnMa4LDEMhPHBr3ga0xhJomMLhDo","timestamp":1584105762670},{"file_id":"1__NtYFNA3DxNB7LrUY13Bt8_frye3iWl","timestamp":1583445015203},{"file_id":"11jsQfqKeDU1Zk3nPykjWKwYhFmvJ1zJ-","timestamp":1575289898486}],"collapsed_sections":[],"toc_visible":true},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"WDrFAwpFIpE0","colab_type":"text"},"source":["# **U-Net (2D)**\n","---\n","\n","U-Net is an encoder-decoder network architecture originally used for image segmentation, first published by [Ronneberger *et al.*](https://arxiv.org/abs/1505.04597). The first half of the U-Net architecture is a downsampling convolutional neural network which acts as a feature extractor from input images. The other half upsamples these results and restores an image by combining results from downsampling with the upsampled images.\n","\n"," **This particular notebook enables image segmentation of 2D dataset. If you are interested in 3D dataset, you should use the 3D U-Net notebook instead.**\n","\n","---\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). 
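To make the encoder-decoder description above more concrete, here is a deliberately tiny sketch of the idea in Keras: one downsampling step, one upsampling step and a single skip connection. It is not the full `unet()` model that this notebook defines and trains in the following sections.

```python
# Minimal illustration (not the notebook's full unet() from section 2) of the
# encoder-decoder idea: downsample to extract features, then upsample and
# concatenate with the matching encoder output (skip connection).
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate

inputs = Input((256, 256, 1))
c1 = Conv2D(64, 3, activation='relu', padding='same')(inputs)   # encoder features
p1 = MaxPooling2D(pool_size=(2, 2))(c1)                         # downsampling
c2 = Conv2D(128, 3, activation='relu', padding='same')(p1)      # bottleneck
u1 = UpSampling2D(size=(2, 2))(c2)                              # upsampling
m1 = concatenate([c1, u1], axis=3)                              # skip connection
c3 = Conv2D(64, 3, activation='relu', padding='same')(m1)
outputs = Conv2D(1, 1, activation='sigmoid')(c3)                # per-pixel probability

Model(inputs=inputs, outputs=outputs).summary()
```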
Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is largely based on the papers: \n","\n","**U-Net: Convolutional Networks for Biomedical Image Segmentation** by Ronneberger *et al.* published on arXiv in 2015 (https://arxiv.org/abs/1505.04597)\n","\n","and \n","\n","**U-Net: deep learning for cell counting, detection, and morphometry** by Thorsten Falk *et al.* in Nature Methods 2019\n","(https://www.nature.com/articles/s41592-018-0261-2)\n","and the source code found in: https://github.com/zhixuhao/unet by *Zhixuhao*\n","\n","**Please also cite these original papers when using or developing this notebook.** "]},{"cell_type":"markdown","metadata":{"id":"ABNu2p4stHeB","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use our notebooks are available on YouTube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets, as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor over the `[ ]`-mark on the left side of the cell (a play button appears). Click it to execute the cell. Once execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you will find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click an entry to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this tab when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"HVwncY_NvlYi","colab_type":"text"},"source":["# **0. 
Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For U-Net to train, **it needs to have access to a paired training dataset corresponding to images and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n","Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Training_source\n"," - img_1.tif, img_2.tif, ...\n"," - Training_target\n"," - img_1.tif, img_2.tif, ...\n"," - **Quality control dataset**\n"," - Training_source\n"," - img_1.tif, img_2.tif\n"," - Training_target \n"," - img_1.tif, img_2.tif\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"JrGNzgEyxzGQ","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"wYoajeT54sQM","colab_type":"text"},"source":["\n","## **1.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"TpT6gbwURzrV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. 
To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n","\n","# from tensorflow.python.client import device_lib \n","# device_lib.list_local_devices()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"quzkzlRD45HF","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","metadata":{"id":"eLwDxBnp4-bc","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"leK5kmgD5Ism","colab_type":"text"},"source":["# **2. Install U-Net dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"vOeLpQfT0QF1","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play to install U-Net dependencies\n","\n","#As this notebokk depends mostly on keras which runs a tensorflow backend (which in turn is pre-installed in colab)\n","#only the data library needs to be additionally installed.\n","%tensorflow_version 1.x\n","import tensorflow\n","print(tensorflow.__version__)\n","print(\"Tensorflow enabled.\")\n","\n","#We enforce the keras==2.2.5 release to ensure that the notebook continues working even if keras is updated.\n","\n","!pip install keras==2.2.5\n","!pip install data\n","\n","# Keras imports\n","from keras import models\n","from keras.models import Model, load_model\n","from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D\n","from keras.optimizers import Adam\n","# from keras.callbacks import ModelCheckpoint, LearningRateScheduler, CSVLogger # we currently don't use any other callbacks from ModelCheckpoints\n","from keras.callbacks import ModelCheckpoint\n","from keras.callbacks import ReduceLROnPlateau\n","from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\n","from keras import backend as keras\n","\n","# General import\n","from __future__ import print_function\n","import numpy as np\n","import pandas as pd\n","import os\n","import glob\n","from skimage import img_as_ubyte, io, transform\n","import matplotlib as mpl\n","from matplotlib import pyplot as plt\n","from matplotlib.pyplot import imread\n","from pathlib import Path\n","import shutil\n","import random\n","import time\n","import csv\n","import sys\n","from math import ceil\n","\n","# Imports for QC\n","from PIL import Image\n","from scipy import signal\n","from scipy import ndimage\n","from sklearn.linear_model import LinearRegression\n","from 
skimage.util import img_as_uint\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","# from tqdm import tqdm\n","from tqdm.notebook import tqdm\n","\n","from sklearn.feature_extraction import image\n","from skimage import img_as_ubyte, io, transform\n","from skimage.util.shape import view_as_windows\n","\n","# Suppressing some warnings\n","import warnings\n","warnings.filterwarnings('ignore')\n","\n","\n","\n","def create_patches(Training_source, Training_target, patch_width, patch_height):\n"," \"\"\"\n"," Function creates patches from the Training_source and Training_target images. \n"," The steps parameter indicates the offset between patches and, if integer, is the same in x and y.\n"," Saves all created patches in two new directories in the /content folder.\n","\n"," Returns: - Two paths to where the patches are now saved\n"," \"\"\"\n"," DEBUG = False\n","\n"," Patch_source = os.path.join('/content','img_patches')\n"," Patch_target = os.path.join('/content','mask_patches')\n"," Patch_rejected = os.path.join('/content','rejected')\n"," \n","\n"," #Here we save the patches, in the /content directory as they will not usually be needed after training\n"," if os.path.exists(Patch_source):\n"," shutil.rmtree(Patch_source)\n"," if os.path.exists(Patch_target):\n"," shutil.rmtree(Patch_target)\n"," if os.path.exists(Patch_rejected):\n"," shutil.rmtree(Patch_rejected)\n","\n"," os.mkdir(Patch_source)\n"," os.mkdir(Patch_target)\n"," os.mkdir(Patch_rejected) #This directory will contain the images that have too little signal.\n"," \n","\n"," all_patches_img = np.empty([0,patch_width, patch_height])\n"," all_patches_mask = np.empty([0,patch_width, patch_height])\n","\n"," for file in os.listdir(Training_source):\n","\n"," img = io.imread(os.path.join(Training_source, file))\n"," mask = io.imread(os.path.join(Training_target, file),as_gray=True)\n","\n"," if DEBUG:\n"," print(file)\n"," print(img.dtype)\n","\n"," # Using view_as_windows with step size equal to the patch size to ensure there is no overlap\n"," patches_img = view_as_windows(img, (patch_width, patch_height), (patch_width, patch_height))\n"," patches_mask = view_as_windows(mask, (patch_width, patch_height), (patch_width, patch_height))\n"," #the shape of patches_img and patches_mask will be (number of patches along x, number of patches along y,patch_width,patch_height)\n","\n"," all_patches_img = np.concatenate((all_patches_img, patches_img.reshape(patches_img.shape[0]*patches_img.shape[1], patch_width,patch_height)), axis = 0)\n"," all_patches_mask = np.concatenate((all_patches_mask, patches_mask.reshape(patches_mask.shape[0]*patches_mask.shape[1], patch_width,patch_height)), axis = 0)\n","\n"," number_of_patches = all_patches_img.shape[0]\n"," print('number of patches: '+str(number_of_patches))\n","\n"," if DEBUG:\n"," print(all_patches_img.shape)\n"," print(all_patches_img.dtype)\n","\n"," for i in range(number_of_patches):\n"," img_save_path = os.path.join(Patch_source,'patch_'+str(i)+'.tif')\n"," mask_save_path = os.path.join(Patch_target,'patch_'+str(i)+'.tif')\n","\n"," # if the mask conatins at least 2% of its total number pixels as mask, then go ahead and save the images\n"," pixel_threshold_array = sorted(all_patches_mask[i].flatten())\n"," if pixel_threshold_array[int(round(len(pixel_threshold_array)*0.98))]>0:\n"," 
io.imsave(img_save_path, img_as_ubyte(normalizeMinMax(all_patches_img[i])))\n"," io.imsave(mask_save_path, convert2Mask(normalizeMinMax(all_patches_mask[i]),0))\n"," else:\n"," io.imsave(Patch_rejected+'/patch_'+str(i)+'_image.tif', img_as_ubyte(normalizeMinMax(all_patches_img[i])))\n"," io.imsave(Patch_rejected+'/patch_'+str(i)+'_mask.tif', convert2Mask(normalizeMinMax(all_patches_mask[i]),0))\n","\n"," return Patch_source, Patch_target\n","\n","\n","def estimatePatchSize(data_path, max_width = 512, max_height = 512):\n","\n"," files = os.listdir(data_path)\n"," \n"," # Get the size of the first image found in the folder and initialise the variables to that\n"," n = 0 \n"," while os.path.isdir(os.path.join(data_path, files[n])):\n"," n += 1\n"," (height_min, width_min) = Image.open(os.path.join(data_path, files[n])).size\n","\n"," # Screen the size of all dataset to find the minimum image size\n"," for file in files:\n"," if not os.path.isdir(os.path.join(data_path, file)):\n"," (height, width) = Image.open(os.path.join(data_path, file)).size\n"," if width < width_min:\n"," width_min = width\n"," if height < height_min:\n"," height_min = height\n"," \n"," # Find the power of patches that will fit within the smallest dataset\n"," width_min, height_min = (fittingPowerOfTwo(width_min), fittingPowerOfTwo(height_min))\n","\n"," # Clip values at maximum permissible values\n"," if width_min > max_width:\n"," width_min = max_width\n","\n"," if height_min > max_height:\n"," height_min = max_height\n"," \n"," return (width_min, height_min)\n","\n","def fittingPowerOfTwo(number):\n"," n = 0\n"," while 2**n <= number:\n"," n += 1 \n"," return 2**(n-1)\n","\n","\n","def getClassWeights(Training_target_path):\n","\n"," Mask_dir_list = os.listdir(Training_target_path)\n"," number_of_dataset = len(Mask_dir_list)\n","\n"," class_count = np.zeros(2, dtype=int)\n"," for i in tqdm(range(number_of_dataset)):\n"," mask = io.imread(os.path.join(Training_target_path, Mask_dir_list[i]))\n"," mask = normalizeMinMax(mask)\n"," class_count[0] += mask.shape[0]*mask.shape[1] - mask.sum()\n"," class_count[1] += mask.sum()\n","\n"," n_samples = class_count.sum()\n"," n_classes = 2\n","\n"," class_weights = n_samples / (n_classes * class_count)\n"," return class_weights\n","\n","def weighted_binary_crossentropy(class_weights):\n","\n"," def _weighted_binary_crossentropy(y_true, y_pred):\n"," binary_crossentropy = keras.binary_crossentropy(y_true, y_pred)\n"," weight_vector = y_true * class_weights[1] + (1. 
- y_true) * class_weights[0]\n"," weighted_binary_crossentropy = weight_vector * binary_crossentropy\n","\n"," return keras.mean(weighted_binary_crossentropy)\n","\n"," return _weighted_binary_crossentropy\n","\n","\n","def save_augment(datagen,orig_img,dir_augmented_data=\"/content/augment\"):\n"," \"\"\"\n"," Saves a subset of the augmented data for visualisation, by default in /content.\n","\n"," This is adapted from: https://fairyonice.github.io/Learn-about-ImageDataGenerator.html\n"," \n"," \"\"\"\n"," try:\n"," os.mkdir(dir_augmented_data)\n"," except:\n"," ## if the preview folder exists, then remove\n"," ## the contents (pictures) in the folder\n"," for item in os.listdir(dir_augmented_data):\n"," os.remove(dir_augmented_data + \"/\" + item)\n","\n"," ## convert the original image to array\n"," x = img_to_array(orig_img)\n"," ## reshape (Sampke, Nrow, Ncol, 3) 3 = R, G or B\n"," #print(x.shape)\n"," x = x.reshape((1,) + x.shape)\n"," #print(x.shape)\n"," ## -------------------------- ##\n"," ## randomly generate pictures\n"," ## -------------------------- ##\n"," i = 0\n"," #We will just save 5 images,\n"," #but this can be changed, but note the visualisation in 3. currently uses 5.\n"," Nplot = 5\n"," for batch in datagen.flow(x,batch_size=1,\n"," save_to_dir=dir_augmented_data,\n"," save_format='tif',\n"," seed=42):\n"," i += 1\n"," if i > Nplot - 1:\n"," break\n","\n","# Generators\n","def buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, subset, batch_size, target_size):\n"," '''\n"," Can generate image and mask at the same time use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same\n"," \n"," datagen: ImageDataGenerator \n"," subset: can take either 'training' or 'validation'\n"," '''\n"," seed = 1\n"," image_generator = image_datagen.flow_from_directory(\n"," os.path.dirname(image_folder_path),\n"," classes = [os.path.basename(image_folder_path)],\n"," class_mode = None,\n"," color_mode = \"grayscale\",\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," subset = subset,\n"," interpolation = \"bicubic\",\n"," seed = seed)\n"," \n"," mask_generator = mask_datagen.flow_from_directory(\n"," os.path.dirname(mask_folder_path),\n"," classes = [os.path.basename(mask_folder_path)],\n"," class_mode = None,\n"," color_mode = \"grayscale\",\n"," target_size = target_size,\n"," batch_size = batch_size,\n"," subset = subset,\n"," interpolation = \"nearest\",\n"," seed = seed)\n"," \n"," this_generator = zip(image_generator, mask_generator)\n"," for (img,mask) in this_generator:\n"," # img,mask = adjustData(img,mask)\n"," yield (img,mask)\n","\n","\n","def prepareGenerators(image_folder_path, mask_folder_path, datagen_parameters, batch_size = 4, target_size = (512, 512)):\n"," image_datagen = ImageDataGenerator(**datagen_parameters, preprocessing_function = normalizePercentile)\n"," mask_datagen = ImageDataGenerator(**datagen_parameters, preprocessing_function = normalizeMinMax)\n","\n"," train_datagen = buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, 'training', batch_size, target_size)\n"," validation_datagen = buildDoubleGenerator(image_datagen, mask_datagen, image_folder_path, mask_folder_path, 'validation', batch_size, target_size)\n","\n"," return (train_datagen, validation_datagen)\n","\n","\n","# Normalization functions from Martin Weigert\n","def normalizePercentile(x, pmin=1, pmax=99.8, axis=None, clip=False, eps=1e-20, 
dtype=np.float32):\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," \"\"\"Percentile-based image normalization.\"\"\"\n","\n"," mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n"," ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n"," return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n","\n","\n","def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n"," \"\"\"This function is adapted from Martin Weigert\"\"\"\n"," if dtype is not None:\n"," x = x.astype(dtype,copy=False)\n"," mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n"," ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n"," eps = dtype(eps)\n","\n"," try:\n"," import numexpr\n"," x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n"," except ImportError:\n"," x = (x - mi) / ( ma - mi + eps )\n","\n"," if clip:\n"," x = np.clip(x,0,1)\n","\n"," return x\n","\n","\n","\n","# Simple normalization to min/max fir the Mask\n","def normalizeMinMax(x, dtype=np.float32):\n"," x = x.astype(dtype,copy=False)\n"," x = (x - np.amin(x)) / (np.amax(x) - np.amin(x))\n"," return x\n","\n","\n","# def predictionGenerator(Data_path, target_size = (256,256), as_gray = True):\n","# for filename in os.listdir(Data_path):\n","# if not os.path.isdir(os.path.join(Data_path, filename)):\n","# img = io.imread(os.path.join(Data_path, filename), as_gray = as_gray)\n","# img = normalizePercentile(img)\n","# # img = img/255 # WARNING: this is expecting 8bit images\n","# img = transform.resize(img,target_size, preserve_range=True, anti_aliasing=True, order = 1) # liner interpolation\n","# img = np.reshape(img,img.shape+(1,))\n","# img = np.reshape(img,(1,)+img.shape)\n","# yield img\n","\n","\n","# def predictionResize(Data_path, predictions):\n","# resized_predictions = []\n","# for (i, filename) in enumerate(os.listdir(Data_path)):\n","# if not os.path.isdir(os.path.join(Data_path, filename)):\n","# img = Image.open(os.path.join(Data_path, filename))\n","# (width, height) = img.size\n","# resized_predictions.append(transform.resize(predictions[i], (height, width), preserve_range=True, anti_aliasing=True, order = 1))\n","# return resized_predictions\n","\n","\n","# This is code outlines the architecture of U-net. The choice of pooling steps decides the depth of the network. 
\n","def unet(pretrained_weights = None, input_size = (256,256,1), pooling_steps = 4, learning_rate = 1e-4, verbose=True, class_weights=np.ones(2)):\n"," inputs = Input(input_size)\n"," conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n"," conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n"," # Downsampling steps\n"," pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n"," conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n"," conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n"," \n"," if pooling_steps > 1:\n"," pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n"," conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n"," conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n","\n"," if pooling_steps > 2:\n"," pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n"," conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n"," conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n"," drop4 = Dropout(0.5)(conv4)\n"," \n"," if pooling_steps > 3:\n"," pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n"," conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n"," conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n"," drop5 = Dropout(0.5)(conv5)\n","\n"," #Upsampling steps\n"," up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n"," merge6 = concatenate([drop4,up6], axis = 3)\n"," conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n"," conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n"," \n"," if pooling_steps > 2:\n"," up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop4))\n"," if pooling_steps > 3:\n"," up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n"," merge7 = concatenate([conv3,up7], axis = 3)\n"," conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n"," \n"," if pooling_steps > 1:\n"," up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv3))\n"," if pooling_steps > 2:\n"," up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n"," merge8 = concatenate([conv2,up8], axis = 3)\n"," conv8 = Conv2D(128, 3, activation= 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n"," \n"," if pooling_steps == 1:\n"," up9 = Conv2D(64, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2))\n"," else:\n"," up9 = Conv2D(64, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8)) #activation = 'relu'\n"," \n"," merge9 = concatenate([conv1,up9], axis = 3)\n"," conv9 = Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal')(merge9) #activation = 'relu'\n"," conv9 = Conv2D(64, 3, padding = 'same', 
kernel_initializer = 'he_normal')(conv9) #activation = 'relu'\n"," conv9 = Conv2D(2, 3, padding = 'same', kernel_initializer = 'he_normal')(conv9) #activation = 'relu'\n"," conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)\n","\n"," model = Model(inputs = inputs, outputs = conv10)\n","\n"," # model.compile(optimizer = Adam(lr = learning_rate), loss = 'binary_crossentropy', metrics = ['acc'])\n"," model.compile(optimizer = Adam(lr = learning_rate), loss = weighted_binary_crossentropy(class_weights))\n","\n","\n"," if verbose:\n"," model.summary()\n","\n"," if(pretrained_weights):\n"," \tmodel.load_weights(pretrained_weights);\n","\n"," return model\n","\n","\n","\n","def predict_as_tiles(Image_path, model):\n","\n"," # Read the data in and normalize\n"," Image_raw = io.imread(Image_path, as_gray = True)\n"," Image_raw = normalizePercentile(Image_raw)\n","\n"," # Get the patch size from the input layer of the model\n"," patch_size = model.layers[0].output_shape[1:3]\n","\n"," # Pad the image with zeros if any of its dimensions is smaller than the patch size\n"," if Image_raw.shape[0] < patch_size[0] or Image_raw.shape[1] < patch_size[1]:\n"," Image = np.zeros((max(Image_raw.shape[0], patch_size[0]), max(Image_raw.shape[1], patch_size[1])))\n"," Image[0:Image_raw.shape[0], 0: Image_raw.shape[1]] = Image_raw\n"," else:\n"," Image = Image_raw\n","\n"," # Calculate the number of patches in each dimension\n"," n_patch_in_width = ceil(Image.shape[0]/patch_size[0])\n"," n_patch_in_height = ceil(Image.shape[1]/patch_size[1])\n","\n"," prediction = np.zeros(Image.shape)\n","\n"," for x in range(n_patch_in_width):\n"," for y in range(n_patch_in_height):\n"," xi = patch_size[0]*x\n"," yi = patch_size[1]*y\n","\n"," # If the patch exceeds the edge of the image shift it back \n"," if xi+patch_size[0] >= Image.shape[0]:\n"," xi = Image.shape[0]-patch_size[0]\n","\n"," if yi+patch_size[1] >= Image.shape[1]:\n"," yi = Image.shape[1]-patch_size[1]\n"," \n"," # Extract and reshape the patch\n"," patch = Image[xi:xi+patch_size[0], yi:yi+patch_size[1]]\n"," patch = np.reshape(patch,patch.shape+(1,))\n"," patch = np.reshape(patch,(1,)+patch.shape)\n","\n"," # Get the prediction from the patch and paste it in the prediction in the right place\n"," predicted_patch = model.predict(patch, batch_size = 1)\n"," prediction[xi:xi+patch_size[0], yi:yi+patch_size[1]] = np.squeeze(predicted_patch)\n","\n","\n"," return prediction[0:Image_raw.shape[0], 0: Image_raw.shape[1]]\n"," \n","\n","\n","\n","def saveResult(save_path, nparray, source_dir_list, prefix='', threshold=None):\n"," for (filename, image) in zip(source_dir_list, nparray):\n"," io.imsave(os.path.join(save_path, prefix+os.path.splitext(filename)[0]+'.tif'), img_as_ubyte(image)) # saving as unsigned 8-bit image\n"," \n"," # For masks, threshold the images and return 8 bit image\n"," if threshold is not None:\n"," mask = convert2Mask(image, threshold)\n"," io.imsave(os.path.join(save_path, prefix+'mask_'+os.path.splitext(filename)[0]+'.tif'), mask)\n","\n","\n","def convert2Mask(image, threshold):\n"," mask = img_as_ubyte(image, force_copy=True)\n"," mask[mask > threshold] = 255\n"," mask[mask <= threshold] = 0\n"," return mask\n","\n","\n","def getIoUvsThreshold(prediction_filepath, groud_truth_filepath):\n"," prediction = io.imread(prediction_filepath)\n"," ground_truth_image = img_as_ubyte(io.imread(groud_truth_filepath, as_gray=True), force_copy=True)\n","\n"," threshold_list = []\n"," IoU_scores_list = []\n","\n"," for threshold in range(0,256): \n"," # 
Convert to 8-bit for calculating the IoU\n"," mask = img_as_ubyte(prediction, force_copy=True)\n"," mask[mask > threshold] = 255\n"," mask[mask <= threshold] = 0\n","\n"," # Intersection over Union metric\n"," intersection = np.logical_and(ground_truth_image, np.squeeze(mask))\n"," union = np.logical_or(ground_truth_image, np.squeeze(mask))\n"," iou_score = np.sum(intersection) / np.sum(union)\n","\n"," threshold_list.append(threshold)\n"," IoU_scores_list.append(iou_score)\n","\n"," return (threshold_list, IoU_scores_list)\n","\n","\n","\n","# -------------- Other definitions -----------\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","prediction_prefix = 'Predicted_'\n","\n","\n","print('-------------------')\n","print('U-Net and dependencies installed.')\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"7hTKImff6Est","colab_type":"text"},"source":["# **3. Select your parameters and paths**\n","\n","---"]},{"cell_type":"markdown","metadata":{"id":"S74FbqV6PNNv","colab_type":"text"},"source":["##**3.1. Parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"3np5EpJF8_q2","colab_type":"text"},"source":[" **Paths for training data and models**\n","\n","**`Training_source`, `Training_target`:** These are the folders containing your source (e.g. EM images) and target files (segmentation masks). Enter the path to the source and target images for training. **These should be located in the same parent folder.**\n","\n","**`model_name`:** Use only my_model -style, not my-model. If you want to use a previously trained model, enter the name of the pretrained model (which should be contained in the trained_model -folder after training).\n","\n","**`model_path`**: Enter the path of the folder where you want to save your model.\n","\n","**`visual_validation_after_training`**: If you select this option, a random image pair will be set aside from your training set and will be used to display a predicted image of the trained network next to the input and the ground-truth. This can aid in visually assessing the performance of your network after training. **Note: Your training set size will decrease by 1 if you select this option.**\n","\n","**Make sure the directories exist before entering them!**\n","\n"," **Select training parameters**\n","\n","**`number_of_epochs`**: Choose more epochs for larger training sets. Observing how much the loss reduces between epochs during training may help determine the optimal value. **Default: 200**\n","\n","**Advanced parameters - experienced users only**\n","\n","**`batch_size`**: This parameter describes the amount of images that are loaded into the network per step. Smaller batchsizes may improve training performance slightly but may increase training time. If the notebook crashes while loading the dataset this can be due to a too large batch size. Decrease the number in this case. **Default: 4**\n","\n","**`number_of_steps`**: This number should be equivalent to the number of samples in the training set divided by the batch size, to ensure the training iterates through the entire training set. Smaller values can be used for testing. **Default: 6**\n","\n"," **`pooling_steps`**: Choosing a different number of pooling layers can affect the performance of the network. Each additional pooling step will also two additional convolutions. The network can learn more complex information but is also more likely to overfit. Achieving best performance may require testing different values here. 
**Default: 2**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10** \n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0003**\n","\n","**`patch_width` and `patch_height`:** The notebook crops the data in patches of fixed size prior to training. The dimensions of the patches can be defined here. When `Use_Default_Advanced_Parameters` is selected, the largest 2^n x 2^n patch that fits in the smallest dataset is chosen. Larger patches than 512x512 should **NOT** be selected for network stability.\n","\n"]},{"cell_type":"code","metadata":{"id":"7deNuPZd5d-B","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- Initial user input ------------\n","#@markdown ###Path to training images:\n","Training_source = '' #@param {type:\"string\"}\n","Training_target = '' #@param {type:\"string\"}\n","\n","model_name = '' #@param {type:\"string\"}\n","model_path = '' #@param {type:\"string\"}\n","\n","#@markdown ###Training parameters:\n","#@markdown Number of epochs\n","number_of_epochs = 200#@param {type:\"number\"}\n","\n","#@markdown ###Advanced parameters:\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","batch_size = 4#@param {type:\"integer\"}\n","number_of_steps = 6#@param {type:\"number\"}\n","pooling_steps = 2 #@param [1,2,3,4]{type:\"raw\"}\n","percentage_validation = 10#@param{type:\"number\"}\n","initial_learning_rate = 0.0003 #@param {type:\"number\"}\n","\n","patch_width = 512#@param{type:\"number\"}\n","patch_height = 512#@param{type:\"number\"}\n","\n","\n","# ------------- Initialising folder, variables and failsafes ------------\n","# Create the folders where to save the model and the QC\n","full_model_path = os.path.join(model_path, model_name)\n","if os.path.exists(full_model_path):\n"," print(R+'!! 
WARNING: Folder already exists and will be overwritten !!'+W)\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 4\n"," pooling_steps = 2\n"," percentage_validation = 10\n"," initial_learning_rate = 0.0003\n"," patch_width, patch_height = estimatePatchSize(Training_source)\n","\n","\n","#The create_patches function will create the two folders below\n","# Patch_source = '/content/img_patches'\n","# Patch_target = '/content/mask_patches'\n","print('Training on patches of size (x,y): ('+str(patch_width)+','+str(patch_height)+')')\n","\n","#Create patches\n","print('Creating patches...')\n","Patch_source, Patch_target = create_patches(Training_source, Training_target, patch_width, patch_height)\n","\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = False\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","Use_Data_augmentation = False\n","\n","# ------------- Display ------------\n","\n","#if not os.path.exists('/content/img_patches/'):\n","random_choice = random.choice(os.listdir(Patch_source))\n","x = io.imread(os.path.join(Patch_source, random_choice))\n","\n","#os.chdir(Training_target)\n","y = io.imread(os.path.join(Patch_target, random_choice), as_gray=True)\n","\n","f=plt.figure(figsize=(16,8))\n","plt.subplot(1,2,1)\n","plt.imshow(x, interpolation='nearest',cmap='gray')\n","plt.title('Training image patch')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(y, interpolation='nearest',cmap='gray')\n","plt.title('Training mask patch')\n","plt.axis('off');\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"V9UCjlLJ5Rfc","colab_type":"text"},"source":["##**3.2. Data augmentation**\n","\n","---\n","\n"," Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if the dataset is large the values can be set to 0.\n","\n"," The augmentation options below are to be used as follows:\n","\n","* **shift**: a translation of the image by a fraction of the image size (width or height), **default: 10%**\n","* **zoom_range**: Increasing or decreasing the field of view. E.g. 
10% will result in a zoom range of (0.9 to 1.1), with pixels added or interpolated, depending on the transformation, **default: 10%**\n","* **shear_range**: Shear angle in counter-clockwise direction, **default: 10%**\n","* **flip**: creating a mirror image along specified axis (horizontal or vertical), **default: True**\n","* **rotation_range**: range of allowed rotation angles in degrees (from 0 to *value*), **default: 180**"]},{"cell_type":"code","metadata":{"id":"i-PahNX94-pl","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##**Augmentation options**\n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","Use_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation:\n"," if Use_Default_Augmentation_Parameters:\n"," horizontal_shift = 10 \n"," vertical_shift = 20 \n"," zoom_range = 10\n"," shear_range = 10\n"," horizontal_flip = True\n"," vertical_flip = True\n"," rotation_range = 180\n","#@markdown ###If you are not using the default settings, please provide the values below:\n","\n","#@markdown ###**Image shift, zoom, shear and flip (%)**\n"," else:\n"," horizontal_shift = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," vertical_shift = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," zoom_range = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," shear_range = 10 #@param {type:\"slider\", min:0, max:100, step:1}\n"," horizontal_flip = True #@param {type:\"boolean\"}\n"," vertical_flip = True #@param {type:\"boolean\"}\n","\n","#@markdown ###**Rotate image within angle range (degrees):**\n"," rotation_range = 180 #@param {type:\"slider\", min:0, max:180, step:1}\n","\n","#given behind the # are the default values for each parameter.\n","\n","else:\n"," horizontal_shift = 0 \n"," vertical_shift = 0 \n"," zoom_range = 0\n"," shear_range = 0\n"," horizontal_flip = False\n"," vertical_flip = False\n"," rotation_range = 0\n","\n","\n","# Build the dict for the ImageDataGenerator\n","data_gen_args = dict(width_shift_range = horizontal_shift/100.,\n"," height_shift_range = vertical_shift/100.,\n"," rotation_range = rotation_range, #90\n"," zoom_range = zoom_range/100.,\n"," shear_range = shear_range/100.,\n"," horizontal_flip = horizontal_flip,\n"," vertical_flip = vertical_flip,\n"," validation_split = percentage_validation/100,\n"," fill_mode = 'reflect')\n","\n","\n","\n","# ------------- Display ------------\n","dir_augmented_data_imgs=\"/content/augment_img\"\n","dir_augmented_data_masks=\"/content/augment_mask\"\n","random_choice = random.choice(os.listdir(Patch_source))\n","orig_img = load_img(os.path.join(Patch_source,random_choice))\n","orig_mask = load_img(os.path.join(Patch_target,random_choice))\n","\n","augment_view = ImageDataGenerator(**data_gen_args)\n","\n","if Use_Data_augmentation:\n"," print(\"Parameters enabled\")\n"," print(\"Here is what a subset of your augmentations looks like:\")\n"," save_augment(augment_view, orig_img, dir_augmented_data=dir_augmented_data_imgs)\n"," save_augment(augment_view, orig_mask, dir_augmented_data=dir_augmented_data_masks)\n","\n"," fig = plt.figure(figsize=(15, 7))\n"," fig.subplots_adjust(hspace=0.0,wspace=0.1,left=0,right=1.1,bottom=0, top=0.8)\n","\n"," \n"," ax = fig.add_subplot(2, 6, 1,xticks=[],yticks=[]) \n"," new_img=img_as_ubyte(normalizeMinMax(img_to_array(orig_img)))\n"," ax.imshow(new_img)\n"," ax.set_title('Original Image')\n"," i = 2\n"," for imgnm in os.listdir(dir_augmented_data_imgs):\n"," ax = fig.add_subplot(2, 6, 
i,xticks=[],yticks=[]) \n"," img = load_img(dir_augmented_data_imgs + \"/\" + imgnm)\n"," ax.imshow(img)\n"," i += 1\n","\n"," ax = fig.add_subplot(2, 6, 7,xticks=[],yticks=[]) \n"," new_mask=img_as_ubyte(normalizeMinMax(img_to_array(orig_mask)))\n"," ax.imshow(new_mask)\n"," ax.set_title('Original Mask')\n"," j=2\n"," for imgnm in os.listdir(dir_augmented_data_masks):\n"," ax = fig.add_subplot(2, 6, j+6,xticks=[],yticks=[]) \n"," mask = load_img(dir_augmented_data_masks + \"/\" + imgnm)\n"," ax.imshow(mask)\n"," j += 1\n"," plt.show()\n","\n","else:\n"," print(\"No augmentation will be used\")"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"7vFEIHbNAuOs","colab_type":"text"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a U-Net model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n","\n"," In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. "]},{"cell_type":"code","metadata":{"id":"RfR9UyKAAulw","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","pretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\n","Weights_choice = \"last\" #@param [\"last\", \"best\"]\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","\n","# --------------------- Download the a model provided in the XXX ------------------------\n","\n"," if pretrained_model_choice == \"Model_name\":\n"," pretrained_model_name = \"Model_name\"\n"," pretrained_model_path = \"/content/\"+pretrained_model_name\n"," print(\"Downloading the UNET_Model_from_\")\n"," if os.path.exists(pretrained_model_path):\n"," shutil.rmtree(pretrained_model_path)\n"," os.makedirs(pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path)\n"," wget.download(\"\", pretrained_model_path) \n"," wget.download(\"\", pretrained_model_path)\n"," h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n","\n","# --------------------- Add additional pre-trained models here ------------------------\n","\n","\n","\n","# --------------------- Check the model exist ------------------------\n","# If the model path chosen does not contain a pretrain model 
then use_pretrained_model is disabled, \n"," if not os.path.exists(h5_file_path):\n"," print(R+'WARNING: pretrained model does not exist')\n"," Use_pretrained_model = False\n"," \n","\n","# If the model path contains a pretrain model, we load the training rate, \n"," if os.path.exists(h5_file_path):\n","#Here we check if the learning rate can be loaded from the quality control folder\n"," if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n","\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," #print(csvRead)\n"," \n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n","\n","#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n"," if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n"," bestLearningRate = initial_learning_rate\n"," lastLearningRate = initial_learning_rate\n","\n","\n","# Display info about the pretrained model to be loaded (or not)\n","if Use_pretrained_model:\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n","else:\n"," print(R+'No pretrained network will be used.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"94FX4wzE8w1W","colab_type":"text"},"source":["# **4. Train the network**\n","---\n","####**Troubleshooting:** If you receive a time-out or exhausted error, try reducing the batchsize of your training set. This reduces the amount of data loaded into the model at one point in time. "]},{"cell_type":"markdown","metadata":{"id":"tlTDGcmDDHDe","colab_type":"text"},"source":["## **4.1. 
Prepare model for training**\n","---"]},{"cell_type":"code","metadata":{"id":"ezFy_mpz_op4","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play this cell to prepare the model for training\n","\n","\n","# ------------------ Set the generators, model and logger ------------------\n","# This will take the image size and set that as a patch size (arguable...)\n","# Read image size (without actuall reading the data)\n","\n","\n","# n = 0 \n","# while os.path.isdir(os.path.join(Training_source, source_images[n])):\n","# n += 1\n","\n","# (width, height) = Image.open(os.path.join(Training_target, source_images[n])).size\n","# ImageSize = (height, width) # np.shape different from PIL image.size return !\n","\n","# !!! WARNING !!! Check potential issues with resizing at the ImageDataGenerator level\n","# (train_datagen, validation_datagen) = prepareGenerators(Training_source, Training_target, data_gen_args, batch_size, target_size = ImageSize)\n","(train_datagen, validation_datagen) = prepareGenerators(Patch_source, Patch_target, data_gen_args, batch_size, target_size = (patch_width, patch_height))\n","\n","\n","# This modelcheckpoint will only save the best model from the validation loss point of view\n","model_checkpoint = ModelCheckpoint(os.path.join(full_model_path, 'weights_best.hdf5'), monitor='val_loss',verbose=1, save_best_only=True)\n","\n","print('Getting class weights...')\n","class_weights = getClassWeights(Training_target)\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we make sure this is properly defined\n","if not Use_pretrained_model:\n"," h5_file_path = None\n","# --------------------- ---------------------- ------------------------\n","\n","# --------------------- Using pretrained model ------------------------\n","#Here we ensure that the learning rate set correctly when using pre-trained models\n","if Use_pretrained_model:\n"," if Weights_choice == \"last\":\n"," initial_learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\": \n"," initial_learning_rate = bestLearningRate\n","# --------------------- ---------------------- ------------------------\n","\n","# --------------------- Reduce learning rate on plateau ------------------------\n","\n","reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, verbose=1, mode='auto',\n"," patience=10, min_lr=0)\n","# --------------------- ---------------------- ------------------------\n","\n","\n","# Define the model\n","model = unet(pretrained_weights = h5_file_path, \n"," input_size = (patch_width,patch_height,1), \n"," pooling_steps = pooling_steps, \n"," learning_rate = initial_learning_rate, \n"," class_weights = class_weights)\n","\n","# Dfine CSV logger that will create the loss file (we're not using this anylonger)\n","# csv_log = CSVLogger(os.path.join(full_model_path, 'Quality Control', 'training_evaluation.csv'), separator=',', append=False)\n","\n","number_of_training_dataset = len(os.listdir(Patch_source))\n","\n","if Use_Default_Advanced_Parameters:\n"," number_of_steps = ceil((100-percentage_validation)/100*number_of_training_dataset/batch_size)\n","\n","# Calculate the number of steps to use for validation\n","validation_steps = max(1, ceil(percentage_validation/100*number_of_training_dataset/batch_size))\n","\n","config_model= model.optimizer.get_config()\n","print(config_model)\n","\n","\n","# ------------------ Failsafes ------------------\n","if os.path.exists(full_model_path):\n"," print(R+'!! 
WARNING: Model folder already existed and has been removed !!'+W)\n"," shutil.rmtree(full_model_path)\n","\n","os.makedirs(full_model_path)\n","os.makedirs(os.path.join(full_model_path,'Quality Control'))\n","\n","\n","# ------------------ Display ------------------\n","print('---------------------------- Main training parameters ----------------------------')\n","print('Number of epochs: '+str(number_of_epochs))\n","print('Batch size: '+str(batch_size))\n","print('Number of training dataset: '+str(number_of_training_dataset))\n","print('Number of training steps: '+str(number_of_steps))\n","print('Number of validation steps: '+str(validation_steps))\n","print('---------------------------- ------------------------ ----------------------------')\n","\n","\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"urpQ9UM-6NBE","colab_type":"text"},"source":["## **4.2. Train the network**\n","---\n","\n","####**Be patient**. Please be patient, this may take a while. But the verbose allow you to estimate how fast it's training and how long it'll take. While it's training, please make sure that the computer is not powering down due to inactivity, otherwise this will interupt the runtime."]},{"cell_type":"code","metadata":{"id":"sMyCENd29TKz","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Start training\n","\n","start = time.time()\n","# history = model.fit_generator(train_datagen, steps_per_epoch = number_of_steps, epochs=epochs, callbacks=[model_checkpoint,csv_log], validation_data = validation_datagen, validation_steps = validation_steps, shuffle=True, verbose=1)\n","history = model.fit_generator(train_datagen, steps_per_epoch = number_of_steps, epochs = number_of_epochs, callbacks=[model_checkpoint, reduce_lr], validation_data = validation_datagen, validation_steps = validation_steps, shuffle=True, verbose=1)\n","\n","# Save the last model\n","model.save(os.path.join(full_model_path, 'weights_last.hdf5'))\n","\n","\n","# convert the history.history dict to a pandas DataFrame: \n","lossData = pd.DataFrame(history.history) \n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). \n","lossDataCSVpath = os.path.join(full_model_path,'Quality Control/training_evaluation.csv')\n","with open(lossDataCSVpath, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['loss','val_loss', 'learning rate'])\n"," for i in range(len(history.history['loss'])):\n"," writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n"," \n","\n","\n","# Displaying the time elapsed for training\n","print(\"------------------------------------------\")\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\", hour, \"hour(s)\", mins,\"min(s)\",round(sec),\"sec(s)\")\n","print(\"------------------------------------------\")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"LWaFk0JNda-N","colab_type":"text"},"source":["## **4.3. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"mEMcFNHZdmTz","colab_type":"text"},"source":["# **5. 
Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend performing quality control on all newly trained models.**"]},{"cell_type":"code","metadata":{"id":"X11zGW0Ldu-z","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Do you want to assess the model you just trained?\n","\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","\n","full_QC_model_path = os.path.join(QC_model_path, QC_model_name)\n","if os.path.exists(os.path.join(full_QC_model_path, 'weights_best.hdf5')):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","else:\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"pkJyRzWJCrKG","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value, but between the model's prediction on a validation image and its target.\n","\n","During training, both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case, the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"qul6BpaX1GqS","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. 
epoch number\n","\n","epochNumber = []\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","\n","with open(os.path.join(full_QC_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n","\n","epochNumber = range(len(lossDataFromCSV))\n","\n","plt.figure(figsize=(15,10))\n","\n","plt.subplot(2,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(2,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(os.path.join(full_QC_model_path, 'Quality Control', 'lossCurvePlots.png'))\n","plt.show()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"h33P0C2geqZu","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder. The result for one of the image will also be displayed.\n","\n","The **Intersection over Union** metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n","\n","The Input, Ground Truth, Prediction and IoU maps are shown below for the last example in the QC set.\n","\n"," The results for all QC examples can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\".\n","\n","### **Thresholds for image masks**\n","\n"," Since the output from Unet is not a binary mask, the output images are converted to binary masks using thresholding. This section will test different thresholds (from 0 to 255) to find the one yielding the best IoU score compared with the ground truth. The best threshold for each image and the average of these thresholds will be displayed below. 
**These values can be a guideline when creating masks for unseen data in section 6.**"]},{"cell_type":"code","metadata":{"id":"Tpqjvwv2zug-","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- User input ------------\n","#@markdown ##Choose the folders that contain your Quality Control dataset\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","\n","# ------------- Initialise folders ------------\n","# Create a quality control/Prediction Folder\n","prediction_QC_folder = os.path.join(full_QC_model_path, 'Quality Control', 'Prediction')\n","if os.path.exists(prediction_QC_folder):\n"," shutil.rmtree(prediction_QC_folder)\n","\n","os.makedirs(prediction_QC_folder)\n","\n","\n","# ------------- Prepare the model and run predictions ------------\n","\n","# Load the model\n","unet = load_model(os.path.join(full_QC_model_path, 'weights_best.hdf5'), custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))})\n","Input_size = unet.layers[0].output_shape[1:3]\n","print('Model input size: '+str(Input_size[0])+'x'+str(Input_size[1]))\n","\n","# Create a list of sources\n","source_dir_list = os.listdir(Source_QC_folder)\n","number_of_dataset = len(source_dir_list)\n","print('Number of dataset found in the folder: '+str(number_of_dataset))\n","\n","predictions = []\n","for i in tqdm(range(number_of_dataset)):\n"," predictions.append(predict_as_tiles(os.path.join(Source_QC_folder, source_dir_list[i]), unet))\n","\n","\n","# Save the results in the folder along with the masks according to the set threshold\n","saveResult(prediction_QC_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=None)\n","\n","#-----------------------------Calculate Metrics----------------------------------------#\n","\n","f = plt.figure(figsize=((5,5)))\n","\n","with open(os.path.join(full_QC_model_path,'Quality Control', 'QC_metrics_'+QC_model_name+'.csv'), \"w\", newline='') as file:\n"," writer = csv.writer(file)\n"," writer.writerow([\"File name\",\"IoU\", \"IoU-optimised threshold\"]) \n","\n"," # Initialise the lists \n"," filename_list = []\n"," best_threshold_list = []\n"," best_IoU_score_list = []\n","\n"," for filename in os.listdir(Source_QC_folder):\n","\n"," if not os.path.isdir(os.path.join(Source_QC_folder, filename)):\n"," print('Running QC on: '+filename)\n"," test_input = io.imread(os.path.join(Source_QC_folder, filename), as_gray=True)\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, filename), as_gray=True)\n","\n"," (threshold_list, iou_scores_per_threshold) = getIoUvsThreshold(os.path.join(prediction_QC_folder, prediction_prefix+filename), os.path.join(Target_QC_folder, filename))\n"," plt.plot(threshold_list,iou_scores_per_threshold, label=filename)\n","\n"," # Here we find which threshold yielded the highest IoU score for image n.\n"," best_IoU_score = max(iou_scores_per_threshold)\n"," best_threshold = iou_scores_per_threshold.index(best_IoU_score)\n","\n"," # Write the results in the CSV file\n"," writer.writerow([filename, str(best_IoU_score), str(best_threshold)])\n","\n"," # Here we append the best threshold and score to the lists\n"," filename_list.append(filename)\n"," best_IoU_score_list.append(best_IoU_score)\n"," best_threshold_list.append(best_threshold)\n","\n","# Display the IoV vs Threshold plot\n","plt.title('IoU vs. 
Threshold')\n","plt.ylabel('IoU')\n","plt.xlabel('Threshold value')\n","plt.legend()\n","plt.show()\n","\n","\n","# Table with metrics as dataframe output\n","pdResults = pd.DataFrame(index = filename_list)\n","pdResults[\"IoU\"] = best_IoU_score_list\n","pdResults[\"IoU-optimised threshold\"] = best_threshold_list\n","\n","\n","\n","average_best_threshold = sum(best_threshold_list)/len(best_threshold_list)\n","\n","\n","# ------------- For display ------------\n","print('--------------------------------------------------------------')\n","@interact\n","def show_QC_results(file=os.listdir(Source_QC_folder)):\n"," \n"," plt.figure(figsize=(25,5))\n"," #Input\n"," plt.subplot(1,4,1)\n"," plt.axis('off')\n"," plt.imshow(plt.imread(os.path.join(Source_QC_folder, file)), aspect='equal', cmap='gray', interpolation='nearest')\n"," plt.title('Input')\n","\n"," #Ground-truth\n"," plt.subplot(1,4,2)\n"," plt.axis('off')\n"," test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, file),as_gray=True)\n"," plt.imshow(test_ground_truth_image, aspect='equal', cmap='Greens')\n"," plt.title('Ground Truth')\n","\n"," #Prediction\n"," plt.subplot(1,4,3)\n"," plt.axis('off')\n"," test_prediction = plt.imread(os.path.join(prediction_QC_folder, prediction_prefix+file))\n"," test_prediction_mask = np.empty_like(test_prediction)\n"," test_prediction_mask[test_prediction > average_best_threshold] = 255\n"," test_prediction_mask[test_prediction <= average_best_threshold] = 0\n"," plt.imshow(test_prediction_mask, aspect='equal', cmap='Purples')\n"," plt.title('Prediction')\n","\n"," #Overlay\n"," plt.subplot(1,4,4)\n"," plt.axis('off')\n"," plt.imshow(test_ground_truth_image, cmap='Greens')\n"," plt.imshow(test_prediction_mask, alpha=0.5, cmap='Purples')\n"," metrics_title = 'Overlay (IoU: ' + str(round(pdResults.loc[file][\"IoU\"],3)) + ' T: ' + str(round(pdResults.loc[file][\"IoU-optimised threshold\"])) + ')'\n"," plt.title(metrics_title)\n","\n","\n","\n","print('--------------------------------------------------------------')\n","print('Best average threshold is: '+str(round(average_best_threshold)))\n","print('--------------------------------------------------------------')\n","\n","pdResults.head()\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"gofmRsLP96O8","colab_type":"text"},"source":["# **6. Using the trained model**\n","\n","---\n","In this section, unseen data is processed using the trained model (from section 4). First, your unseen images are uploaded and prepared for prediction. The trained model is then applied to these images and the resulting predictions are saved to your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"Pv_v1Ru2OJkU","colab_type":"text"},"source":["## **6.1 Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.1) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Result_folder** folder.\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images.\n","\n"," Once the predictions are complete the cell will display a random example prediction beside the input image and the calculated mask for visual inspection.\n","\n"," **Troubleshooting:** If there is a low contrast image warning when saving the images, this may be due to overfitting of the model to the data. It may result in images containing only a single colour. Train the network again with different network hyperparameters."]},{"cell_type":"code","metadata":{"id":"FJAe55ZoOJGs","colab_type":"code","cellView":"form","colab":{}},"source":["# ------------- Initial user input ------------\n","#@markdown ###Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n","Data_folder = '' #@param {type:\"string\"}\n","Results_folder = '' #@param {type:\"string\"}\n","\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder:\n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","\n","# ------------- Failsafes ------------\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = os.path.join(Prediction_model_path, Prediction_model_name)\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(R+'!! 
WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","\n","# ------------- Prepare the model and run predictions ------------\n","\n","# Load the model and prepare generator\n","\n","unet = load_model(os.path.join(Prediction_model_path, Prediction_model_name, 'weights_best.hdf5'), custom_objects={'_weighted_binary_crossentropy': weighted_binary_crossentropy(np.ones(2))})\n","Input_size = unet.layers[0].output_shape[1:3]\n","print('Model input size: '+str(Input_size[0])+'x'+str(Input_size[1]))\n","\n","# Create a list of sources\n","source_dir_list = os.listdir(Data_folder)\n","number_of_dataset = len(source_dir_list)\n","print('Number of dataset found in the folder: '+str(number_of_dataset))\n","\n","predictions = []\n","for i in tqdm(range(number_of_dataset)):\n"," predictions.append(predict_as_tiles(os.path.join(Data_folder, source_dir_list[i]), unet))\n"," # predictions.append(prediction(os.path.join(Data_folder, source_dir_list[i]), os.path.join(Prediction_model_path, Prediction_model_name)))\n","\n","\n","# Save the results in the folder along with the masks according to the set threshold\n","saveResult(Results_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=None)\n","\n","\n","# ------------- For display ------------\n","print('--------------------------------------------------------------')\n","\n","def show_prediction_mask(file=os.listdir(Data_folder), threshold=(0,255,1)):\n","\n"," plt.figure(figsize=(18,6))\n"," # Wide-field\n"," plt.subplot(1,3,1)\n"," plt.axis('off')\n"," img_Source = plt.imread(os.path.join(Data_folder, file))\n"," plt.imshow(img_Source, cmap='gray')\n"," plt.title('Source image',fontsize=15)\n"," # Prediction\n"," plt.subplot(1,3,2)\n"," plt.axis('off')\n"," img_Prediction = plt.imread(os.path.join(Results_folder, prediction_prefix+file))\n"," plt.imshow(img_Prediction, cmap='gray')\n"," plt.title('Prediction',fontsize=15)\n","\n"," # Thresholded mask\n"," plt.subplot(1,3,3)\n"," plt.axis('off')\n"," img_Mask = convert2Mask(img_Prediction, threshold)\n"," plt.imshow(img_Mask, cmap='gray')\n"," plt.title('Mask (Threshold: '+str(round(threshold))+')',fontsize=15)\n","\n","\n","interact(show_prediction_mask, continuous_update=False);\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"su-Mo2POVpja","colab_type":"text"},"source":["## **6.2. Export results as masks**\n","---\n"]},{"cell_type":"code","metadata":{"id":"iC_B_9lxNUny","colab_type":"code","cellView":"form","colab":{}},"source":["\n","# @markdown #Play this cell to save results as masks with the chosen threshold\n","threshold = 120#@param {type:\"number\"}\n","\n","saveResult(Results_folder, predictions, source_dir_list, prefix=prediction_prefix, threshold=threshold)\n","print('-------------------')\n","print('Masks were saved in: '+Results_folder)\n","\n","\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wYmwCQKjYsJ7","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"sCXzzvnh2_rc","colab_type":"text"},"source":["#**Thank you for using U-Net!**"]}]} \ No newline at end of file diff --git a/Colab_notebooks/YOLOv2_ZeroCostDL4Mic.ipynb b/Colab_notebooks/YOLOv2_ZeroCostDL4Mic.ipynb new file mode 100755 index 00000000..752fc61f --- /dev/null +++ b/Colab_notebooks/YOLOv2_ZeroCostDL4Mic.ipynb @@ -0,0 +1 @@ +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"YOLOv2_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1LWs9bFbYclR1nWaupcSPUYFN6yyUU_5t","timestamp":1596536407170},{"file_id":"1uUjR8Sm2l6vAJfclb84gUUH4MCwzQUWO","timestamp":1594734310956},{"file_id":"1zileODcR2RNrVSidXNuBfgFDv68JRRa0","timestamp":1593093410185},{"file_id":"1EpgWlJK6U_ZwlBGiomLfbxx9UUtRPBTy","timestamp":1592904104821},{"file_id":"1f5usS6p8Cu_efegMwcR3v68AVOXBSyIf","timestamp":1588870626184},{"file_id":"1fM7obTEQKnSgVZMDa1KjiBgiBar2b0t8","timestamp":1588693012611},{"file_id":"1owWtQQucUxUOZMaPh2x_mxe_qXKHCZhp","timestamp":1588074588514},{"file_id":"159ARwlQE7-zi0EHxunOF_YPFLt-ZVU5x","timestamp":1587562499898},{"file_id":"1W-7NHehG5MRFILvZZzhPWWnOdJMkadb2","timestamp":1586332290412},{"file_id":"1pUetEQICxYWkYVaQIgdRH1EZBTl7oc2A","timestamp":1586292199692},{"file_id":"1MD36ZkM6XR9EuV12zimJmfCjzyeYZFWq","timestamp":1586269469061},{"file_id":"16A2mbaHzlEElntS8qkFBOsBvZG-mUeY6","timestamp":1586253795726},{"file_id":"1gJlcjOiSxr2buDOxmcFbT_d-GqwLjXtK","timestamp":1583343225796},{"file_id":"10yGI51WzHfgWgZAyE-EbkZFEvIOd6CP6","timestamp":1583171396283}],"collapsed_sections":[]},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"V9zNGvape2-I","colab_type":"text"},"source":["# **YOLOv2**\n","---\n","\n"," YOLOv2 is a deep-learning method designed to perform object detection and classification of objects in images, published by [Redmon and Farhadi](https://ieeexplore.ieee.org/document/8100173). This is based on the original [YOLO](https://arxiv.org/abs/1506.02640) implementation published by the same authors. YOLOv2 is trained on images with class annotations in the form of bounding boxes drawn around the objects of interest. The images are downsampled by a convolutional neural network (CNN) and objects are classified in two final fully connected layers in the network. YOLOv2 learns classification and object detection simultaneously by taking the whole input image into account, predicting many possible bounding box solutions, and then using regression to find the best bounding boxes and classifications for each object.\n","\n","**This particular notebook enables object detection and classification on 2D images given ground truth bounding boxes. If you are interested in image segmentation, you should use our U-net or Stardist notebooks instead.**\n","\n","---\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). 
Jointly developed by the Jacquemet (https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","This notebook is based on the following papers: \n","\n","**YOLO9000: Better, Faster, Stronger** from Joseph Redmon and Ali Farhadi in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, (https://ieeexplore.ieee.org/document/8100173)\n","\n","**You Only Look Once: Unified, Real-Time Object Detection** from Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016, (https://ieeexplore.ieee.org/document/7780460)\n","\n","**Note: The source code for this notebook is adapted for Keras and can be found at: (https://github.com/experiencor/keras-yolo2)**\n","\n","\n","**Please also cite these original papers when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV","colab_type":"text"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Videos describing how to use ZeroCostDL4Mic notebooks are available on YouTube:\n","  - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets, as well as a common use of the notebook\n","  - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell:  \n","\n","**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.\n","\n","**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor over the `[ ]`-mark on the left side of the cell (a play button appears) and click it. Once execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you will find three tabs which contain, from top to bottom:\n","\n","*Table of contents* = contains the structure of the notebook. Click an entry to move quickly between sections.\n","\n","*Code snippets* = contains examples of how to code certain tasks. You can ignore this tab when using this notebook.\n","\n","*Files* = contains all available files. After mounting your Google Drive (see section 1) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n","\n","To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"vNMDQHm0Ah-Z","colab_type":"text"},"source":["#**0. Before getting started**\n","---\n"," Preparing the dataset carefully is essential to make this YOLOv2 notebook work. This model requires a set of images (currently .jpg) as input and a list of annotation files in Pascal VOC format as target. The annotation files should have the exact same name as the input files, except with an .xml instead of the .jpg extension. The annotation files contain the class labels and all bounding boxes of the objects in each image of your dataset. Most datasets give the option of saving the annotations in this format, and most hand-annotation software will save the annotations in this format automatically. \n","\n"," If you want to assemble your own dataset, we recommend using the open-source https://www.makesense.ai/ resource. You can follow our instructions on how to label your dataset with this tool on our [wiki](https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki/Object-Detection-(YOLOv2)).\n","\n","**We strongly recommend that you generate extra paired images and annotations. These can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .png or .jpg files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n","  - **Training dataset**\n","    - Input images (Training_source)\n","      - img_1.png, img_2.png, ...\n","    - Annotation files (Training_source_annotations)\n","      - img_1.xml, img_2.xml, ...\n","  - **Quality control dataset**\n","    - Input images\n","      - img_1.png, img_2.png\n","    - Annotation files\n","      - img_1.xml, img_2.xml\n","  - **Data to be predicted**\n","  - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb","colab_type":"text"},"source":["# **1. Initialise the Colab session**\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"BCPhV-pe-syw","colab_type":"text"},"source":["\n","## **1.1. 
Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is the programming language in which this notebook is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","metadata":{"id":"VNZetvLiS1qV","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UBrnApIUBgxv","colab_type":"text"},"source":["## **1.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your account and click 'Allow', then copy the code, paste it into the cell and press Enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data will be available in the **Files** tab on the top left of the notebook."]},{"cell_type":"code","metadata":{"id":"01Djr8v-5pPk","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in to your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on the \"Files\" tab on the left and refresh it. Your Google Drive folder should now be available there as \"gdrive\". \n","\n","# Mounts the user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin","colab_type":"text"},"source":["# **2. 
Install YOLOv2 and Dependencies**\n","---\n"]},{"cell_type":"code","metadata":{"id":"3u2mXn3XsWzd","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Install Network and Dependencies\n","%tensorflow_version 1.x\n","!pip install pascal-voc-writer\n","from pascal_voc_writer import Writer\n","from __future__ import division\n","from __future__ import print_function\n","from __future__ import absolute_import\n","import csv\n","import random\n","import pprint\n","import sys\n","import time\n","import numpy as np\n","from optparse import OptionParser\n","import pickle\n","import math\n","import cv2\n","import copy\n","import math\n","from matplotlib import pyplot as plt\n","import matplotlib.patches as patches\n","import tensorflow as tf\n","import pandas as pd\n","import os\n","import shutil\n","from skimage import io\n","from sklearn.metrics import average_precision_score\n","\n","from keras.models import Model\n","from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout, Reshape, Activation, Conv2D, MaxPooling2D, BatchNormalization, Lambda\n","from keras.layers.advanced_activations import LeakyReLU\n","from keras.layers.merge import concatenate\n","from keras.applications.mobilenet import MobileNet\n","from keras.applications import InceptionV3\n","from keras.applications.vgg16 import VGG16\n","from keras.applications.resnet50 import ResNet50\n","\n","from keras import backend as K\n","from keras.optimizers import Adam, SGD, RMSprop\n","from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed\n","from keras.engine.topology import get_source_inputs\n","from keras.utils import layer_utils\n","from keras.utils.data_utils import get_file\n","from keras.objectives import categorical_crossentropy\n","from keras.models import Model\n","from keras.utils import generic_utils\n","from keras.engine import Layer, InputSpec\n","from keras import initializers, regularizers\n","from keras.utils import Sequence\n","import xml.etree.ElementTree as ET\n","from collections import OrderedDict, Counter\n","import json\n","import imageio\n","import imgaug as ia\n","from imgaug import augmenters as iaa\n","import copy\n","import cv2\n","from tqdm import tqdm\n","from tempfile import mkstemp\n","from shutil import move, copymode\n","from os import fdopen, remove\n","ia.seed(1)\n","# imgaug uses matplotlib backend for displaying images\n","from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n","import re\n","import glob\n","\n","!git clone https://github.com/rodrigo2019/keras_yolo2.git\n","\n","if os.path.exists('/content/gdrive/My Drive/keras-yolo2'):\n"," shutil.rmtree('/content/gdrive/My Drive/keras-yolo2')\n","\n","!git clone https://github.com/experiencor/keras-yolo2.git\n","shutil.move('/content/keras-yolo2','/content/gdrive/My Drive/keras-yolo2')\n","shutil.move('/content/keras_yolo2/keras_yolov2/map_evaluation.py','/content/gdrive/My Drive/keras-yolo2/map_evaluation.py')\n","\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","\n","from backend import BaseFeatureExtractor, FullYoloFeature\n","from preprocessing import parse_annotation, BatchGenerator\n","\n","#shutil.move('/content/map_evaluation.py','/content/gdrive/My Drive/keras-yolo2/map_evaluation.py')\n","\n","print(\"Depencies installed and imported.\")\n","\n","def plt_rectangle(plt,label,x1,y1,x2,y2,fontsize=10):\n"," '''\n"," == Input ==\n"," \n"," plt : matplotlib.pyplot object\n"," label : string containing the object class name\n"," x1 : top left corner 
x coordinate\n"," y1 : top left corner y coordinate\n"," x2 : bottom right corner x coordinate\n"," y2 : bottom right corner y coordinate\n"," '''\n"," linewidth = 1\n"," color = \"yellow\"\n"," plt.text(x1,y1,label,fontsize=fontsize,backgroundcolor=\"magenta\")\n"," plt.plot([x1,x1],[y1,y2], linewidth=linewidth,color=color)\n"," plt.plot([x2,x2],[y1,y2], linewidth=linewidth,color=color)\n"," plt.plot([x1,x2],[y1,y1], linewidth=linewidth,color=color)\n"," plt.plot([x1,x2],[y2,y2], linewidth=linewidth,color=color)\n","\n","def extract_single_xml_file(tree,object_count=True):\n"," Nobj = 0\n"," row = OrderedDict()\n"," for elems in tree.iter():\n","\n"," if elems.tag == \"size\":\n"," for elem in elems:\n"," row[elem.tag] = int(elem.text)\n"," if elems.tag == \"object\":\n"," for elem in elems:\n"," if elem.tag == \"name\":\n"," row[\"bbx_{}_{}\".format(Nobj,elem.tag)] = str(elem.text) \n"," if elem.tag == \"bndbox\":\n"," for k in elem:\n"," row[\"bbx_{}_{}\".format(Nobj,k.tag)] = float(k.text)\n"," Nobj += 1\n"," if object_count == True:\n"," row[\"Nobj\"] = Nobj\n"," return(row)\n","\n","def count_objects(tree):\n"," Nobj=0\n"," for elems in tree.iter():\n"," if elems.tag == \"object\":\n"," for elem in elems:\n"," if elem.tag == \"bndbox\":\n"," Nobj += 1\n"," return(Nobj)\n","\n","def compute_overlap(a, b):\n"," \"\"\"\n"," Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n"," Parameters\n"," ----------\n"," a: (N, 4) ndarray of float\n"," b: (K, 4) ndarray of float\n"," Returns\n"," -------\n"," overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n"," \"\"\"\n"," area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n","\n"," iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])\n"," ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])\n","\n"," iw = np.maximum(iw, 0)\n"," ih = np.maximum(ih, 0)\n","\n"," ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih\n","\n"," ua = np.maximum(ua, np.finfo(float).eps)\n","\n"," intersection = iw * ih\n","\n"," return intersection / ua\n","\n","def compute_ap(recall, precision):\n"," \"\"\" Compute the average precision, given the recall and precision curves.\n"," Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n","\n"," # Arguments\n"," recall: The recall curve (list).\n"," precision: The precision curve (list).\n"," # Returns\n"," The average precision as computed in py-faster-rcnn.\n"," \"\"\"\n"," # correct AP calculation\n"," # first append sentinel values at the end\n"," mrec = np.concatenate(([0.], recall, [1.]))\n"," mpre = np.concatenate(([0.], precision, [0.]))\n","\n"," # compute the precision envelope\n"," for i in range(mpre.size - 1, 0, -1):\n"," mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n","\n"," # to calculate area under PR curve, look for points\n"," # where X axis (recall) changes value\n"," i = np.where(mrec[1:] != mrec[:-1])[0]\n","\n"," # and sum (\\Delta recall) * prec\n"," ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n"," return ap \n","\n","def load_annotation(image_folder,annotations_folder, i, config):\n"," annots = []\n"," imgs, anns = parse_annotation(annotations_folder,image_folder,config['model']['labels'])\n"," for obj in imgs[i]['object']:\n"," annot = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], config['model']['labels'].index(obj['name'])]\n"," annots += [annot]\n","\n"," if len(annots) == 0: annots = 
[[]]\n","\n"," return np.array(annots)\n","\n","def _calc_avg_precisions(config,image_folder,annotations_folder,weights_path,iou_threshold,score_threshold):\n","\n"," # gather all detections and annotations\n"," all_detections = [[None for _ in range(len(config['model']['labels']))] for _ in range(len(os.listdir(image_folder)))]\n"," all_annotations = [[None for _ in range(len(config['model']['labels']))] for _ in range(len(os.listdir(annotations_folder)))]\n","\n"," for i in range(len(os.listdir(image_folder))):\n"," raw_image = cv2.imread(os.path.join(image_folder,sorted(os.listdir(image_folder))[i]))\n"," raw_height, raw_width, _ = raw_image.shape\n"," #print(raw_height)\n"," # make the boxes and the labels\n"," yolo = YOLO(backend = config['model']['backend'],\n"," input_size = config['model']['input_size'], \n"," labels = config['model']['labels'], \n"," max_box_per_image = config['model']['max_box_per_image'],\n"," anchors = config['model']['anchors'])\n"," yolo.load_weights(weights_path)\n"," pred_boxes = yolo.predict(raw_image,iou_threshold=iou_threshold,score_threshold=score_threshold)\n","\n"," score = np.array([box.score for box in pred_boxes])\n"," #print(score)\n"," pred_labels = np.array([box.label for box in pred_boxes])\n"," #print(len(pred_boxes))\n"," if len(pred_boxes) > 0:\n"," pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,\n"," box.ymax * raw_height, box.score] for box in pred_boxes])\n"," else:\n"," pred_boxes = np.array([[]])\n","\n"," # sort the boxes and the labels according to scores\n"," score_sort = np.argsort(-score)\n"," pred_labels = pred_labels[score_sort]\n"," pred_boxes = pred_boxes[score_sort]\n","\n"," # copy detections to all_detections\n"," for label in range(len(config['model']['labels'])):\n"," all_detections[i][label] = pred_boxes[pred_labels == label, :]\n","\n"," annotations = load_annotation(image_folder,annotations_folder,i,config)\n","\n"," # copy ground truth to all_annotations\n"," for label in range(len(config['model']['labels'])):\n"," all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()\n","\n"," # compute mAP by comparing all detections and all annotations\n"," average_precisions = {}\n"," total_recall = []\n"," total_precision = []\n"," for label in range(len(config['model']['labels'])):\n"," false_positives = np.zeros((0,))\n"," true_positives = np.zeros((0,))\n"," scores = np.zeros((0,))\n"," num_annotations = 0.0\n","\n"," for i in range(len(os.listdir(image_folder))):\n"," detections = all_detections[i][label]\n"," annotations = all_annotations[i][label]\n"," num_annotations += annotations.shape[0]\n"," detected_annotations = []\n","\n"," for d in detections:\n"," scores = np.append(scores, d[4])\n","\n"," if annotations.shape[0] == 0:\n"," false_positives = np.append(false_positives, 1)\n"," true_positives = np.append(true_positives, 0)\n"," continue\n","\n"," overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)\n"," assigned_annotation = np.argmax(overlaps, axis=1)\n"," max_overlap = overlaps[0, assigned_annotation]\n","\n"," if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n"," false_positives = np.append(false_positives, 0)\n"," true_positives = np.append(true_positives, 1)\n"," detected_annotations.append(assigned_annotation)\n"," else:\n"," false_positives = np.append(false_positives, 1)\n"," true_positives = np.append(true_positives, 0)\n","\n"," # no annotations -> AP for this class is 0 (is this 
correct?)\n"," if num_annotations == 0:\n"," average_precisions[label] = 0\n"," continue\n","\n"," # sort by score\n"," indices = np.argsort(-scores)\n"," false_positives = false_positives[indices]\n"," true_positives = true_positives[indices]\n","\n"," # compute false positives and true positives\n"," false_positives = np.cumsum(false_positives)\n"," true_positives = np.cumsum(true_positives)\n","\n"," # compute recall and precision\n"," recall = true_positives / num_annotations\n"," precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n"," total_recall.append(recall)\n"," total_precision.append(precision)\n"," #print(precision)\n"," # compute average precision\n"," average_precision = compute_ap(recall, precision)\n"," average_precisions[label] = average_precision\n","\n"," return average_precisions, total_recall, total_precision\n","\n","\n","def show_frame(pred_bb, pred_classes, pred_conf, gt_bb, gt_classes, class_dict, background=np.zeros((512, 512, 3)), show_confidence=True):\n"," \"\"\"\n"," Here, we are adapting classes and functions from https://github.com/MathGaron/mean_average_precision\n"," \"\"\"\n"," \"\"\"\n"," Plot the boundingboxes\n"," :param pred_bb: (np.array) Predicted Bounding Boxes [x1, y1, x2, y2] : Shape [n_pred, 4]\n"," :param pred_classes: (np.array) Predicted Classes : Shape [n_pred]\n"," :param pred_conf: (np.array) Predicted Confidences [0.-1.] : Shape [n_pred]\n"," :param gt_bb: (np.array) Ground Truth Bounding Boxes [x1, y1, x2, y2] : Shape [n_gt, 4]\n"," :param gt_classes: (np.array) Ground Truth Classes : Shape [n_gt]\n"," :param class_dict: (dictionary) Key value pairs of classes, e.g. {0:'dog',1:'cat',2:'horse'}\n"," :return:\n"," \"\"\"\n"," n_pred = pred_bb.shape[0]\n"," n_gt = gt_bb.shape[0]\n"," n_class = int(np.max(np.append(pred_classes, gt_classes)) + 1)\n"," #print(n_class)\n"," if len(background.shape) < 3:\n"," h, w = background.shape\n"," else:\n"," h, w, c = background.shape\n","\n"," ax = plt.subplot(\"111\")\n"," ax.imshow(background)\n"," cmap = plt.cm.get_cmap('hsv')\n","\n"," confidence_alpha = pred_conf.copy()\n"," if not show_confidence:\n"," confidence_alpha.fill(1)\n","\n"," for i in range(n_pred):\n"," x1 = pred_bb[i, 0]# * w\n"," y1 = pred_bb[i, 1]# * h\n"," x2 = pred_bb[i, 2]# * w\n"," y2 = pred_bb[i, 3]# * h\n"," rect_w = x2 - x1\n"," rect_h = y2 - y1\n"," #print(x1, y1)\n"," ax.add_patch(patches.Rectangle((x1, y1), rect_w, rect_h,\n"," fill=False,\n"," edgecolor=cmap(float(pred_classes[i]) / n_class),\n"," linestyle='dashdot',\n"," alpha=confidence_alpha[i]))\n","\n"," for i in range(n_gt):\n"," x1 = gt_bb[i, 0]# * w\n"," y1 = gt_bb[i, 1]# * h\n"," x2 = gt_bb[i, 2]# * w\n"," y2 = gt_bb[i, 3]# * h\n"," rect_w = x2 - x1\n"," rect_h = y2 - y1\n"," ax.add_patch(patches.Rectangle((x1, y1), rect_w, rect_h,\n"," fill=False,\n"," edgecolor=cmap(float(gt_classes[i]) / n_class)))\n","\n"," legend_handles = []\n","\n"," for i in range(n_class):\n"," legend_handles.append(patches.Patch(color=cmap(float(i) / n_class), label=class_dict[i]))\n"," \n"," ax.legend(handles=legend_handles)\n"," plt.show()\n","\n","class BoundBox:\n"," \"\"\"\n"," Here, we are adapting classes and functions from https://github.com/MathGaron/mean_average_precision\n"," \"\"\"\n"," def __init__(self, xmin, ymin, xmax, ymax, c = None, classes = None):\n"," self.xmin = xmin\n"," self.ymin = ymin\n"," self.xmax = xmax\n"," self.ymax = ymax\n"," \n"," self.c = c\n"," self.classes = classes\n","\n"," self.label = -1\n"," 
self.score = -1\n","\n"," def get_label(self):\n"," if self.label == -1:\n"," self.label = np.argmax(self.classes)\n"," \n"," return self.label\n"," \n"," def get_score(self):\n"," if self.score == -1:\n"," self.score = self.classes[self.get_label()]\n"," \n"," return self.score\n","\n","class WeightReader:\n"," def __init__(self, weight_file):\n"," self.offset = 4\n"," self.all_weights = np.fromfile(weight_file, dtype='float32')\n"," \n"," def read_bytes(self, size):\n"," self.offset = self.offset + size\n"," return self.all_weights[self.offset-size:self.offset]\n"," \n"," def reset(self):\n"," self.offset = 4\n","\n","def bbox_iou(box1, box2):\n"," intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])\n"," intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax]) \n"," \n"," intersect = intersect_w * intersect_h\n","\n"," w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin\n"," w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin\n"," \n"," union = w1*h1 + w2*h2 - intersect\n"," \n"," return float(intersect) / union\n","\n","def draw_boxes(image, boxes, labels):\n"," image_h, image_w, _ = image.shape\n"," #Changes in box color added by LvC\n"," # class_colours = []\n"," # for c in range(len(labels)):\n"," # colour = np.random.randint(low=0,high=255,size=3).tolist()\n"," # class_colours.append(tuple(colour))\n"," for box in boxes:\n"," xmin = int(box.xmin*image_w)\n"," ymin = int(box.ymin*image_h)\n"," xmax = int(box.xmax*image_w)\n"," ymax = int(box.ymax*image_h)\n"," if box.get_label() == 0:\n"," cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (255,0,0), 3)\n"," elif box.get_label() == 1:\n"," cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (0,255,0), 3)\n"," else:\n"," cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (0,0,255), 3)\n"," #cv2.rectangle(image, (xmin,ymin), (xmax,ymax), class_colours[box.get_label()], 3)\n"," cv2.putText(image, \n"," labels[box.get_label()] + ' ' + str(round(box.get_score(),3)), \n"," (xmin, ymin - 13), \n"," cv2.FONT_HERSHEY_SIMPLEX, \n"," 1e-3 * image_h, \n"," (0,0,0), 2)\n"," #print(box.get_label()) \n"," return image \n","\n","#Function added by LvC\n","def save_boxes(image_path, boxes, labels):#, save_path):\n"," image = cv2.imread(image_path)\n"," image_h, image_w, _ = image.shape\n"," save_boxes =[]\n"," save_boxes_names = []\n"," save_boxes.append(os.path.basename(image_path))\n"," save_boxes_names.append(os.path.basename(image_path))\n"," for box in boxes:\n"," # xmin = box.xmin\n"," save_boxes.append(int(box.xmin*image_w))\n"," save_boxes_names.append(int(box.xmin*image_w))\n"," # ymin = box.ymin\n"," save_boxes.append(int(box.ymin*image_h))\n"," save_boxes_names.append(int(box.ymin*image_h))\n"," # xmax = box.xmax\n"," save_boxes.append(int(box.xmax*image_w))\n"," save_boxes_names.append(int(box.xmax*image_w))\n"," # ymax = box.ymax\n"," save_boxes.append(int(box.ymax*image_h))\n"," save_boxes_names.append(int(box.ymax*image_h))\n"," score = box.get_score()\n"," save_boxes.append(score)\n"," save_boxes_names.append(score)\n"," label = box.get_label()\n"," save_boxes.append(label)\n"," save_boxes_names.append(labels[label])\n"," \n"," #This file will be for later analysis of the bounding boxes in imagej\n"," if not os.path.exists('/content/predicted_bounding_boxes.csv'):\n"," with open('/content/predicted_bounding_boxes.csv', 'w', newline='') as csvfile:\n"," csvwriter = csv.writer(csvfile, delimiter=',')\n"," specs_list = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*len(boxes)\n"," 
csvwriter.writerow(specs_list)\n"," csvwriter.writerow(save_boxes)\n"," else:\n"," with open('/content/predicted_bounding_boxes.csv', 'a+', newline='') as csvfile:\n"," csvwriter = csv.writer(csvfile)\n"," csvwriter.writerow(save_boxes)\n"," \n"," if not os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n"," with open('/content/predicted_bounding_boxes_names.csv', 'w', newline='') as csvfile_names:\n"," csvwriter = csv.writer(csvfile_names, delimiter=',')\n"," specs_list = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*len(boxes)\n"," csvwriter.writerow(specs_list)\n"," csvwriter.writerow(save_boxes_names)\n"," else:\n"," with open('/content/predicted_bounding_boxes_names.csv', 'a+', newline='') as csvfile_names:\n"," csvwriter = csv.writer(csvfile_names)\n"," csvwriter.writerow(save_boxes_names)\n"," # #This file is to create a nicer display for the output images\n"," # if not os.path.exists('/content/predicted_bounding_boxes_display.csv'):\n"," # with open('/content/predicted_bounding_boxes_display.csv', 'w', newline='') as csvfile_new:\n"," # csvwriter2 = csv.writer(csvfile_new, delimiter=',')\n"," # specs_list = ['filename','width','height','class','xmin','ymin','xmax','ymax']\n"," # csvwriter2.writerow(specs_list)\n"," # else:\n"," # with open('/content/predicted_bounding_boxes_display.csv','a+',newline='') as csvfile_new:\n"," # csvwriter2 = csv.writer(csvfile_new)\n"," # for box in boxes:\n"," # row = [os.path.basename(image_path),image_w,image_h,box.get_label(),int(box.xmin*image_w),int(box.ymin*image_h),int(box.xmax*image_w),int(box.ymax*image_h)]\n"," # csvwriter2.writerow(row)\n","\n","def add_header(inFilePath,outFilePath):\n"," header = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*max(n_objects)\n"," with open(inFilePath, newline='') as inFile, open(outFilePath, 'w', newline='') as outfile:\n"," r = csv.reader(inFile)\n"," w = csv.writer(outfile)\n"," next(r, None) # skip the first row from the reader, the old header\n"," # write new header\n"," w.writerow(header)\n"," # copy the rest\n"," for row in r:\n"," w.writerow(row)\n"," \n","def decode_netout(netout, anchors, nb_class, obj_threshold=0.3, nms_threshold=0.5):\n"," grid_h, grid_w, nb_box = netout.shape[:3]\n","\n"," boxes = []\n"," \n"," # decode the output by the network\n"," netout[..., 4] = _sigmoid(netout[..., 4])\n"," netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])\n"," netout[..., 5:] *= netout[..., 5:] > obj_threshold\n"," \n"," for row in range(grid_h):\n"," for col in range(grid_w):\n"," for b in range(nb_box):\n"," # from 4th element onwards are confidence and class classes\n"," classes = netout[row,col,b,5:]\n"," \n"," if np.sum(classes) > 0:\n"," # first 4 elements are x, y, w, and h\n"," x, y, w, h = netout[row,col,b,:4]\n","\n"," x = (col + _sigmoid(x)) / grid_w # center position, unit: image width\n"," y = (row + _sigmoid(y)) / grid_h # center position, unit: image height\n"," w = anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width\n"," h = anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height\n"," confidence = netout[row,col,b,4]\n"," \n"," box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, confidence, classes)\n"," \n"," boxes.append(box)\n","\n"," # suppress non-maximal boxes\n"," for c in range(nb_class):\n"," sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))\n","\n"," for i in range(len(sorted_indices)):\n"," index_i = sorted_indices[i]\n"," \n"," if boxes[index_i].classes[c] == 
0: \n"," continue\n"," else:\n"," for j in range(i+1, len(sorted_indices)):\n"," index_j = sorted_indices[j]\n"," \n"," if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:\n"," boxes[index_j].classes[c] = 0\n"," \n"," # remove the boxes which are less likely than a obj_threshold\n"," boxes = [box for box in boxes if box.get_score() > obj_threshold]\n"," \n"," return boxes\n","\n","def replace(file_path, pattern, subst):\n"," #Create temp file\n"," fh, abs_path = mkstemp()\n"," with fdopen(fh,'w') as new_file:\n"," with open(file_path) as old_file:\n"," for line in old_file:\n"," new_file.write(line.replace(pattern, subst))\n"," #Copy the file permissions from the old file to the new file\n"," copymode(file_path, abs_path)\n"," #Remove original file\n"," remove(file_path)\n"," #Move new file\n"," move(abs_path, file_path)\n","\n","with open(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"r\") as check:\n"," lineReader = check.readlines()\n"," reduce_lr = False\n"," for line in lineReader:\n"," if \"reduce_lr\" in line:\n"," reduce_lr = True\n"," break\n","\n","if reduce_lr == False:\n"," #replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\",\"period=1)\",\"period=1)\\n csv_logger=CSVLogger('/content/training_evaluation.csv')\")\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\",\"period=1)\",\"period=1)\\n reduce_lr=ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)\")\n","replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\",\"import EarlyStopping\",\"import ReduceLROnPlateau, EarlyStopping\")\n","\n","with open(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"r\") as check:\n"," lineReader = check.readlines()\n"," map_eval = False\n"," for line in lineReader:\n"," if \"map_evaluation\" in line:\n"," map_eval = True\n"," break\n","\n","if map_eval == False:\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"import cv2\",\"import cv2\\nfrom map_evaluation import MapEvaluation\")\n"," new_callback = ' map_evaluator = MapEvaluation(self, valid_generator,save_best=True,save_name=\"/content/gdrive/My Drive/keras-yolo2/best_map_weights.h5\",iou_threshold=0.3,score_threshold=0.3)'\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\",\"write_images=False)\",\"write_images=False)\\n\"+new_callback)\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/map_evaluation.py\",\"import keras\",\"import keras\\nimport csv\")\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/map_evaluation.py\",\"from .utils\",\"from utils\")\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/map_evaluation.py\",\".format(_map))\",\".format(_map))\\n with open('/content/gdrive/My Drive/mAP.csv','a+', newline='') as mAP_csv:\\n csv_writer=csv.writer(mAP_csv)\\n csv_writer.writerow(['mAP:','{:.4f}'.format(_map)])\")\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/map_evaluation.py\",\"iou_threshold=0.5\",\"iou_threshold=0.3\")\n"," replace(\"/content/gdrive/My Drive/keras-yolo2/map_evaluation.py\",\"score_threshold=0.5\",\"score_threshold=0.3\")\n","\n","replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"[early_stop, checkpoint, tensorboard]\",\"[checkpoint, reduce_lr, map_evaluator]\")\n","replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"predict(self, image)\",\"predict(self,image,iou_threshold=0.3,score_threshold=0.3)\")\n","replace(\"/content/gdrive/My Drive/keras-yolo2/frontend.py\", \"self.model.summary()\",\"#self.model.summary()\")\n","from frontend import YOLO\n","\n","def 
train(config_path, model_path, percentage_validation):\n"," #config_path = args.conf\n","\n"," with open(config_path) as config_buffer: \n"," config = json.loads(config_buffer.read())\n","\n"," ###############################\n"," # Parse the annotations \n"," ###############################\n","\n"," # parse annotations of the training set\n"," train_imgs, train_labels = parse_annotation(config['train']['train_annot_folder'], \n"," config['train']['train_image_folder'], \n"," config['model']['labels'])\n","\n"," # parse annotations of the validation set, if any, otherwise split the training set\n"," if os.path.exists(config['valid']['valid_annot_folder']):\n"," valid_imgs, valid_labels = parse_annotation(config['valid']['valid_annot_folder'], \n"," config['valid']['valid_image_folder'], \n"," config['model']['labels'])\n"," else:\n"," train_valid_split = int((1-percentage_validation/100.)*len(train_imgs))\n"," np.random.shuffle(train_imgs)\n","\n"," valid_imgs = train_imgs[train_valid_split:]\n"," train_imgs = train_imgs[:train_valid_split]\n","\n"," if len(config['model']['labels']) > 0:\n"," overlap_labels = set(config['model']['labels']).intersection(set(train_labels.keys()))\n","\n"," print('Seen labels:\\t', train_labels)\n"," print('Given labels:\\t', config['model']['labels'])\n"," print('Overlap labels:\\t', overlap_labels) \n","\n"," if len(overlap_labels) < len(config['model']['labels']):\n"," print('Some labels have no annotations! Please revise the list of labels in the config.json file!')\n"," return\n"," else:\n"," print('No labels are provided. Train on all seen labels.')\n"," config['model']['labels'] = train_labels.keys()\n"," \n"," ###############################\n"," # Construct the model \n"," ###############################\n","\n"," yolo = YOLO(backend = config['model']['backend'],\n"," input_size = config['model']['input_size'], \n"," labels = config['model']['labels'], \n"," max_box_per_image = config['model']['max_box_per_image'],\n"," anchors = config['model']['anchors'])\n","\n"," ###############################\n"," # Load the pretrained weights (if any) \n"," ############################### \n","\n"," if os.path.exists(config['train']['pretrained_weights']):\n"," print(\"Loading pre-trained weights in\", config['train']['pretrained_weights'])\n"," yolo.load_weights(config['train']['pretrained_weights'])\n"," if os.path.exists('/content/gdrive/My Drive/mAP.csv'):\n"," os.remove('/content/gdrive/My Drive/mAP.csv')\n"," ###############################\n"," # Start the training process \n"," ###############################\n","\n"," yolo.train(train_imgs = train_imgs,\n"," valid_imgs = valid_imgs,\n"," train_times = config['train']['train_times'],\n"," valid_times = config['valid']['valid_times'],\n"," nb_epochs = config['train']['nb_epochs'], \n"," learning_rate = config['train']['learning_rate'], \n"," batch_size = config['train']['batch_size'],\n"," warmup_epochs = config['train']['warmup_epochs'],\n"," object_scale = config['train']['object_scale'],\n"," no_object_scale = config['train']['no_object_scale'],\n"," coord_scale = config['train']['coord_scale'],\n"," class_scale = config['train']['class_scale'],\n"," saved_weights_name = config['train']['saved_weights_name'],\n"," debug = config['train']['debug'])\n","\n","# The training evaluation.csv is saved (overwrites the Files if needed). 
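# The CSV written below contains one row per training epoch with the columns:
# loss, val_loss, mAP (read back from the mAP.csv produced by the MapEvaluation callback) and learning rate.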
\n"," lossDataCSVpath = os.path.join(model_path,'Quality Control/training_evaluation.csv')\n"," with open(lossDataCSVpath, 'w') as f1:\n"," writer = csv.writer(f1)\n"," mAP_df = pd.read_csv('/content/gdrive/My Drive/mAP.csv',header=None)\n"," writer.writerow(['loss','val_loss','mAP','learning rate'])\n"," for i in range(len(yolo.model.history.history['loss'])):\n"," writer.writerow([yolo.model.history.history['loss'][i], yolo.model.history.history['val_loss'][i], float(mAP_df[1][i]), yolo.model.history.history['lr'][i]])\n","\n"," yolo.model.save(model_path+'/last_weights.h5')\n","\n","def predict(config, weights_path, image_path):#, model_path):\n","\n"," with open(config) as config_buffer: \n"," config = json.load(config_buffer)\n","\n"," ###############################\n"," # Make the model \n"," ###############################\n","\n"," yolo = YOLO(backend = config['model']['backend'],\n"," input_size = config['model']['input_size'], \n"," labels = config['model']['labels'], \n"," max_box_per_image = config['model']['max_box_per_image'],\n"," anchors = config['model']['anchors'])\n","\n"," ###############################\n"," # Load trained weights\n"," ############################### \n","\n"," yolo.load_weights(weights_path)\n","\n"," ###############################\n"," # Predict bounding boxes \n"," ###############################\n","\n"," if image_path[-4:] == '.mp4':\n"," video_out = image_path[:-4] + '_detected' + image_path[-4:]\n"," video_reader = cv2.VideoCapture(image_path)\n","\n"," nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))\n"," frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n"," frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n","\n"," video_writer = cv2.VideoWriter(video_out,\n"," cv2.VideoWriter_fourcc(*'MPEG'), \n"," 50.0, \n"," (frame_w, frame_h))\n","\n"," for i in tqdm(range(nb_frames)):\n"," _, image = video_reader.read()\n"," \n"," boxes = yolo.predict(image)\n"," image = draw_boxes(image, boxes, config['model']['labels'])\n","\n"," video_writer.write(np.uint8(image))\n","\n"," video_reader.release()\n"," video_writer.release() \n"," else:\n"," image = cv2.imread(image_path)\n"," boxes = yolo.predict(image)\n"," image = draw_boxes(image, boxes, config['model']['labels'])\n"," save_boxes(image_path,boxes,config['model']['labels'])#,model_path)#added by LvC\n"," print(len(boxes), 'boxes are found')\n"," #print(image)\n"," cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)\n"," \n"," return len(boxes)\n","\n","# function to convert BoundingBoxesOnImage object into DataFrame\n","def bbs_obj_to_df(bbs_object):\n","# convert BoundingBoxesOnImage object into array\n"," bbs_array = bbs_object.to_xyxy_array()\n","# convert array into a DataFrame ['xmin', 'ymin', 'xmax', 'ymax'] columns\n"," df_bbs = pd.DataFrame(bbs_array, columns=['xmin', 'ymin', 'xmax', 'ymax'])\n"," return df_bbs\n","\n","# Function that will extract column data for our CSV file\n","def xml_to_csv(path):\n"," xml_list = []\n"," for xml_file in glob.glob(path + '/*.xml'):\n"," tree = ET.parse(xml_file)\n"," root = tree.getroot()\n"," for member in root.findall('object'):\n"," value = (root.find('filename').text,\n"," int(root.find('size')[0].text),\n"," int(root.find('size')[1].text),\n"," member[0].text,\n"," int(member[4][0].text),\n"," int(member[4][1].text),\n"," int(member[4][2].text),\n"," int(member[4][3].text)\n"," )\n"," xml_list.append(value)\n"," column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n"," 
xml_df = pd.DataFrame(xml_list, columns=column_name)\n","    return xml_df"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"Fw0kkTU6CsU4","colab_type":"text"},"source":["# **3. Select your paths and parameters**\n","\n","---\n","\n","The code below allows the user to enter the paths to where the training data is and to define the training parameters.\n","\n","After playing the cell, it will display some quantitative metrics of your dataset, including a count of objects per image and the number of instances per class.\n"]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd","colab_type":"text"},"source":["# **3.1. Parameters and paths**\n","---\n","\n","**`Training_source`, `Training_source_annotations`:** These are the paths to the folders containing your input images and the annotation data, respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, selecting **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only a my_model-style name, not my-model (use \"_\", not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_epochs`:** Input the number of epochs for which the network will be trained. More epochs generally improve performance but increase training time. **Default value: 27**\n","\n","**Note that YOLOv2 uses 3 warm-up epochs, which improves the model's performance. This means the network will train for number_of_epochs + 3 epochs.**\n","\n","**`backend`:** Different backends (feature-extractor architectures with pretrained weights) are available for YOLO. Take a look at the available backends and research which one will be best suited for your dataset.\n","\n","**Advanced Parameters - experienced users only**\n","\n","**`train_times:`** Input how many times to cycle through the dataset per epoch. This is more useful for smaller datasets (but risks overfitting). **Default value: 4**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n","\n","**`learning_rate:`** Input the initial value to be used as learning rate. **Default value: 0.0004**\n","\n","**`false_negative_penalty:`** Penalize wrong detection of 'no-object'. **Default: 5.0**\n","\n","**`false_positive_penalty:`** Penalize wrong detection of 'object'. **Default: 1.0**\n","\n","**`position_size_penalty:`** Penalize inaccurate positioning or size of bounding boxes. **Default: 1.0**\n","\n","**`false_class_penalty:`** Penalize misclassification of an object in a bounding box. **Default: 1.0**\n","\n","**`percentage_validation:`** Input the percentage of your training dataset you want to use to validate the network during training. 
**Default value: 10** "]},{"cell_type":"code","metadata":{"id":"ewpNJ_I0Mv47","colab_type":"code","cellView":"form","colab":{}},"source":["class bcolors:\n"," WARNING = '\\033[31m'\n","\n","#@markdown ###Path to training images:\n","\n","Training_Source = \"\" #@param {type:\"string\"}\n","\n","# Ground truth images\n","Training_Source_annotations = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","# backend\n","#@markdown ###Choose a backend\n","#os.chdir(model_path+'/keras-yolo2')\n","backend = \"Full Yolo\" #@param [\"Select Model\",\"Full Yolo\",\"Inception3\",\"SqueezeNet\",\"MobileNet\",\"Tiny Yolo\"]\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","if backend == \"Full Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/full_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/full_yolo_backend.h5\n","elif backend == \"Inception3\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/inception_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/inception_backend.h5\n","elif backend == \"MobileNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/mobilenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/mobilenet_backend.h5\n","elif backend == \"SqueezeNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/squeezenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/squeezenet_backend.h5\n","elif backend == \"Tiny Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/tiny_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/tiny_yolo_backend.h5\n","\n","#os.chdir('/content/drive/My Drive/Zero-Cost Deep-Learning to Enhance Microscopy/Various dataset/Detection_Dataset_2/BCCD.v2.voc')\n","#if not os.path.exists(model_path+'/full_raccoon.h5'):\n"," # !wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1NWbrpMGLc84ow-4gXn2mloFocFGU595s' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1NWbrpMGLc84ow-4gXn2mloFocFGU595s\" -O full_yolo_raccoon.h5 && rm -rf /tmp/cookies.txt\n","\n","full_model_path = os.path.join(model_path,model_name)\n","if os.path.exists(full_model_path):\n"," print('Existing model path will be overwritten')\n"," shutil.rmtree(full_model_path)\n","os.mkdir(full_model_path)\n","\n","full_model_file_path = full_model_path+'/best_weights.h5'\n","os.chdir('/content/gdrive/My Drive/keras-yolo2/')\n","\n","#Change backend name\n","!sed -i 's@\\\"backend\\\":.*,@\\\"backend\\\": \\\"$backend\\\",@g' config.json\n","\n","#Change the name of the training folder\n","!sed -i 's@\\\"train_image_folder\\\":.*,@\\\"train_image_folder\\\": \\\"$Training_Source/\\\",@g' config.json\n","\n","#Change annotation folder\n","!sed -i 's@\\\"train_annot_folder\\\":.*,@\\\"train_annot_folder\\\": \\\"$Training_Source_annotations/\\\",@g' config.json\n","\n","#Change the name of the saved model\n","!sed -i 
's@\\\"saved_weights_name\\\":.*,@\\\"saved_weights_name\\\": \\\"$full_model_file_path\\\",@g' config.json\n","\n","#Change warmup epochs for untrained model\n","!sed -i 's@\\\"warmup_epochs\\\":.*,@\\\"warmup_epochs\\\": 3,@g' config.json\n","\n","#When defining a new model we should reset the pretrained model parameter\n","!sed -i 's@\\\"pretrained_weights\\\":.*,@\\\"pretrained_weights\\\": \\\"No_pretrained_weights\\\",@g' config.json\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","#@markdown Number of epochs:\n","\n","number_of_epochs = 10#@param {type:\"number\"}\n","!sed -i 's@\\\"nb_epochs\\\":.*,@\\\"nb_epochs\\\": $number_of_epochs,@g' config.json\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","train_times = 4 #@param {type:\"integer\"}\n","batch_size = 4#@param {type:\"number\"}\n","learning_rate = 1e-4 #@param{type:\"number\"}\n","false_negative_penalty = 5.0 #@param{type:\"number\"}\n","false_positive_penalty = 2.0 #@param{type:\"number\"}\n","position_size_penalty = 1.0 #@param{type:\"number\"}\n","false_class_penalty = 1.0 #@param{type:\"number\"}\n","percentage_validation = 10#@param{type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," train_times = 4\n"," batch_size = 8\n"," learning_rate = 1e-4\n"," false_negative_penalty = 5.0\n"," false_positive_penalty = 1.0\n"," position_size_penalty = 1.0\n"," false_class_penalty = 1.0\n"," percentage_validation = 10\n","\n","!sed -i 's@\\\"train_times\\\":.*,@\\\"train_times\\\": $train_times,@g' config.json\n","!sed -i 's@\\\"batch_size\\\":.*,@\\\"batch_size\\\": $batch_size,@g' config.json\n","!sed -i 's@\\\"learning_rate\\\":.*,@\\\"learning_rate\\\": $learning_rate,@g' config.json\n","!sed -i 's@\\\"object_scale\":.*,@\\\"object_scale\\\": $false_negative_penalty,@g' config.json\n","!sed -i 's@\\\"no_object_scale\":.*,@\\\"no_object_scale\\\": $false_positive_penalty,@g' config.json\n","!sed -i 's@\\\"coord_scale\\\":.*,@\\\"coord_scale\\\": $position_size_penalty,@g' config.json\n","!sed -i 's@\\\"class_scale\\\":.*,@\\\"class_scale\\\": $false_class_penalty,@g' config.json\n","\n","df_anno = []\n","dir_anno = Training_Source_annotations\n","for fnm in os.listdir(dir_anno): \n"," if not fnm.startswith('.'): ## do not include hidden folders/files\n"," tree = ET.parse(os.path.join(dir_anno,fnm))\n"," row = extract_single_xml_file(tree)\n"," row[\"fileID\"] = os.path.splitext(fnm)[0]\n"," df_anno.append(row)\n","df_anno = pd.DataFrame(df_anno)\n","\n","maxNobj = np.max(df_anno[\"Nobj\"])\n","\n","#Write the annotations to a csv file\n","df_anno.to_csv(model_path+'/annot.csv', index=False)#header=False, sep=',')\n","\n","file_suffix = os.path.splitext(os.listdir(Training_Source)[0])[1]\n","\n","#Show how many objects there are in the images\n","plt.figure()\n","plt.subplot(2,1,1)\n","plt.hist(df_anno[\"Nobj\"].values,bins=50)\n","plt.title(\"max N of objects per image={}\".format(maxNobj))\n","plt.show()\n","\n","#Show the classes and how many there are of each in the dataset\n","from collections import Counter\n","class_obj = []\n","for ibbx in range(maxNobj):\n"," class_obj.extend(df_anno[\"bbx_{}_name\".format(ibbx)].values)\n","class_obj = np.array(class_obj)\n","\n","count = Counter(class_obj[class_obj != 'nan'])\n","print(count)\n","class_nm = list(count.keys())\n","class_labels = 
json.dumps(class_nm)\n","class_count = list(count.values())\n","asort_class_count = np.argsort(class_count)\n","\n","class_nm = np.array(class_nm)[asort_class_count]\n","class_count = np.array(class_count)[asort_class_count]\n","\n","!sed -i 's@\\\"labels\\\":.*@\\\"labels\\\": $class_labels@g' config.json\n","xs = range(len(class_count))\n","\n","plt.subplot(2,1,2)\n","plt.barh(xs,class_count)\n","plt.yticks(xs,class_nm)\n","plt.title(\"The number of objects per class: {} objects in total\".format(len(count)))\n","plt.show()\n","\n","\n","#Generate anchors for the bounding boxes\n","import subprocess as sp\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","output = sp.getoutput('python ./gen_anchors.py -c ./config.json')\n","\n","anchors_1 = output.find(\"[\")\n","anchors_2 = output.find(\"]\")\n","\n","config_anchors = output[anchors_1:anchors_2+1]\n","!sed -i 's@\\\"anchors\\\":.*,@\\\"anchors\\\": $config_anchors,@g' config.json\n","#Here we check that no model with the same name already exists; if so, it is deleted\n","#if os.path.exists(model_path+'/'+model_name):\n","  # shutil.rmtree(model_path+'/'+model_name)\n","\n","Use_pretrained_model = False"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab_type":"code","cellView":"form","id":"NXxj-Xi3Kang","colab":{}},"source":["#@markdown ###Play this cell to visualise some example images from your dataset to make sure annotations and images are properly matched.\n","import imageio\n"," \n","size = 3  \n","ind_random = np.random.randint(0,df_anno.shape[0],size=size)\n","img_dir=Training_Source\n","\n","file_suffix = os.path.splitext(os.listdir(Training_Source)[0])[1]\n","for irow in ind_random:\n","    row = df_anno.iloc[irow,:]\n","    path = os.path.join(img_dir, row[\"fileID\"] + file_suffix)\n","    # read in image\n","    img = imageio.imread(path)\n","\n","    plt.figure(figsize=(12,12))\n","    plt.imshow(img) # plot image\n","    plt.title(\"Nobj={}, height={}, width={}\".format(row[\"Nobj\"],row[\"height\"],row[\"width\"]))\n","    # for each object in the image, plot the bounding box\n","    for iplot in range(row[\"Nobj\"]):\n","        plt_rectangle(plt,\n","                      label = row[\"bbx_{}_name\".format(iplot)],\n","                      x1=row[\"bbx_{}_xmin\".format(iplot)],\n","                      y1=row[\"bbx_{}_ymin\".format(iplot)],\n","                      x2=row[\"bbx_{}_xmax\".format(iplot)],\n","                      y2=row[\"bbx_{}_ymax\".format(iplot)])\n","    plt.show() ## show the plot"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"eik5zLKWpN_O","colab_type":"text"},"source":["## **3.2. Data augmentation**\n","\n","---\n","\n"," Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and, if the dataset is large enough, the `Use_Data_Augmentation` box can be unticked.\n","\n","Here, the images and bounding boxes are augmented by flipping and rotation. When doubling the dataset, the images are only flipped. With each higher factor of augmentation, the images added to the dataset represent one further rotation to the right by 90 degrees (a minimal sketch of this flip-and-rotate scheme is shown just below). 
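To make the augmentation factors concrete, here is a minimal, self-contained sketch of the same flip-then-rotate idea with imgaug, showing how a bounding box is transformed alongside the image. It is independent of the cell below and uses a random test image with one made-up box rather than your data.

```python
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage

# A random 128x128 RGB test image with one example bounding box.
image = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
bbs = BoundingBoxesOnImage([BoundingBox(x1=20, y1=30, x2=60, y2=80)], shape=image.shape)

# 2x augmentation: add a flipped copy (horizontal or vertical, as in the cell below).
flip = iaa.OneOf([iaa.Fliplr(1), iaa.Flipud(1)])
# Higher factors: add copies rotated by a further 90 degrees each time.
rot90 = iaa.Affine(rotate=90, fit_output=True)

# Applying an augmenter returns the transformed image and the transformed boxes together.
flipped_image, flipped_bbs = flip(image=image, bounding_boxes=bbs)
rotated_image, rotated_bbs = rot90(image=flipped_image, bounding_boxes=flipped_bbs)

print(flipped_bbs.bounding_boxes[0])   # box after flipping
print(rotated_bbs.bounding_boxes[0])   # box after flipping + 90 degree rotation
```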
8x augmentation will give a dataset that is fully rotated and flipped once."]},{"cell_type":"code","metadata":{"id":"RmTSfMO-pNMc","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##**Augmentation Options**\n","\n","def image_aug(df, images_path, aug_images_path, image_prefix, augmentor):\n"," # create data frame which we're going to populate with augmented image info\n"," aug_bbs_xy = pd.DataFrame(columns=\n"," ['filename','width','height','class', 'xmin', 'ymin', 'xmax', 'ymax']\n"," )\n"," grouped = df.groupby('filename')\n"," \n"," for filename in df['filename'].unique():\n"," # get separate data frame grouped by file name\n"," group_df = grouped.get_group(filename)\n"," group_df = group_df.reset_index()\n"," group_df = group_df.drop(['index'], axis=1) \n"," # read the image\n"," image = imageio.imread(images_path+filename)\n"," # get bounding boxes coordinates and write into array \n"," bb_array = group_df.drop(['filename', 'width', 'height', 'class'], axis=1).values\n"," # pass the array of bounding boxes coordinates to the imgaug library\n"," bbs = BoundingBoxesOnImage.from_xyxy_array(bb_array, shape=image.shape)\n"," # apply augmentation on image and on the bounding boxes\n"," image_aug, bbs_aug = augmentor(image=image, bounding_boxes=bbs)\n"," # disregard bounding boxes which have fallen out of image pane \n"," bbs_aug = bbs_aug.remove_out_of_image()\n"," # clip bounding boxes which are partially outside of image pane\n"," bbs_aug = bbs_aug.clip_out_of_image()\n"," \n"," # don't perform any actions with the image if there are no bounding boxes left in it \n"," if re.findall('Image...', str(bbs_aug)) == ['Image([]']:\n"," pass\n"," \n"," # otherwise continue\n"," else:\n"," # write augmented image to a file\n"," imageio.imwrite(aug_images_path+image_prefix+filename, image_aug) \n"," # create a data frame with augmented values of image width and height\n"," info_df = group_df.drop(['xmin', 'ymin', 'xmax', 'ymax'], axis=1) \n"," for index, _ in info_df.iterrows():\n"," info_df.at[index, 'width'] = image_aug.shape[1]\n"," info_df.at[index, 'height'] = image_aug.shape[0]\n"," # rename filenames by adding the predifined prefix\n"," info_df['filename'] = info_df['filename'].apply(lambda x: image_prefix+x)\n"," # create a data frame with augmented bounding boxes coordinates using the function we created earlier\n"," bbs_df = bbs_obj_to_df(bbs_aug)\n"," # concat all new augmented info into new data frame\n"," aug_df = pd.concat([info_df, bbs_df], axis=1)\n"," # append rows to aug_bbs_xy data frame\n"," aug_bbs_xy = pd.concat([aug_bbs_xy, aug_df]) \n"," \n"," # return dataframe with updated images and bounding boxes annotations \n"," aug_bbs_xy = aug_bbs_xy.reset_index()\n"," aug_bbs_xy = aug_bbs_xy.drop(['index'], axis=1)\n"," return aug_bbs_xy\n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","multiply_dataset_by = 3 #@param {type:\"slider\", min:2, max:8, step:1}\n","\n","rotation_range = 90\n","\n","if (Use_Data_augmentation):\n"," print('Data Augmentation enabled')\n"," # load images as NumPy arrays and append them to images list\n"," if os.path.exists(Training_Source+'/.ipynb_checkpoints'):\n"," shutil.rmtree(Training_Source+'/.ipynb_checkpoints')\n"," \n"," images = []\n"," for index, file in enumerate(glob.glob(Training_Source+'/*'+file_suffix)):\n"," images.append(imageio.imread(file))\n"," \n"," # how many images we have\n"," print('Augmenting {} images'.format(len(images)))\n","\n"," # apply xml_to_csv() function to convert all XML 
files in images/ folder into labels.csv\n"," labels_df = xml_to_csv(Training_Source_annotations)\n"," labels_df.to_csv(('/content/original_labels.csv'), index=None)\n"," \n"," # Apply flip augmentation\n"," aug = iaa.OneOf([ \n"," iaa.Fliplr(1),\n"," iaa.Flipud(1)\n"," ])\n"," aug_2 = iaa.Affine(rotate=rotation_range, fit_output=True)\n"," aug_3 = iaa.Affine(rotate=rotation_range*2, fit_output=True)\n"," aug_4 = iaa.Affine(rotate=rotation_range*3, fit_output=True)\n","\n"," #Here we create a folder that will hold the original image dataset and the augmented image dataset\n"," augmented_training_source = os.path.dirname(Training_Source)+'/'+os.path.basename(Training_Source)+'_augmentation'\n"," if os.path.exists(augmented_training_source):\n"," shutil.rmtree(augmented_training_source)\n"," os.mkdir(augmented_training_source)\n","\n"," #Here we create a folder that will hold the original image annotation dataset and the augmented image annotation dataset (the bounding boxes).\n"," augmented_training_source_annotation = os.path.dirname(Training_Source_annotations)+'/'+os.path.basename(Training_Source_annotations)+'_augmentation'\n"," if os.path.exists(augmented_training_source_annotation):\n"," shutil.rmtree(augmented_training_source_annotation)\n"," os.mkdir(augmented_training_source_annotation)\n","\n"," #Create the augmentation\n"," augmented_images_df = image_aug(labels_df, Training_Source+'/', augmented_training_source+'/', 'flip_', aug)\n"," \n"," # Concat resized_images_df and augmented_images_df together and save in a new all_labels.csv file\n"," all_labels_df = pd.concat([labels_df, augmented_images_df])\n"," all_labels_df.to_csv('/content/combined_labels.csv', index=False)\n","\n"," #Here we convert the new bounding boxes for the augmented images to PASCAL VOC .xml format\n"," def convert_to_xml(df,source,target_folder):\n"," grouped = df.groupby('filename')\n"," for file in os.listdir(source):\n"," #if file in grouped.filename:\n"," group_df = grouped.get_group(file)\n"," group_df = group_df.reset_index()\n"," group_df = group_df.drop(['index'], axis=1)\n"," #group_df = group_df.dropna(axis=0)\n"," writer = Writer(source+'/'+file,group_df.iloc[1]['width'],group_df.iloc[1]['height'])\n"," for i, row in group_df.iterrows():\n"," writer.addObject(row['class'],round(row['xmin']),round(row['ymin']),round(row['xmax']),round(row['ymax']))\n"," writer.save(target_folder+'/'+os.path.splitext(file)[0]+'.xml')\n"," convert_to_xml(all_labels_df,augmented_training_source,augmented_training_source_annotation)\n"," \n"," #Second round of augmentation\n"," if multiply_dataset_by > 2:\n"," aug_labels_df_2 = xml_to_csv(augmented_training_source_annotation)\n"," augmented_images_2_df = image_aug(aug_labels_df_2, augmented_training_source+'/', augmented_training_source+'/', 'rot1_90_', aug_2)\n"," all_aug_labels_df = pd.concat([augmented_images_df, augmented_images_2_df])\n"," #all_labels_df.to_csv('/content/all_labels_aug.csv', index=False)\n"," \n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," convert_to_xml(all_aug_labels_df,augmented_training_source,augmented_training_source_annotation)\n","\n"," if multiply_dataset_by > 3:\n"," print('Augmenting again')\n"," aug_labels_df_3 = xml_to_csv(augmented_training_source_annotation)\n"," augmented_images_3_df = image_aug(aug_labels_df_3, augmented_training_source+'/', augmented_training_source+'/', 'rot2_90_', aug_2)\n"," all_aug_labels_df_3 = 
pd.concat([all_aug_labels_df, augmented_images_3_df])\n","\n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," convert_to_xml(all_aug_labels_df_3,augmented_training_source,augmented_training_source_annotation)\n"," \n"," #This is a preliminary remover of potential duplicates in the augmentation\n"," #Ideally, duplicates are not even produced, but this acts as a fail safe.\n"," if multiply_dataset_by==4:\n"," for file in os.listdir(augmented_training_source):\n"," if file.startswith('rot2_90_flip_'):\n"," os.remove(os.path.join(augmented_training_source,file))\n"," os.remove(os.path.join(augmented_training_source_annotation, os.path.splitext(file)[0]+'.xml'))\n","\n"," if multiply_dataset_by > 4:\n"," print('And Again')\n"," aug_labels_df_4 = xml_to_csv(augmented_training_source_annotation)\n"," augmented_images_4_df = image_aug(aug_labels_df_4, augmented_training_source+'/',augmented_training_source+'/','rot3_90_', aug_2)\n"," all_aug_labels_df_4 = pd.concat([all_aug_labels_df_3, augmented_images_4_df])\n","\n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," convert_to_xml(all_aug_labels_df_4,augmented_training_source,augmented_training_source_annotation)\n","\n"," for file in os.listdir(augmented_training_source):\n"," if file.startswith('rot3_90_rot2_90_flip_'):\n"," os.remove(os.path.join(augmented_training_source,file))\n"," os.remove(os.path.join(augmented_training_source_annotation, os.path.splitext(file)[0]+'.xml'))\n"," if file.startswith('rot3_90_rot1_90_flip_'):\n"," os.remove(os.path.join(augmented_training_source,file))\n"," os.remove(os.path.join(augmented_training_source_annotation, os.path.splitext(file)[0]+'.xml'))\n"," if file.startswith('rot3_90_flip_'):\n"," os.remove(os.path.join(augmented_training_source,file))\n"," os.remove(os.path.join(augmented_training_source_annotation, os.path.splitext(file)[0]+'.xml'))\n"," if file.startswith('rot2_90_flip_'):\n"," os.remove(os.path.join(augmented_training_source,file))\n"," os.remove(os.path.join(augmented_training_source_annotation, os.path.splitext(file)[0]+'.xml'))\n","\n","\n"," if multiply_dataset_by > 5:\n"," print('And again')\n"," augmented_images_5_df = image_aug(labels_df, Training_Source+'/', augmented_training_source+'/', 'rot_90_', aug_2)\n"," all_aug_labels_df_5 = pd.concat([all_aug_labels_df_4,augmented_images_5_df])\n","\n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," \n"," convert_to_xml(all_aug_labels_df_5,augmented_training_source,augmented_training_source_annotation)\n","\n"," if multiply_dataset_by > 6:\n"," print('And again')\n"," augmented_images_df_6 = image_aug(labels_df, Training_Source+'/', augmented_training_source+'/', 'rot_180_', aug_3)\n"," all_aug_labels_df_6 = pd.concat([all_aug_labels_df_5,augmented_images_df_6])\n"," \n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," convert_to_xml(all_aug_labels_df_6,augmented_training_source,augmented_training_source_annotation)\n","\n"," if multiply_dataset_by > 7:\n"," print('And again')\n"," augmented_images_df_7 = image_aug(labels_df, Training_Source+'/', augmented_training_source+'/', 'rot_270_', aug_4)\n"," all_aug_labels_df_7 = 
pd.concat([all_aug_labels_df_6,augmented_images_df_7])\n"," \n"," for file in os.listdir(augmented_training_source_annotation):\n"," os.remove(os.path.join(augmented_training_source_annotation,file))\n"," convert_to_xml(all_aug_labels_df_7,augmented_training_source,augmented_training_source_annotation)\n","\n"," for file in os.listdir(Training_Source):\n"," shutil.copyfile(Training_Source+'/'+file,augmented_training_source+'/'+file)\n"," shutil.copyfile(Training_Source_annotations+'/'+os.path.splitext(file)[0]+'.xml',augmented_training_source_annotation+'/'+os.path.splitext(file)[0]+'.xml')\n"," # display new dataframe\n"," #augmented_images_df\n"," \n"," os.chdir('/content/gdrive/My Drive/keras-yolo2')\n"," #Change the name of the training folder\n"," !sed -i 's@\\\"train_image_folder\\\":.*,@\\\"train_image_folder\\\": \\\"$augmented_training_source/\\\",@g' config.json\n","\n"," #Change annotation folder\n"," !sed -i 's@\\\"train_annot_folder\\\":.*,@\\\"train_annot_folder\\\": \\\"$augmented_training_source_annotation/\\\",@g' config.json\n","\n"," df_anno = []\n"," dir_anno = augmented_training_source_annotation\n"," for fnm in os.listdir(dir_anno): \n"," if not fnm.startswith('.'): ## do not include hidden folders/files\n"," tree = ET.parse(os.path.join(dir_anno,fnm))\n"," row = extract_single_xml_file(tree)\n"," row[\"fileID\"] = os.path.splitext(fnm)[0]\n"," df_anno.append(row)\n"," df_anno = pd.DataFrame(df_anno)\n","\n"," maxNobj = np.max(df_anno[\"Nobj\"])\n","\n"," #Write the annotations to a csv file\n"," #df_anno.to_csv(model_path+'/annot.csv', index=False)#header=False, sep=',')\n","\n"," #Show how many objects there are in the images\n"," plt.figure()\n"," plt.subplot(2,1,1)\n"," plt.hist(df_anno[\"Nobj\"].values,bins=50)\n"," plt.title(\"max N of objects per image={}\".format(maxNobj))\n"," plt.show()\n","\n"," #Show the classes and how many there are of each in the dataset\n"," from collections import Counter\n"," class_obj = []\n"," for ibbx in range(maxNobj):\n"," class_obj.extend(df_anno[\"bbx_{}_name\".format(ibbx)].values)\n"," class_obj = np.array(class_obj)\n","\n"," count = Counter(class_obj[class_obj != 'nan'])\n"," print(count)\n"," class_nm = list(count.keys())\n"," class_labels = json.dumps(class_nm)\n"," class_count = list(count.values())\n"," asort_class_count = np.argsort(class_count)\n","\n"," class_nm = np.array(class_nm)[asort_class_count]\n"," class_count = np.array(class_count)[asort_class_count]\n","\n"," xs = range(len(class_count))\n","\n"," plt.subplot(2,1,2)\n"," plt.barh(xs,class_count)\n"," plt.yticks(xs,class_nm)\n"," plt.title(\"The number of objects per class: {} objects in total\".format(len(count)))\n"," plt.show()\n","\n","else:\n"," print('No augmentation will be used')"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"tZvcYmxTdXQm","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ###Play this cell to visualise some example images from your **augmented** dataset to make sure annotations and images are properly matched.\n","if (Use_Data_augmentation):\n"," df_anno_aug = []\n"," dir_anno_aug = augmented_training_source_annotation\n"," for fnm in os.listdir(dir_anno_aug): \n"," if not fnm.startswith('.'): ## do not include hidden folders/files\n"," tree = ET.parse(os.path.join(dir_anno_aug,fnm))\n"," row = extract_single_xml_file(tree)\n"," row[\"fileID\"] = os.path.splitext(fnm)[0]\n"," df_anno_aug.append(row)\n"," df_anno_aug = pd.DataFrame(df_anno_aug)\n","\n"," size = 3 \n"," ind_random = 
np.random.randint(0,df_anno_aug.shape[0],size=size)\n"," img_dir=augmented_training_source\n","\n"," file_suffix = os.path.splitext(os.listdir(augmented_training_source)[0])[1]\n"," for irow in ind_random:\n"," row = df_anno_aug.iloc[irow,:]\n"," path = os.path.join(img_dir, row[\"fileID\"] + file_suffix)\n"," # read in image\n"," img = imageio.imread(path)\n","\n"," plt.figure(figsize=(12,12))\n"," plt.imshow(img) # plot image\n"," plt.title(\"Nobj={}, height={}, width={}\".format(row[\"Nobj\"],row[\"height\"],row[\"width\"]))\n"," # for each object in the image, plot the bounding box\n"," for iplot in range(row[\"Nobj\"]):\n"," plt_rectangle(plt,\n"," label = row[\"bbx_{}_name\".format(iplot)],\n"," x1=row[\"bbx_{}_xmin\".format(iplot)],\n"," y1=row[\"bbx_{}_ymin\".format(iplot)],\n"," x2=row[\"bbx_{}_xmax\".format(iplot)],\n"," y2=row[\"bbx_{}_ymax\".format(iplot)])\n"," plt.show() ## show the plot\n"," print('These are the augmented training images.')\n","\n","else:\n"," for irow in ind_random:\n"," row = df_anno.iloc[irow,:]\n"," path = os.path.join(img_dir, row[\"fileID\"] + file_suffix)\n"," # read in image\n"," img = imageio.imread(path)\n","\n"," plt.figure(figsize=(12,12))\n"," plt.imshow(img) # plot image\n"," plt.title(\"Nobj={}, height={}, width={}\".format(row[\"Nobj\"],row[\"height\"],row[\"width\"]))\n"," # for each object in the image, plot the bounding box\n"," for iplot in range(row[\"Nobj\"]):\n"," plt_rectangle(plt,\n"," label = row[\"bbx_{}_name\".format(iplot)],\n"," x1=row[\"bbx_{}_xmin\".format(iplot)],\n"," y1=row[\"bbx_{}_ymin\".format(iplot)],\n"," x2=row[\"bbx_{}_xmax\".format(iplot)],\n"," y2=row[\"bbx_{}_ymax\".format(iplot)])\n"," plt.show() ## show the plot\n"," print('These are the non-augmented training images.')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rQndJj70FzfL","colab_type":"text"},"source":["# **4. 
Train the network**\n","---"]},{"cell_type":"code","metadata":{"id":"_cvRRrStGe3y","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Loading weights from a pretrained network\n","\n","# Training_Source = \"\" #@param{type:\"string\"}\n","# Training_Source_annotation = \"\" #@param{type:\"string\"}\n","# Check if the right files exist\n","\n","Use_pretrained_model = False #@param {type:\"boolean\"}\n","\n","Weights_choice = \"best\" #@param [\"last\", \"best\"]\n","\n","pretrained_model_path = \"\" #@param{type:\"string\"}\n","h5_file_path = pretrained_model_path+'/'+Weights_choice+'_weights.h5'\n","\n","if not os.path.exists(h5_file_path):\n"," print('WARNING pretrained model does not exist')\n"," Use_pretrained_model = False\n","\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","!sed -i 's@\\\"pretrained_weights\\\":.*,@\\\"pretrained_weights\\\": \\\"$h5_file_path\\\",@g' config.json\n","\n","if Use_pretrained_model == True:\n"," with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n"," csvRead = pd.read_csv(csvfile, sep=',')\n"," if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4):\n"," print(\"pretrained network learning rate found\")\n"," #find the last learning rate\n"," lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n"," #Find the learning rate corresponding to the lowest validation loss\n"," min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n"," #print(min_val_loss)\n"," bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n"," if Weights_choice == \"last\":\n"," print('Last learning rate: '+str(lastLearningRate))\n"," learning_rate = lastLearningRate\n","\n"," if Weights_choice == \"best\":\n"," print('Learning rate of best validation loss: '+str(bestLearningRate))\n"," learning_rate = bestLearningRate\n","\n"," if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n"," #bestLearningRate = learning_rate\n"," #lastLearningRate = learning_rate\n"," print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. 
Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n"," \n"," !sed -i 's@\\\"warmup_epochs\\\":.*,@\\\"warmup_epochs\\\": 0,@g' config.json\n"," !sed -i 's@\\\"learning_rate\\\":.*,@\\\"learning_rate\\\": $learning_rate,@g' config.json\n","\n","# with open(os.path.join(pretrained_model_path, 'Quality Control', 'lr.csv'),'r') as csvfile:\n","# csvRead = pd.read_csv(csvfile, sep=',')\n","# #print(csvRead)\n"," \n","# if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n","# print(\"pretrained network learning rate found\")\n","# #find the last learning rate\n","# lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n","# #Find the learning rate corresponding to the lowest validation loss\n","# min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n","# #print(min_val_loss)\n","# bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n","\n","# if Weights_choice == \"last\":\n","# print('Last learning rate: '+str(lastLearningRate))\n","\n","# if Weights_choice == \"best\":\n","# print('Learning rate of best validation loss: '+str(bestLearningRate))\n","\n","# if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n","# bestLearningRate = initial_learning_rate\n","# lastLearningRate = initial_learning_rate\n","# print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"wQPz0F6JlvJR","colab_type":"text"},"source":["## **4.1. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","metadata":{"id":"EZnoS3rb8BSR","colab_type":"code","cellView":"form","colab":{}},"source":["import time\n","import csv\n","#from frontend import YOLO\n","\n","if os.path.exists(full_model_path+\"/Quality Control\"):\n"," shutil.rmtree(full_model_path+\"/Quality Control\")\n","os.makedirs(full_model_path+\"/Quality Control\")\n","\n","start = time.time()\n","\n","#@markdown ##Start Training\n","\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","train('config.json', full_model_path, percentage_validation)\n","\n","shutil.copyfile('/content/gdrive/My Drive/keras-yolo2/config.json',full_model_path+'/config.json')\n","\n","if os.path.exists('/content/gdrive/My Drive/keras-yolo2/best_map_weights.h5'):\n"," shutil.move('/content/gdrive/My Drive/keras-yolo2/best_map_weights.h5',full_model_path+'/best_map_weights.h5')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"XQjQb_J_Qyku","colab_type":"text"},"source":["##**4.3. Download your model(s) from Google Drive**\n","\n","\n","---\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. 
It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"2HbZd7rFqAad","colab_type":"text"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","metadata":{"id":"EdcnkCr9Nbl8","colab_type":"code","cellView":"form","colab":{}},"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the name of the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","if (Use_the_current_trained_model): \n"," QC_model_folder = full_model_path\n","\n","#print(os.path.join(model_path, model_name))\n","\n","if os.path.exists(QC_model_folder):\n"," print(\"The \"+os.path.basename(QC_model_folder)+\" model will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path before proceeding further.')\n","\n","if Use_the_current_trained_model == False:\n"," if os.path.exists('/content/gdrive/My Drive/keras-yolo2/config.json'):\n"," os.remove('/content/gdrive/My Drive/keras-yolo2/config.json')\n"," shutil.copyfile(QC_model_folder+'/config.json','/content/gdrive/My Drive/keras-yolo2/config.json')\n","\n","#@markdown ###Which backend is the model using?\n","backend = \"Full Yolo\" #@param [\"Select Model\",\"Full Yolo\",\"Inception3\",\"SqueezeNet\",\"MobileNet\",\"Tiny Yolo\"]\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","if backend == \"Full Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/full_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/full_yolo_backend.h5\n","elif backend == \"Inception3\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/inception_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/inception_backend.h5\n","elif backend == \"MobileNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/mobilenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/mobilenet_backend.h5\n","elif backend == \"SqueezeNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/squeezenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/squeezenet_backend.h5\n","elif backend == \"Tiny Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/tiny_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/tiny_yolo_backend.h5\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yDY9dtzdUTLh","colab_type":"text"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. 
For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased."]},{"cell_type":"code","metadata":{"id":"vMzSP50kMv5p","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","import csv\n","from matplotlib import pyplot as plt\n","\n","lossDataFromCSV = []\n","vallossDataFromCSV = []\n","mAPDataFromCSV = []\n","with open(QC_model_folder+'/Quality Control/training_evaluation.csv','r') as csvfile:\n"," csvRead = csv.reader(csvfile, delimiter=',')\n"," next(csvRead)\n"," for row in csvRead:\n"," lossDataFromCSV.append(float(row[0]))\n"," vallossDataFromCSV.append(float(row[1]))\n"," mAPDataFromCSV.append(float(row[2]))\n","epochNumber = range(len(lossDataFromCSV))\n","plt.figure(figsize=(20,15))\n","\n","plt.subplot(3,1,1)\n","plt.plot(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (linear scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","\n","plt.subplot(3,1,2)\n","plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\n","plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\n","plt.title('Training loss and validation loss vs. epoch number (log scale)')\n","plt.ylabel('Loss')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","#plt.savefig(os.path.dirname(QC_model_folder)+'/Quality Control/lossCurvePlots.png')\n","#plt.show()\n","\n","plt.subplot(3,1,3)\n","plt.plot(epochNumber,mAPDataFromCSV, label='mAP score')\n","plt.title('mean average precision (mAP) vs. epoch number (linear scale)')\n","plt.ylabel('mAP score')\n","plt.xlabel('Epoch number')\n","plt.legend()\n","plt.savefig(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png')\n","plt.show()"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RZOPCVN0qcYb","colab_type":"text"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display an overlay of the input images ground-truth (solid lines) and predicted boxes (dashed lines). 
Additionally, the below cell will show the mAP value of the model on the QC data together with plots of the Precision-Recall curves for all the classes in the dataset. If you want to read in more detail about these scores, we recommend [this brief explanation](https://medium.com/@jonathan_hui/map-mean-average-precision-for-object-detection-45c121a31173).\n","\n"," The images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" should contain images (e.g. as .jpg)and annotations (.xml files)!\n","\n","Since the training saves three different models, for the best validation loss (`best_weights`), best average precision (`best_mAP_weights`) and the model after the last epoch (`last_weights`), you should choose which ones you want to use for quality control or prediction. We recommend using `best_map_weights` because they should yield the best performance on the dataset. However, it can be worth testing how well `best_weights` perform too.\n","\n","**mAP score:** This refers to the mean average precision of the model on the given dataset. This value gives an indication how precise the predictions of the classes on this dataset are when compared to the ground-truth. Values closer to 1 indicate a good fit.\n","\n","**Precision:** This is the proportion of the correct classifications (true positives) in all the predictions made by the model.\n","\n","**Recall:** This is the proportion of the detected true positives in all the detectable data."]},{"cell_type":"code","metadata":{"id":"Nh8MlX3sqd_7","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Annotations_QC_folder = \"\" #@param{type:\"string\"}\n","\n","#@markdown ##Choose which model you want to evaluate:\n","model_choice = \"best_map_weights\" #@param[\"best_weights\",\"last_weights\",\"best_map_weights\"]\n","\n","file_suffix = os.path.splitext(os.listdir(Source_QC_folder)[0])[1]\n","\n","# Create a quality control/Prediction Folder\n","if os.path.exists(QC_model_folder+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_folder+\"/Quality Control/Prediction\")\n","\n","os.makedirs(QC_model_folder+\"/Quality Control/Prediction\")\n","\n","#Delete old csv with box predictions if one exists\n","\n","if os.path.exists('/content/predicted_bounding_boxes.csv'):\n"," os.remove('/content/predicted_bounding_boxes.csv')\n","if os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n"," os.remove('/content/predicted_bounding_boxes_names.csv')\n","if os.path.exists(Source_QC_folder+'/.ipynb_checkpoints'):\n"," shutil.rmtree(Source_QC_folder+'/.ipynb_checkpoints')\n","\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","n_objects = []\n","for img in os.listdir(Source_QC_folder):\n"," full_image_path = Source_QC_folder+'/'+img\n"," n_obj = predict('config.json',QC_model_folder+'/'+model_choice+'.h5',full_image_path)\n"," n_objects.append(n_obj)\n","\n","for img in os.listdir(Source_QC_folder):\n"," if img.endswith('detected'+file_suffix):\n"," shutil.move(Source_QC_folder+'/'+img,QC_model_folder+\"/Quality Control/Prediction/\"+img)\n","\n","### Get the coordinates of the predicted boxes, ###\n","### box classes and confidence scores ###\n","\n","# from the csv containing the predicted boxes\n","with open('/content/predicted_bounding_boxes.csv','r', newline='') as csvfile:\n"," csv_reader = csv.reader(csvfile)\n"," next(csv_reader)\n"," pred_boxes = []\n"," pred_classes = 
[]\n"," pred_conf = []\n"," for row in csv_reader:\n"," image_boxes = []\n"," box_classes = []\n"," box_conf = []\n"," for i in range(1,len(row),6):\n"," image_boxes.append(list(map(float,row[i:i+4])))\n"," box_classes.append(int(row[i+5]))\n"," box_conf.append(float(row[i+4]))\n"," pred_boxes.append(image_boxes) # The rows of this list contain the coordinates for all boxes per image\n"," pred_classes.append(box_classes) # The rows of this list contain the predicted classes for each box in the pred_boxes\n"," pred_conf.append(box_conf) # The rows of this list contain the confidence scores for each predicted box in pred_boxes\n","\n","#shutil.move('/content/predicted_bounding_boxes.csv',QC_model_folder+\"/Quality Control/Prediction/predicted_boxes_QC.csv\")\n","\n","#### Get the coordinates of the GT boxes ###\n","\n","df_anno_QC_gt = []\n","#dir_anno = Training_Source_annotations\n","for fnm in os.listdir(Annotations_QC_folder): \n"," if not fnm.startswith('.'): ## do not include hidden folders/files\n"," tree = ET.parse(os.path.join(Annotations_QC_folder,fnm))\n"," row = extract_single_xml_file(tree)\n"," row[\"fileID\"] = os.path.splitext(fnm)[0]\n"," df_anno_QC_gt.append(row)\n","df_anno_QC_gt = pd.DataFrame(df_anno_QC_gt)\n","\n","#df_anno_QC_gt.to_csv('/content/gt_bboxes_QC.csv')\n","maxNobj = np.max(df_anno_QC_gt[\"Nobj\"])\n","\n","config_path = '/content/gdrive/My Drive/keras-yolo2/config.json'\n","class_dict = {}\n","\n","with open(config_path) as config_buffer:\n"," config = json.load(config_buffer)\n"," for i in config[\"model\"][\"labels\"]:\n"," class_dict[i] = int(config[\"model\"][\"labels\"].index(i))\n","\n","reverse_class_dict = {value : key for (key, value) in class_dict.items()}\n","\n","df_anno_QC_gt = df_anno_QC_gt.replace(class_dict)\n","df_anno_QC_gt.to_csv(QC_model_folder+'/Quality Control/gt_bboxes_QC.csv')\n","\n","gt_boxes = []\n","gt_labels = []\n","gt_label_names = []\n","for j in range(0,df_anno_QC_gt.shape[0]):\n"," row = df_anno_QC_gt.iloc[j]\n"," width = int(row[\"width\"])\n"," height = int(row[\"height\"])\n"," gt_box = []\n"," gt_label = []\n"," gt_label_name = []\n"," for i in range(row[\"Nobj\"]):\n"," label = int(float(row[\"bbx_{}_name\".format(i)]))\n"," label_name = row[\"bbx_{}_name\".format(i)]\n"," x1=row[\"bbx_{}_xmin\".format(i)]\n"," y1=row[\"bbx_{}_ymin\".format(i)]\n"," x2=row[\"bbx_{}_xmax\".format(i)]\n"," y2=row[\"bbx_{}_ymax\".format(i)]\n"," #gt_box.append([x1/width,y1/height,x2/width,y2/height])\n"," gt_box.append([x1,y1,x2,y2])\n","\n"," gt_label.append(label)\n"," gt_label_name.append(label_name)\n"," gt_boxes.append(gt_box)\n"," gt_labels.append(gt_label)\n"," gt_label_names.append(gt_label_name)\n","\n","#The essential outputs from this are gt_array and gt_classes_full\n","#Each row contains all bounding boxes and classes for each gt image.\n","\n","#Here we create the Detection Maps for the first three predictions\n","#Prediction\n","\n","pred_box_1 = np.array(pred_boxes[0])\n","#pred_box_2 = np.array(pred_boxes[1])\n","#pred_box_3 = np.array(pred_boxes[2])\n","\n","pred_class_1 = np.array(pred_classes[0])\n","#pred_class_2 = np.array(pred_classes[1])\n","#pred_class_3 = np.array(pred_classes[2])\n","\n","pred_conf_1 = np.array(pred_conf[0])\n","#pred_conf_2 = np.array(pred_conf[1])\n","#pred_conf_3 = np.array(pred_conf[2])\n"," \n","#print(pred_box_1)\n","\n","#print(pred_conf_1)\n","\n","# #GT\n","#print(gt_box_1[0])\n","gt_box_1 = np.array(gt_boxes[0])\n","#gt_box_2 = np.array(gt_boxes[1])\n","#gt_box_3 = 
np.array(gt_boxes[2])\n","#print(gt_box_1)\n","\n","gt_class_1 = np.array(gt_labels[0])\n","#gt_class_2 = np.array(gt_labels[1])\n","#gt_class_3 = np.array(gt_labels[2])\n","\n","frames = [(pred_box_1, pred_class_1, pred_conf_1, gt_box_1, gt_class_1)]\n"," #(pred_box_2, pred_class_2, pred_conf_2, gt_box_3, gt_class_3),#]#,\n"," #(pred_box_3, pred_class_3, pred_conf_3, gt_box_1, gt_class_1)#]#,\n"," #]\n"," #]\n","\n","n_class = len(config['model']['labels'])\n","\n","plt.figure(figsize=(15,5))\n","for i, frame in enumerate(frames):\n"," img = np.array(io.imread(os.path.join(Source_QC_folder,os.path.splitext(sorted(os.listdir(Annotations_QC_folder))[i])[0]+file_suffix)))\n"," show_frame(*frame, reverse_class_dict, background = img)\n","\n","\n","#Make a csv file to read into imagej macro, to create custom bounding boxes\n","header = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*max(n_objects)\n","with open('/content/predicted_bounding_boxes.csv', newline='') as inFile, open('/content/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n"," r = csv.reader(inFile)\n"," w = csv.writer(outfile)\n"," next(r, None) # skip the first row from the reader, the old header\n"," # write new header\n"," w.writerow(header)\n"," # copy the rest\n"," for row in r:\n"," w.writerow(row)\n","\n","df_bbox=pd.read_csv('/content/predicted_bounding_boxes_new.csv')\n","df_bbox=df_bbox.transpose()\n","new_header = df_bbox.iloc[0] #grab the first row for the header\n","df_bbox = df_bbox[1:] #take the data less the header row\n","df_bbox.columns = new_header #set the header row as the df header\n","df_bbox.sort_values(by='filename',axis=1,inplace=True)\n","df_bbox.to_csv(QC_model_folder+'/Quality Control/predicted_bounding_boxes_for_custom_ROI_QC.csv')\n","\n","AP, recall, precision = _calc_avg_precisions(config,Source_QC_folder,Annotations_QC_folder+'/',QC_model_folder+'/'+model_choice+'.h5',0.3,0.3)\n","\n","print('mAP score for QC dataset: '+str(sum(AP.values())/len(AP)))\n","for i in range(len(AP)):\n"," if AP[i]!=0:\n"," if len(recall[i]) == 1:\n"," new_recall = np.linspace(0,list(recall[i])[0],10)\n"," new_precision = list(precision[i])*10\n"," fig = plt.figure(figsize=(3,2))\n"," plt.plot(new_recall,new_precision)\n"," plt.axis([min(new_recall),1,0,1.02])\n"," plt.xlabel('Recall',fontsize=14)\n"," plt.ylabel('Precision',fontsize=14)\n"," plt.title(config['model']['labels'][i]+', AP: '+str(round(AP[i],3)),fontsize=14)\n"," plt.fill_between(new_recall,new_precision,alpha=0.3)\n"," plt.savefig('/content/P-R_curve_'+str(i)+'.png')\n"," plt.show()\n"," else:\n"," new_recall = list(recall[i])\n"," new_recall.append(new_recall[len(new_recall)-1])\n"," new_precision = list(precision[i])\n"," new_precision.append(0)\n"," fig = plt.figure(figsize=(3,2))\n"," plt.plot(new_recall,new_precision)\n"," plt.axis([min(new_recall),1,0,1.02])\n"," plt.xlabel('Recall',fontsize=14)\n"," plt.ylabel('Precision',fontsize=14)\n"," plt.title(config['model']['labels'][i]+', AP: '+str(round(AP[i],3)),fontsize=14)\n"," plt.fill_between(new_recall,new_precision,alpha=0.3)\n"," plt.savefig('/content/P-R_curve_'+str(i)+'.png')\n"," plt.show()\n"," else:\n"," print('No object of class '+config['model']['labels'][i]+' was detected. This will lower the mAP score. 
Consider adding an image containing this class to your QC dataset to see if the model can detect this class at all.')"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"-n9CLLJ77FAA","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Inspect example output from QC\n","import random\n","from matplotlib.pyplot import imread\n","import imageio\n","\n","add_header('/content/predicted_bounding_boxes_names.csv','/content/predicted_bounding_boxes_names_new.csv')\n","\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Source_QC_folder))\n","file_suffix = os.path.splitext(random_choice)[1]\n","\n","plt.figure(figsize=(30,15))\n","\n","\n","### Display Raw input ###\n","\n","x = imread(Source_QC_folder+\"/\"+random_choice)\n","plt.subplot(1,3,1)\n","plt.axis('off')\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","\n","### Display Predicted annotation ###\n","\n","df_bbox2 = pd.read_csv('/content/predicted_bounding_boxes_names_new.csv')\n","for img in range(0,df_bbox2.shape[0]):\n"," df_bbox2.iloc[img]\n"," row = pd.DataFrame(df_bbox2.iloc[img])\n"," if row[img][0] == random_choice:\n"," row = row.dropna()\n"," image = imageio.imread(Source_QC_folder+'/'+row[img][0])\n"," #plt.figure(figsize=(12,12))\n"," plt.subplot(1,3,2)\n"," plt.axis('off')\n"," plt.imshow(image) # plot image\n"," plt.title('Prediction')\n"," for i in range(1,int(len(row)-1),6):\n"," plt_rectangle(plt,\n"," label = row[img][i+5],\n"," x1=row[img][i],#.format(iplot)],\n"," y1=row[img][i+1],\n"," x2=row[img][i+2],\n"," y2=row[img][i+3])#,\n"," #fontsize=8)\n","\n","\n","### Display GT Annotation ###\n","\n","df_anno_QC_gt = []\n","for fnm in os.listdir(Annotations_QC_folder): \n"," if not fnm.startswith('.'): ## do not include hidden folders/files\n"," tree = ET.parse(os.path.join(Annotations_QC_folder,fnm))\n"," row = extract_single_xml_file(tree)\n"," row[\"fileID\"] = os.path.splitext(fnm)[0]\n"," df_anno_QC_gt.append(row)\n","df_anno_QC_gt = pd.DataFrame(df_anno_QC_gt)\n","#maxNobj = np.max(df_anno_QC_gt[\"Nobj\"])\n","\n","for i in range(0,df_anno_QC_gt.shape[0]):\n"," if df_anno_QC_gt.iloc[i][\"fileID\"]+file_suffix == random_choice:\n"," row = df_anno_QC_gt.iloc[i]\n","\n","img = imageio.imread(Source_QC_folder+'/'+random_choice)\n","plt.subplot(1,3,3)\n","plt.axis('off')\n","plt.imshow(img) # plot image\n","plt.title('Ground Truth annotations')\n","\n","# for each object in the image, plot the bounding box\n","for iplot in range(row[\"Nobj\"]):\n"," plt_rectangle(plt,\n"," label = row[\"bbx_{}_name\".format(iplot)],\n"," x1=row[\"bbx_{}_xmin\".format(iplot)],\n"," y1=row[\"bbx_{}_ymin\".format(iplot)],\n"," x2=row[\"bbx_{}_xmax\".format(iplot)],\n"," y2=row[\"bbx_{}_ymax\".format(iplot)])#,\n"," #fontsize=8)\n","\n","### Show the plot ###\n","plt.show()"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"colab_type":"text","id":"Esqnbew8uznk"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"d8wuQGjoq6eN","colab_type":"text"},"source":["## **6.1. 
Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images.\n","\n","**`Prediction_model_path`:** This should be the folder that contains your model."]},{"cell_type":"code","metadata":{"id":"9ZmST3JRq-Ho","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","file_suffix = os.path.splitext(os.listdir(Data_folder)[0])[1]\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, provide the name of the model and path to model folder:\n","\n","Prediction_model_path = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Which model do you want to use?\n","model_choice = \"best_map_weights\" #@param[\"best_weights\",\"last_weights\",\"best_map_weights\"]\n","\n","#@markdown ###Which backend is the model using?\n","backend = \"Full Yolo\" #@param [\"Select Model\",\"Full Yolo\",\"Inception3\",\"SqueezeNet\",\"MobileNet\",\"Tiny Yolo\"]\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","if backend == \"Full Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/full_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/full_yolo_backend.h5\n","elif backend == \"Inception3\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/inception_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/inception_backend.h5\n","elif backend == \"MobileNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/mobilenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/mobilenet_backend.h5\n","elif backend == \"SqueezeNet\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/squeezenet_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/squeezenet_backend.h5\n","elif backend == \"Tiny Yolo\":\n"," if not os.path.exists('/content/gdrive/My Drive/keras-yolo2/tiny_yolo_backend.h5'):\n"," !wget https://github.com/rodrigo2019/keras_yolo2/releases/download/pre-trained-weights/tiny_yolo_backend.h5\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_path = full_model_path\n","\n","if Use_the_current_trained_model == False:\n"," if os.path.exists('/content/gdrive/My Drive/keras-yolo2/config.json'):\n"," os.remove('/content/gdrive/My Drive/keras-yolo2/config.json')\n"," shutil.copyfile(Prediction_model_path+'/config.json','/content/gdrive/My Drive/keras-yolo2/config.json')\n","\n","if 
os.path.exists(Prediction_model_path+'/'+model_choice+'.h5'):\n"," print(\"The \"+os.path.basename(Prediction_model_path)+\" network will be used.\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","# Provide the code for performing predictions and saving them\n","print(\"Images saved into folder:\", Result_folder)\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"GcmBwMJVcFh1","colab_type":"code","cellView":"form","colab":{}},"source":["#@markdown ##Run Prediction\n","\n","#Remove any files that might be from the prediction of QC examples.\n","if os.path.exists('/content/predicted_bounding_boxes.csv'):\n"," os.remove('/content/predicted_bounding_boxes.csv')\n","if os.path.exists('/content/predicted_bounding_boxes_new.csv'):\n"," os.remove('/content/predicted_bounding_boxes_new.csv')\n","if os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n"," os.remove('/content/predicted_bounding_boxes_names.csv')\n","if os.path.exists('/content/predicted_bounding_boxes_names_new.csv'):\n"," os.remove('/content/predicted_bounding_boxes_names_new.csv')\n","\n","os.chdir('/content/gdrive/My Drive/keras-yolo2')\n","\n","if os.path.exists(Data_folder+'/.ipynb_checkpoints'):\n"," shutil.rmtree(Data_folder+'/.ipynb_checkpoints')\n","\n","n_objects = []\n","for img in os.listdir(Data_folder):\n"," full_image_path = Data_folder+'/'+img\n"," n_obj = predict('config.json',Prediction_model_path+'/'+model_choice+'.h5',full_image_path)#,Result_folder)\n"," n_objects.append(n_obj)\n","\n","for img in os.listdir(Data_folder):\n"," if img.endswith('detected'+file_suffix):\n"," shutil.move(Data_folder+'/'+img,Result_folder+'/'+img)\n","\n","if os.path.exists('/content/predicted_bounding_boxes.csv'):\n"," #shutil.move('/content/predicted_bounding_boxes.csv',Result_folder+'/predicted_bounding_boxes.csv')\n"," print('Bounding box labels and coordinates saved to '+ Result_folder)\n","else:\n"," print('For some reason the bounding box labels and coordinates were not saved. Check that your predictions look as expected.')\n","\n","#Make a csv file to read into imagej macro, to create custom bounding boxes\n","header = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*max(n_objects)\n","with open('/content/predicted_bounding_boxes.csv', newline='') as inFile, open('/content/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n"," r = csv.reader(inFile)\n"," w = csv.writer(outfile)\n"," next(r, None) # skip the first row from the reader, the old header\n"," # write new header\n"," w.writerow(header)\n"," # copy the rest\n"," for row in r:\n"," w.writerow(row)\n","\n","df_bbox=pd.read_csv('/content/predicted_bounding_boxes_new.csv')\n","df_bbox=df_bbox.transpose()\n","new_header = df_bbox.iloc[0] #grab the first row for the header\n","df_bbox = df_bbox[1:] #take the data less the header row\n","df_bbox.columns = new_header #set the header row as the df header\n","df_bbox.sort_values(by='filename',axis=1,inplace=True)\n","df_bbox.to_csv(Result_folder+'/predicted_bounding_boxes_for_custom_ROI.csv')"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"EIe3CRD7XUxa","colab_type":"text"},"source":["## **6.2. 
Inspect the predicted output**\n","---\n","\n"]},{"cell_type":"code","metadata":{"id":"LmDP8xiwXTTL","colab_type":"code","cellView":"form","colab":{}},"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","import random\n","from matplotlib.pyplot import imread\n","# This will display a randomly chosen dataset input and predicted output\n","random_choice = random.choice(os.listdir(Data_folder))\n","print(random_choice)\n","x = imread(Data_folder+\"/\"+random_choice)\n","\n","os.chdir(Result_folder)\n","y = imread(Result_folder+\"/\"+os.path.splitext(random_choice)[0]+'_detected'+file_suffix)\n","\n","plt.figure(figsize=(20,8))\n","\n","plt.subplot(1,3,1)\n","plt.axis('off')\n","plt.imshow(x, interpolation='nearest')\n","plt.title('Input')\n","\n","plt.subplot(1,3,2)\n","plt.axis('off')\n","plt.imshow(y, interpolation='nearest')\n","plt.title('Predicted output');\n","\n","add_header('/content/predicted_bounding_boxes_names.csv','/content/predicted_bounding_boxes_names_new.csv')\n","\n","#We need to edit this predicted_bounding_boxes_new.csv file slightly to display the bounding boxes\n","df_bbox2 = pd.read_csv('/content/predicted_bounding_boxes_names_new.csv')\n","for img in range(0,df_bbox2.shape[0]):\n"," df_bbox2.iloc[img]\n"," row = pd.DataFrame(df_bbox2.iloc[img])\n"," if row[img][0] == random_choice:\n"," row = row.dropna()\n"," image = imageio.imread(Data_folder+'/'+row[img][0])\n"," #plt.figure(figsize=(12,12))\n"," plt.subplot(1,3,3)\n"," plt.axis('off')\n"," plt.title('Alternative Display of Prediction')\n"," plt.imshow(image) # plot image\n","\n"," for i in range(1,int(len(row)-1),6):\n"," plt_rectangle(plt,\n"," label = row[img][i+5],\n"," x1=row[img][i],#.format(iplot)],\n"," y1=row[img][i+1],\n"," x2=row[img][i+2],\n"," y2=row[img][i+3])#,\n"," #fontsize=8)\n"," #plt.margins(0,0)\n"," #plt.subplots_adjust(left=0., right=1., top=1., bottom=0.)\n"," #plt.gca().xaxis.set_major_locator(plt.NullLocator())\n"," #plt.gca().yaxis.set_major_locator(plt.NullLocator())\n"," plt.savefig('/content/detected_cells.png',bbox_inches='tight',transparent=True,pad_inches=0)\n","plt.show() ## show the plot\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"hvkd66PldsXB","colab_type":"text"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"Rn9zpWpo0xNw","colab_type":"text"},"source":["\n","#**Thank you for using YOLOv2!**"]}]}
\ No newline at end of file
diff --git a/Colab_notebooks/ZeroCostDL4Mic_UserManual_v1.2.pdf b/Colab_notebooks/ZeroCostDL4Mic_UserManual_v1.2.pdf
new file mode 100755
index 0000000000000000000000000000000000000000..2269f0cd697b845be86338788e67b4d08ee8e55a
GIT binary patch
literal 2313410
zS}!~l8K@-&o!r$*yr>Bdm{C=9bv6nrr&KskcIN{+Zp)n}ts%)OJm6Tk(UUaI;wj8X z&k4S4FL5L7+G^!MN{!}uA-5T!+}~(!%^RGt1b@GP`Ql4{U626D$N7>g9eo6s_E+ku zUu8yC9*7V6KkYZw?dtC;%*awt%qO0CoF*a;@^Xb#?S4uqNLGQv$LPd8*Jo>0tdWj`3lKTg_|1+`3sf-f7bu1-NJE^9o7!>La${Py$W zQ9zZe31kA|^#LzI)0$=G_w^Oli&{5P`5=I*G4XAE6k_urIqt~zD6k4XE@i{I6RLCg z`np6)mH0;iQe=}0&AeFKSLy_%+st0dJ;(5{X3`4f=D>5UHQk=vlEH-7;0Lu+R6_9< zP2#&(PU%k6l_Mefb>m(~!02-B52Wu$aI5m0sv_Q}1{7wSW6Lq!3>s)+)~eN+4+kz{ zyhdXLS&8dmujr&S&jq;iP{GGIk(3YOX4*R{-8`(u#g0@!GFL(`ic^fe<(cB1;pR&X zoVft_5>hec8=7{Q7r}}0H~ElMwnMQFStAXo)UXB#QDCaun@+3to?Ex6xeFIe=Ay(k zHQ&C=GF0EaD_g7r!~Eit^8AL%@v)-T`oy|xJ!=P~s3%#~U*98fR3JKb$5xfM-Ug)w zL!R1~aZhFK85jX~Zj;?a!GEoQQti+p9c#YWA7!tcJ0gF>B@L#1r@xvUmp)p>=&rZ; znT7`zxN68{Cn6F24VSm4sd0aCEaf^i(@Jkf(BA8%J$_LNQ@#IOL@NcFU)r2s*ld$* z0f-GaTwj9$j^_fjsaj!Z--hl;-I@>B+U9KwmsDL!b}b1~h;()iwfJa23GTRw2c@k2VlN<8$Gb~$aPaxGu$DF%0Jv$Y?;fY-29fWo&4g6Dh!U-!cTZ#{ zpDys}OwuU>HE^R&1(*~$?6x4^0%q*gJj+D3c?$y;JYNwV>J*GjN*r2x?!|A{>g(@L zRWNY`T-$LiG#+~Bn!jB=&JX`|xCTg{#yC%XR1jDbke}nbj@ns5CGeW^^h|5v>cQbH zK2X>E?n`}(L+EZVI-kwDP|_0fb>pS-F#?HaTJcZWA3@hX^cSjr)@R%iACim4-4czK zSQ7V9fdbyOFYbs5sUqzhR&JPV@kSubMV6{+MbjEv*BT`Co~)H9a$W?qZtV-Y)bDeZ zsJ7n8$&%6H;5TUFzAbGUSUM(~o;_P;Cur`i!W|gHv_%`9o^4*e@A7@UoEbFD`}xs> zsde`z);yny$*(tJsT-$dvVQHvHXcPuZoe_&)p*gaHq#2CBsM61g*Az(Ej{fO45v^a zI_k`LS!+K%l#23opo67b9s@1M_APJ|`aaPUFV^Y!g2m|+Jfs$=Jr)-YV&cFioBC?j9eJ3qt@iI;8 znZwTN}cxoPmzlR`(E{ogz%3GL(p+tu9xE;mW@%1NRj3>5p>vtn6Vb4k#QvPwWJvY{=ge zbp_oZzPmB|T5`#!v(C#xO{3ysXfRgXV2t8S#nu-Hy>He_7&#AA=bJKGs{6((6AzrU zxD!zW?c06nS|j!;2=mc${B2G71j;-n`I;)5xKP!%Bf2__$l*alhA72F=Jj?w2W~h{ zN;8Af2u49U?7kh&O!AJBj$UG zDnb%sH-o=gJ6yu}aoN~V^0dc_+vBL}No-1Lf zb;_K_JA0YM5A8>l-t`L9g0_8Y`p{E#C2JCGHMn+yifB)wfe^a_o9Q)^{p zyR?X8FCy?XvTAWNpMW5AA*FYT|NC8|cetg8^BCEJ_*KB4KgqfC8YFAnV(BVW)nowr z6;+v^SNYs!ME;kk*Q7l5=BqsGt^6vg(y%>HscDCVo4o2(f9cS9Gr(78|jE`Dy4F!sbWMpp9~o zj+KV}M-cFSS5q7@HcW5L)>g?U^nf;?2BdcU(XNE4C`M3n$weTbxSiAxP(cyCxUwVT ztagL^QT9~x0d(N;@Mbsy5%}}?oW$1IuBf5w^r+<7=d-r@UC5}zK8lz_jn_Tf0-fMs zW!%d73QIfn*g}gCU)tBSe#;}U#N=ycN%WX&_zSrf2WfGR!9|W{HKm~=6XzJ7=Dv6@ zfKR_2Q;lp=4HL?N){DD^Le)J$vf?UONbNUVz=KFPgJTx1k1+|=^1ut7omhl0HKg!K zoEErkgi;;e#!J^1b1ZeT;A`ZD{r&F=NCx!2p%(X!k(KYOEjL@5I-AR+7UHeqm+HTY z=t8B8uM%Dqt02RZ6WIKKIR1v?No^mSEQu*0=>S-32`EP#n>wuPcmv|DG6sTPP8U`! 
z*tT(PYAhDsGU2tqfFDO4e6Z=3PYL{62T>|0;Zad2Z)r_8SDZ(YgEbC<-yawXqIW1qF zpj=N&M}p>pqTSzc7tLhSjPw&5%iC26_Ja`q2wNNnI+bfXV<6Dj(5hF{BD87$=WjT% z@88@O%W~PYqhC0%sMh*B$uFu492R&ecPeO8r=CuBRZUkZQ!dSh;t@L8dGNHkLMr zjkoMUVCY}Os|&tcYq2{;3p*v=eA1;FUY7*$VX{biXInQCq;2W**{J0(|jv@;qfB+2619c=Rj9aB<>>{W?MAXSr|xzXuSqe*%cIKLNxdiz=iM z&`>u%9{^vm>GrP6?=r>uxC@#Gms9Rn!v=o=|EuhP@bxkm*VNY=+taZV=L}25hJ?IE zbIWG*KqJZzWyHc-oN-LD7SVgCj?~A}Nu3c|S-!&}TVe;oPbM6jdwvJJ1Nf$%(_i)-KG{|+>Q~F_ zKVZBNS|mQromRn~Q4!H=Di)&>nk3<{Lg{imUjv3)D|vPnN02g zord%Z?7`n~#r$+8Yfvn-XVtbNZWcv66TNO^6w_Ny+UJah3TVIV)#5-+YT67uUM@OU zwa4I#*tY8DL#Trm3B{mmMZ~@D%5-Uf&Qxj)#s2dM%dxK93{zKpu%*xm-J{=d&G2#_ z-}^Fu!>tL{J@b_RvE0pVtRP6fpk{uKU@x5*x8>@Z(9v|0!!@>41s9z%>_Z3Cvts|I#Q0qo8r;nRtGxCcPBVWgpvxy`hQEbneS=y{%RG%>JB9f6bMA>g!}Pl1^w8?hx}nqX z1ITlzpb^IDWUJLFFRun&2}MDEShaFbH?c#V3E>nQ95erndIgyLZ0w;Sn!!aL+WOy;is_OY44s3YF<^>R4x z&>D^mChO3ko%NTYuP6ckEtbkVQ=zpDE)O(GGO96m|S(u6q#vk=@^r`PYqow92 zy}fMj%h;1&wykSOeR+6Dxw~n$c9-<7eS%+dTTP3Ae}Jnm#WUu)(>Y@9)PTN>eK@ld z0p4T;h8w@(q{JV~fN$_;7kl3*e_G7xt_DUCz%j=?kGyOAjkn*D(w}F{ONrlbiJl*3 zeBQvaInl5}y{`-xnS;{`bg6cYbJ(nparx_DL*~uaE!tIAu=*q^S91Zn5t;S`zoR{U z4w7T4Uw6v4721!`8jCDq2u9Rq{W$+ejre&DtTV`tgX9eOG6^cXLqt{1wRFVBANEAe zd|>eZX1w6f!)$@^M~h7phrX zX{<{6L|BH`_Km-$#XZu=JxHfGSvUQL@2V<^wd1@o|(@@NrsHc zpxn!K_tqbGbABPV>>D4jXR&)r6W5g6%iOb4oa-CHiChq|oy*aJagjM(U*ya*t`{a1 z2To{v_|arWHC}(aj$dpY3(DW|))Zh;s5YFCvpyZ4_l_onLdE>@zx@km;58&v)94ZQ z2IQ^}yedqrN=-qorPqR8H0N3S@88`pxpvzWHYj4r0!pci-I##NYvuPL8Q-0n4to8G zk_h#a<$J4pgF&vxnyAMoxv{dUJmZ&AH?ew`7mtCC!wq^4?Hy~f;w0~hoyeoAQ3%Y( z{Mja_N=!b#M?J^o_CnSD92`nTb#M6?F%o|rMNc&wuokSb_CS+0ln;fkRGBm8Iq)t_ z!l-N87Y5r=ra%h@_O$(4p=&$&;hxBYN9x@5TQFKs!&(YsP*`54`p?=;rJq6YZAKE2 z^IEaUrg<9PLF(aiaZz#6#sjwYL(x4D<^)hq1sDZnc`VPp*@P(rp)Qi!v*}g6tt9RF zE;6Sfl{I@+qxzaJ{~h;k*J-*Jict+b;tLFe(j1$gpj&E(mkeK=(HEDvqX$jJuk>Y< zYaJUZene){|M2l1PnDWhRVqO*Ud3`3)szP~nT!cBOJ553aG=v%atRtZ0Xp5Bv0YZ@ zi5*1o%D~(PCJ=@#G8%0@o^+KxAgop`bahQaw>tm(WKH!s6a8Dxf;{7vB-fwJ`f@4- z`L+T+t%GxIcS3z~^-6ilY92-(Ap=ka20AKNA7!z!Rzu6t38Ed?wXiT^)Z~?aMo9gV z3NMl5pt$;c2^1_pc_`P^kl1G*J*z>NNB1EC7>QKN$(3dJzNeNNQW#Y*SscohrSl7V`VR+V=9)7|;uqj&4UW>l|OhC}(;=bmy(c0`i?zY&iia}qS`;^(E$ zo?!A_i4R}6QU?{1i-;TnPdrb)cZpqAfpy8juL`l&eSp#6&5pDSj*Z=6DYD5Wx1Cna z-P7KyC0(h^pKYzs*_GL-ejH}NZ#dH}q7`aeFAQ6IL{j@s>CfFwsoa#NNZt44prHaj z|9wH#_Udu3&(Es9jO@Td2sj|m+9fASf<%#*cuVC%0cgN3S!*u^;sh`gg z7)fv7)}^M;eQ6_Nk$z%g@YmkRAzxE{fH3 zkBh5VY+7iyy*UU|G`yaqli3@6DZq_Bnfy%)FH4Ldv332IztfwE6Ek-Ae zT7ulFZFeusO+D;j*rxJc%=wC(d@!12+~iCGL0uI&zK^F)L5v->V(q*?RO$JZxvdfo zq~zXdu+*=5s4ROS#(kTc*c^ySV&7E8BMM?_L15AukIJ*>o3wCHHKT4D_NdsCU<}HT zvDqg+s40FTpLT{p3Pgo`rO~))>uW|uH7Dv#6)RnN;-o>)Hl$nLl$s@CtxkHO!+IQx zO)sZn#_o$odMiviah6S?yNTuxVz3Oa!%=4^UoyvA>3J1c>IP#}5EkA{G)H(}C3>UP zxl`ApTBntz&qy*R2y?8UT9Zc^?Zyn2#n#d}7@Ys~-Y4nW?uR zum|0)HU99jbeQE3eCjT72%8B{EPS(%h_I#r<}Cw2P3R~sQH7)Ki5B1?nMt@h4EoI0 zqWpJb>Te1^r@*h)ahG=MtCKLH?+4`Oe*Q)k?b9<^zNrs~ql z#{Q1sz&Xlrov6utX$T3eBOBFg%KT5avleB|Jt<5TcrTt`8uWE$YT(YySDPqJS{N4Z z+v9%%%sT?0)M8DY)OCVmXj$rZ_nSaY@-J|IguWI>H6r|?!RQ_utV@&}U{hnOEdFID z^zPFl=vIqJfL2zQK~q+~oS@?IVxBK}Vmfx3`DIZDw8er@Q=x}+f#uJZaFiWd=F~dI zLs$`X_d{Ehk5!O&mqRbfGkb%LSub+F*xVOhyhpj^$jig()hQHxtmA~IqTrgE$INANGKDy(1-j!=y;U_P@ z_A=>;j0@ouEX^6`-yGR~T{Y_yzJQY(R5ul`f#IbRfY0&tevyBnnl~5OTvGxqa?~sB zG-T(3fkFJ3OOPK=DK2^FOck;q>U^E_>L6hAkTPy>JXAE<%$t22R97ITy2J4OC8XU) zlr$b%Q>m(2*qkr|u)7c?>zMrY@}g)rT*xQu5o=Gk^1AH9!i}$p-O(*s(OGyLPU9VA z%!c`II7AQSzy4kR8&0eC$NKuvSFGeozdCvo~04(|!^dQdmAcsDJtx09|;#b0iGQaFU(r{ejY0=?RJw z#*KLI!Nw&3-rM@K7U2|v@DO~rkB;eFUxn33d1D3=p;RO`Ra1DMtA(A~?aTyMR5&>5 z+Ebv35a93i<8p1b(>jWP*8L!+x?6vQ>a9jerL%mnD@Rl07YcFCR2NxW0bq;P5j8TH 
zJ90qGMYsy>lE7}U9(?q=XFr5v^(7UHC|av;KV)0tjGJ=7t$tRV`bP3-&qNq)P3nMS zFNxpe$0kIFD*)nx@4Lbn>Dqdf=dd!ULh()hF?+G|Y4Dis?%hS8(Qmk0vf|NQHN13b z@jADOOa~@X=msyE)X}nNFN}xhy6cjVoX&)Ro7bbV_8x*}^5YLHpA^-I3j zdMvipN%YI}eB|P!Ju58eso(`s)JQ2wl_xpbV)JY_A%hYXEoR%2 zHKjztGW^;(-uNOKZkVVE9gXeh6oof^=MYWM$yAEW(j!hyP z?)uii$;6IzQGK#ffUv2~7OHiAymGB4I>zzU>gvLTm$7lBjL?ze`yI0Nsq4sKMl84M z4MV-T{rcfQ161|pZZ;D%@Ky!GLRtO76{8h08LA#LbEK08DxQ0AJIA*Za||19HS zM7IlaAw(6`g_fK!Ks11X@}7J9!mUk3G+&*}BEslPZO>+n}vVX8TZ{A4 z?jw#1#&BUqZsu+iQazCo#}g6OehYxfFk-ZN2s} z(1Z1yfa2q(@q34OyUd&RJHGfysX|JX1N!TxPkF30OI>n!Y-F*wZ-e7KO7=79Mt4G( z90j7i!q?iiM9*%2!=ZVoOc3{;)?5uJr+n>tLA+WsD#Wk}@xgD+P$fK{i}u*gBrxp^ zo0*^Cph7-s(?=svsRPGqNl-bU0+X}@2}+lj{ntSXko)g}#bv}t^u(5?d=eJVuBFZT zr7F?!=qKWY3k)A4A*8~FfN9Qnt5jQuh)1&#PpRpa0eO^p>$juSmCue~ujW-Jz(qr{ zz3A}4RlM?mAeK2(8*f~#v{qnMY%q|jw&-r{H zrbnpL<(qEOB^v%NUs?m5jY#L=FuUmvKJR_#CnxmixipsDyvh(*A!ar@~trj?LOw0`xf0O zkeJ`GLMNLoJ6gW)%MV9K`;5w7FZi+TP88iJmpmn)X#SFMb~@9tt7yIZc43^CO9h});BQ8QU{@?Y1bOLMAk7jo&roWQVw+WdO+C4HfRPp-H&UC9xO(6YO zOC|dSD6J)aksAz~6@OK|EBSbKtiQuGlKKDZift}ej_G-2MIQS${=WdUo}0+AGqdS2 z29WUXCJts(>X>N*zq zE+mELW!TxRB&X5HVkf)O1KsqqoWhVhLRJp#{%Kj_AIjD=uag3C8}}EfJ-=f8oCV=> z?0lSyVKH#Z8v4_iXCYm0hFL;G|F>}O{~SK{?=xtSv+5m#RyvVFf?M_;g3SKYw~t`` ztgrpNee(1kzjrRl>hhk!)r=5hiR4s^kNQ;kc}=9Vn~dMGuIjs>3JWeZ3v3@OG4shL zxy)uD)(kB>7d^{8a!Jy%#8jH{Oit^D5W}enM%J;bhnD0vZxhgDH?*OMq9QBH@H~rk zZ;EY^4e_hhSTHMM^w#VwGS#qZ#LZ_}ZrH~%r%z&^6Q9Hq`3qY;N2P8N$;g@l!=uIF zk`C;z* z0p8fthP}r3Hrjg8o)j!M%$?$`(S|FF>H6La5mvGxHDqzp?Ybee3m~s*SfI*v8l}dpvF`~&ef@R`P>JlQ7-s4ge;OW6X8$)bl87*S5Wv@ z|LzT?i6H(n{QenBe`wYI8Gio^Rrx`O{Rd^{fwOg4I!j#V-n;=ko_<- z8FRhbUYgb`Z)X1fB8xlBA;_$-eIa$V4$I!Mwe5C#w<;Mz9k;!c{2PzC%cA#*vKOhqEj8*SrQtQ0GKMaijtrQ#crO5`*0ECCBRb{OLRN!8X3R$dZDZ> zIyyKiJ-@<$r3t)F|620{<#`|N+!HlmdrEZ=i?{nOWPS| zbpx`Z(gS7Ez1Px&TOOwkx`XmFQvJn$jw{5_o|y>C z&FR$`CDLb}QwOB&TNmdtQ}Sx8u?vHyC5Q8);hX^KIa%}4=x#i|cPEp%?(cGlMc}CS zkk3y0kEZ6*oT-8@QCNvvh4$Ow%`15;rN)0?eo^;)Kb^q)xoN|aqHbK+R^A#lKY?E| zEuGT@SuX^pMx$O}_7kPcfA1{dtku{r1T%{A>TsrZT1nZk?W5#7^5(1FY-ebcxZ?>CVRpDGCgpXkaSe0ONN(HQ@18hr7(L~OXI;y3a41(%c$QU#uxK0$Ydv1Xz=bIrD zMK#aay5!jDQmqjz{T;sxH)lTwCT7$X3vQDGahd`#4BCAkNw&t|jaA|s#^@HtqLWNE zG3T9dS`u}ZiW?j!SV{X$&S0-CD*c!?&!=F@;7LLLh`|k`&zdNADSws2A(qzoa%m|D z{#Gi91{5`(^{Qf76;kZ$P0CB`NGgD zFV^31X;-reV_}J6lL#n>n>=rHUJegZz9Km2tSe*<1~V^!LI}NeO-eXNCuPND6%K_d zDlvxdP!Ot4g%}nbKZ0 zqo=BjZjq)@_jqTGl9erU*w?Da&6gYq0sv!zJ~oi{q;W%ad5c!#{2|gJ=Ynms>NXv$ zMoALe{(#uQ-=*Ng=G5w>4ZSTUbXM)^z0ru3Miz}o1~&nb7|3DPatIMw{o`8YJ5>5+ zK^Rs~!KKKGtFA^C=e2K$+vo*zvd+wuPZZ3kZxwf6rQvMJylkw>(KADy_;Rv3{^$>i z{8=pVR~X!L(c6pgh9`!gzL%1xXR|e*si=AW)8t3u3l`VFP~GDu-n)g-81K=Y1QdhA z0CV{~M&8@>Nhx3_r|}Q%faxpN6WtK6mB)TiC_3luf>iY44&%jf4aIW1d=`bwS$J!IyCV#s8+M7$>luzO z;%{DhnooXH3-4jzZK!PnjTV9y(jgNiHf*R^^o3Z$z5O{TsCj~6FVD78_rLso85TCcdHT((W{6g_BC@DS9D}uYwq{rtAN%@TxvF`Z<FN0K2MzVFrWJlvvhZUx#k%A(K_Fo)!4A4c4#&jsg+K+>q1QDkef_n zucZ3QdhsOhi7Kz+wzP<7);r!Q*n;=5GUY4@>f&8SpL61J6`ut$s#E`TNoHsm)Rlie z9#@)1WsH(GMDmc0%J8_O$AA8eP^DRRs{OMv(n!7$PZ?72Y|ZCUJmB*E^2#M6ocWTz zJvgBv!3P6BQ#+#v@-u!z<;2@bmVzBP9ariSy}EKQpItX5l(#Z2m|kL^Fk4@Q0bS3~ zTEN#G0&}2*Set_9>mlnMN3?JoW;3>f=qpE7k9}=xlx0p!@0zhHQ<#}YnTvD}Q8DsJ z^snruPRyFs)KwOYtWSe}$ZB2E2UQYwUF(dYlRGtwsaRY}CNyF1*a_Wo>7~f-2OfH; z9|v2VfDa&LnL0qn&GP6l5)#$zCz9v2bkXGm^7fVGQCcjU7*VOl`h+Id^03;RmSvx+ zr-17lv)8SWvNU8V6Mob%UC8W*xw%RHCeUQ~3!@?_bsf#AQO^#xTq&X_MCkOm03gz& zRX0Op)YaB?iVuMI8Q+@BI9vu8QPDI}LuAU)9hGZSW3BR;DAd6Lg;M7u3X2mN zmkh~Ny-(x3@-WT*7Kf_i%%>Tuyzz7Y@sxdGkU6u{7-S+u%d+^2D}=l>K;pfPqqR5{ zs*aEZY!fEzq*^J#VsRkDE8WUVs@I@tm7N#DTri#L|IuM{3Z6ELF>z<=z@kqbpfQl# 
zh&8`T(;OlxEyAF4NMT6Jrh!q>2qzF@=VI)Cyd&+&%uxHwj=!s@21fKnF(y7>Z9e5=TZ&tD15jRm

Pnki`LyMo^?4%&S@ngQDMhf(H)F%W4@f`8fC5t+^SNbjd`|s3Gy;1$CfsUCp(3U(K%^&RjJa*CY8% z?^~cD`;9ZI^BPC{US%am_0i@fbIb_AAG=5qvl+^rTq>dCqBV`EM|wqAI(^NmN)2w( z_(SF?$LR|k87U;Kswq(E2s_tNX5_LOU4U(L#e#Pa*j&6a&6u1xdbO6JcDg(PKMQFk z7kU#7!fkb-hWW@E<_&-MuzbNH26A7QoJ;|=zjJm;iX_YPu&o0=(=R$Vg9xjdv!aLo z1W|*}$!OGbYq7V{_}sh2F;|a|$XMbgqv_KJWHt{kFyUZC@)K(9gUDD| z%KQ^-UeN&>MP-{WeSbs+>| zpXKRpqLxmV#vzN^m?ADDs(N4cit-CHJp-pZbHmvVY@D1HIUz*Y78kGxPXd;&oXeU~ z+jIEoN<0NM5;Nt*Qqi=Yfx6fG^M7-XB0kK_d%q z>k%;h%e~?Xe3m>Je-v_jvIq8XttI3A@>pr{8}32d@)sL|mndp5t8N5#KYx?!iL^sB z3i8OY+X?c!I)qF5e0VIe4ZDU`B!2dF>+IZMOKn8ElqD2_l~g_&cnWHpxS)7o<#S## zb|fleDgzT+YeFvMNPqo&VUK=9g(Uo3W7I})9oj-})TQgff&b#YCnm6ugnc@7BY*dD zRG-;qHXea2-%?-{3?2aP*mdIt&FJr$uBSN%lKV%{}09KaJ@wAsc@T`-LJ;V z%=3F^p=jOk1v}ip=E${7dE=jidWINH6KXc5^rPnUr zX}>|6wO`5{-vz|I!Q%81d9wn^;E|L&KDM&hVZ#MZSfa!EpDlw$gpHL~4v*FVB}eqz z&Yk6qQIG}LE@!1hZuEPrej@y}I}Q^(*t3@L=APFwiwQ`>&(;YQ4N1Qmn$!|{!1+gq-2pr7 z8Q@-C4g47S`?&v}XMO9rUL!|8RtI`+4)FNwSCt3l_HQ3YHcOPusL(Pi@31 z?OW!EBE@ixK=a$xTk}SPZGZ4zDHh&QWO|_8LEj+4pC~gHAc2draa%tEg9zrLNz@X| zykV3odDB1r`DrXYwYulr7hbh&ShvbsHj4txi%P@JKyq-A8KocR$g`>-&^N@Rn*JPH zF1-~!jr^ajeNx3P*k&c)x5dN=Ot@Aw|44K{97Rz!;K#G8Qj#)1ve#woGaZrGNy|t= zZw+BVwfxsdEh7B?)_j4bKE`L-=_qKsTnr+P_Rf+OUIMajfU9^wufy_sxLww;Rd0$k zAi;|(M5ea%1?JRb>ADQ5eRRV~C`DS`ATrI|M8s)aXKRFu10nrh=E(buMztcr9hY&%i{1@M%E` ze|y}5>!BffObxkCn%=a(qBxxDG<{spb|(afrU1>*h#M1qgTba+pJ{<%Sy>kQ`|R$R zKX|waX2&*t1u+=l(Bf=gt<^*TqHmpqF5mz&$WyIOr}p zF9cjQ&M6It)MGW@d7_vgi(8P$N$r1Xf ztLt8;3wyidfRrfyS0A zd@F}v$paxPikzt)r5v{fbo9KbVK#NAkx?0%q3}1m`JQ-y!-238+X0E~%7$@%)heDY zLE4LGo+djYj0^u%s9XZoH2U(hgdRONcXdTEEToX^UYw`v1UNog!o)=7F|KhOF(o?? z3@gZBP+QKA*5a|+oQ-Es(~os$MhT^Um)ibc_Ttama;@NG0jqQK{zOs=ZO>Vb?i0-j zLB`_q*DqS8>O2K`0lmI9wXf5@6g+XszV=nu*0^V~ZN8ar)h8v6$pxT9;e~(Xe(O=i zl;YeJ^1-xh_5keFbbLN6bC@RLPW8?rxL4VwYNn&1<{OVy@&TK!tr74Gisz51-BW00 zY?-r*cR_yS?5|X)iw%0zbsUV6hZsc#_8X3lDh0k{8%C`PHM%vFp$ggW)X(!*QQ?>% z56B2%j9CA;>_h~&jVp*XZ8}g!RQqxV4M-Dy$}N}7Z)?b`Ojyh<_QW!yOef+*0efI?!How)D)y@W{c06 z;ni5eFPe|KV}*dalL;P&P-Dco&-ised&L)qu2{ivYM`pZM=saW0$>mHujai0kyHB# zN6twkd$U&m8)!%RMo0lN-g7gV-7JnMa)sjHB-YD{yVDW6t24I z=?C7Uz1RY&xE(?TlQifnubuO?@)%DH;UNOGkeloevP3)bf>&-@i*&L;bSCdQ=Zh3z ziwV17EL!kqK_a#Fx~Df=lf0>1Rm*y-Md2xxS>{)j#~WEA+k&D^f2Q*Jn^H+!2Lx7T zcsJdSb?^wgoCOP5SAF;LONKx#J@KzAdfQ>Yj2iz>U7QSqQfkt^l!M&w7u<_%MTub* z7>3kuL}V$(sVwaIo+Sioj#K9$6<~On`qPX8zMA&lAfhgP)eT39i)us8V`AxZbb&d{ zpqMX>77jKEE2A;ggQ#$|>Wa3UNaLvDe3qsT|Cnt5h-vPwdW|&QDRw=jq{Y09%3 zP{6;~+Y*X_kYCip0pDw1UPfkZOcr*B+mdJ~pFiA?D!6_A8!nJoon);@ggZDuM;O6_ z;mcuRCsaNJ;@ZNy?6L2n2-et6;2A@=L}0-fuTXjJ1pFi{iCf)D;5XI{DDXZmW)1Yw z+6qS-;@*_+!pjFeuvG(=PU1Fx5ZcV@atG(VGAc#w=QNM?RjTrnBv{;k+yF_p*t%TZ zO4~B|Uk+j601xu8boy1jS#vKUO0(tY)Y+ZGX^E~+JyT3Ct(j4Uu_-nD`*?3IzY6JsKePOS3{vc1^&3l(8u z!NZ5&D!Z=zWaI}fd{_mr_{;?(aBuxI6oi7ef0c(qh-+fv~ZISeY(#&gd z=@CAaftNg12TAbt@&e<=sF|Ddb!L)0aClQ6;R0Urg$fIAd(&9-&AEoBwPAv-RV7Lb znBvA~V@>OJUC)SK-}bwM<{}usEq}O*EXzrw+~3J(99va|pNs_Nczf9%RdHzRirkDCvjjs0^2ojt;60XCold{UlD@6#!5nsVY|MAN|7lpvAqEFM~>3%wO?&M zJ938hkDS|QNpb5{^oQpqj+F!_))QwKkOpMosdhK%K1!c#+_y@Ex|Af~Y*BaS=oiVU5C8D-KEK3{h@QzBw%_@x?Y#rq{$wyXX!hy3xS974!BP zfoskq4=c^>M5yGu29jt?6n7Nno~1bWoNWk>VY;NxrBy-Vtea6caiR*2U6FSb5u1bM zN)^%#G}oKLqIvm@^TB$nNjvk-xMXkLByN&`duZFq*EkErp0AocMnh+#f^3HT|8i!c z{ymrLZvQ>SC!pvZ8qc?4+&12Yui+-;#Au!Jt#9lDoP{m_%eLb+?th`r(Z0P54Sw;@ zq^L8aEN#o+19(F2mMbEA(tUk`=nZb_2SVOr zfF)&VUMkN~p@p~Oz~+8?kprUL>#x(R{||ZZ9n{p?{g0wlsiO3*fOM4JL8OZTDWQdc z^b&gL2q;R2ARv)0y@U>-_YTrKgx-4-h%`Mn=e)1)Ip6Zvo%@@)bMMTa*-yycnzy`iQI_Y7L(}pVOOZDpZsBg+!Yr6h)(61*>N%s4AQ4Ek8I(omu$E) zxDNLcXiTEnf_PzNYEeQRn+)eW$nz;_cpng8xGO>%+)3kI0WbpckePj`ns|d|+4|t; 
zy#w+@nW)KUC{O;P9sjY~w0%EU+%4v7qmr+g(VdRcHn+p;gQCz9q+LT?J0(gRotB2r zwY8+?`b+!^8}3nv*sHdFb$O`#AU1sLE2ZYS#+neM{Wa=MK#9^gSl`m~`>VdjZOQCy zeY$n0Fk+SM531F%T|ezXF1xCgQbNwza_Vx4JoHI>I-TJ?wmhb`jez3puU{5Q;2v?u zE)@&rWyq-kb$#-oN@cezGVue-pol_gvdov<>U!!F9wA}K0rdz*%A)&jx?XXDF!{-D zZ9k@|MVPciLRV{A2KH-%jDBPc?1U~ z4hL)p#7HHI=N-KMmY3>9_KOUa)b^VI9nN!(XDf3Iq7P%S*<;+1^^=tr>bO>Gr>e8_>b_a^->l1O zC6ZD{9*h-3X`S{$%w9=kpI4Mpyf)?nYw8Z!_TyN(K~)#FVv*!+!TXxRz-KBV($R2Q zyeg)NM3-%%u{$lct!nY3Qy3cDQKxj5j6>clo`qv8LZ?n`&`YYe<;XYk&z`+3S$ z)PRjv0=A%cn9ayKg0T;svpt0qASOk3Y9Ec+pT$C7E6UdEGM*xz|0xkAt@SVD>po4H zhnVAXP-iFD?Po{Tf5}C=Q;ks?t=rdNYVbPRWnL8U%txQM+QOuHI2**{kLy0 zIzo=G%65qN2*uK3wgUwug;X-)_Te)7yfuwAe}O^+jClHTGGh3fayt2Nxo#2 z_BSyZ*y+JRE+4sW+tYC3$K1ehf47O8l2zhmry-Nx2- zq)@!+ffr*ErPy|9OY)pg$Lw5eyFZ0c^2;6)1Af=A5q)OiBwx-;XL_okZ0|>iF(=ym z!ti)`zPzNE=tELSvbocn4^1qh1xs_Rkaeg@FVDOkruCHxEibZM~ z=R*L6gAO51Bq@nUfsTguOr<080Ud`m;KC#wB`u9xQGvD+)1q+pY{iD8&(5h(8ej7a}>>Hdw%{-%#zJkH9# zFkOA=Dk42n+!&Jy1{;#Dl|&Qy`0Rx^(gt> zGA24UWteWxExR+s`j}wweoNNfvf|pAot{zxRdEUsud;k zpD|kSiLb1vv8SXZ4cKGf-FN&-3Y=baK=RcYLEfL)cO!_nQOe2&$)5L~FNukhdgSUV zh!PZL_0Z7OCwQBei*i~g#!ZV!H)xm1WM*v9K0^K~!rPU|}>-`%b{DBv;VrjVQ2 z+Qj3vXg{TTF>cR0g%8QGkjbYOb{If74R%zVrc2bA9o)ZFVmJj(4A2O_O{tiR3TbDBlAT`qqSl+B z<6!%*tKSEzt)}njqlbA)67m2GZZJgMR4250B#J zMMSB-M4AX#ePF|9dyApX>5duKpw>rcZBasF3A6&G$E)gbxh_Jv-(vcFaP~)6DU7J>8)DVgKQYioQ^r zP24Ik(5%^@)J-YO7QuzcarJR8nR~=xN4gN8w9GAgV_j=nGCsPv`xue?$j8b~E2i`2%Z@m${GslmCeI_3kfV>x$>}Z z`?1}RNGl#&ZdVhcyhOu~A59#TTJx)i4$`OnOxUxnRSX-&EGC91Uy%=KvRZ~HhYB1@ zgp#_nK3uRX*JH0Wb_rmu)f<~qX3c~f<<*a?wB{7ircxkAH_DKk6-ZZFH~sj`x4Jc*Y2{sIo+y zNU24L1|;MS%skKCA7=0FC+vBXl3Un*>H^2Ppt%~NFYVA1=lTb%Z zv4sVE>TUk!n?T)Of}*B+%Y_Op!H zkWmd(q6wxq=7MC+v9TR9BPY7^&0g24D|M>W5=JgjG;I=UD|qDBg(6!VcR*UVSbO(N%vG2Go@Z$!CJ#IyOkGVJ&D?(l=Im^#r>Lw8o!KS z4cJ8@yxGv%x2&%8gdaK#DjSlI2Kl1r8D&|pl8p-Hi*Sbp4%WB4nZi?gi97Cc!)qFq zrd&sttdnbTzMV2MwqG@ItG=SHfT8QoNBt1w&s z*O)kz)B>eOitmaWaC?f9S)`ud&;nmn_D{yxi`|IW{0g1zx73pLK4dzIJj6(jNvSku z-Mo!tT88|bMVAR2OtmT;SXOd=mG+GBQbJ!n9AV5}9P@8cMBkVm^;Y}DSeINgTd>C} zUacej8lKknrT@NCE#|53DN{7)C*nMML(3b%@%zlPUfUn;rp~c4Ei*S2hb9(tMFIco z9KsLLt&1`CXG}w&bJ-sC$Kk(El>Ya}&~&c;b>OF;h_+wOL<<_)N;-GVNVC3Y_Nt`0 zcH?gF40$PJ8SVbQ)NTuhp^E+m>i2oTKt=zxWy5UR%v=Oa4dBQ{Li-1n@zhb;X0=)Y zcdX+@_TUaM$~ct?{A$tNF6J=t`{-T-%bxwU8?C+q7efx&t&z951CL_?o6a}+zK}Sw zzPg}gkg0Xk#1b#?N!&hQhiz$&zDEMT(r)YO#ct}ITt89%%9p6#nnk}LX+(h~N1=qT zD6?f}+ikFc$!oNxuk+&Ct!mAMOn{c*i3lIQUD8a=*lb1oJms{*yc)P%ML$+TWN8|Z zmhXZp^Jw!dc#J4-rx==!ZcDo&pJE7;CbKf7V;R3G0OK+Bihz~-cAP2$)rJ^)Kh&qy zozAyfbra4j{ozRm^R=rww`$Xliw32=OpWC@`f4R zoRk>Tt-HuHMzBYajpyT1cWAC3KW~kdAuC^P0T!jZ-~)Bd8Z-1OX|g`O)os|=(N4?q z<50c5Xj*aIWPp?76X#&M1S;fX)Slx1TntKO^M`?rOjBEa|GpI|H2=Mh!hc`N&%f-4 z1*)mHO7ACGMclyOC|B+8Bi83zsGEm8OW{0L3fcaOFvvUF`^KtL)BX+rSSG_cqrD}b zjK|Jrlnof|C8aOiQhG7PpT%hBHT7}mDiv?f>;?~b?0>Hmi+|5cCb@RUKkCDO_@}WO~$?8S1 zo?xVUtmWg!pX~ptBfI-`91vZ{UuQljz10pdG{m7+cQRCZy=&=UZtbUb$7L`YhV3*Z zyBNb5)P%g*$0&Y$^t@51un}|Qj2|?k7gtbjIZwY=BjRr*hgfsly3;Xb&`Y6h#6_CV>5=<|FSbYK zVN+ThwTsz4A1PCnHtNtN%#LdgF&({~8sC++QR+QevHqk zXnyN`_}y;Gh0UhbcbTW;AwR$Ti9>uO1CcnFkB84i2B!7IFF4B2^>&YXut z%@q-|B77KiW2Baj`VGACX+ft6g%`pqaYcE7tcJ=z?q1s|qf=|h2Up93tN2inAdrvo z(UTnn^R6ay0j5{VC^_|0GpU57tQC9Lyr*4APro$F(J{TTX*?wWtF<4F(A^0HX~C4| z5k4Ef25>^bP#x3v=2ddK9vl<5n+-`3@T8r<@7HKLcIzqE7-VL40cF^w7(Y*r*ji`L z9+5ITQ#Sj|)CB&4^}5USYS8psR#Nr=E}KVLwH@6lKwpPv56oKX(p;LfG;}z%SJh`N zUToHMQG7Dh%W6J`7Ah8_T6Hkr68zOet)nN>P+^?KG?Y3{W;ycNK~HeIB{jEOcq$R{ zpoO4X=H@*whJkNf6rOJF6S;sOl1}J1uzU^2ya{+jH6~c~iwJBd@GxQZOsdg^?jj z>V`2$97d=xd?fvAQ73Q%!}v*#a`K8vGv?}z>4Miv@|(v2U}*tuLhpGYhMxWEj8FHb 
z=i!DKkGkU~k3#0Bx~IQAbfFZ%0n&N8W&cFRsKEVdoIwHDl}4qB`Kp2oho42_dPSaD z>~YOKvfb4?;>Tyl8u?+={G(g*a;@ek&2`TBQ?J}Vum~q{>EHXSp!`DDboYYVTCxvi zI#0PhuuKVX?muTaU^&HPSV?8kUwG2?z?(!gIr!c*wwN&rhdNS3MUOwEMZ*S0Po!S# zObyhj%s69vb?aZ+lxF>jyZs-b!szheGBWQ2*CYcUD7BVg{|(J8k7B&{H%o_@W)(oC znnQI)%=+~B&EE!%^8EmXz#uDCYY|Bhm!@NKP?=!r;wz1wcI|+k{$5v9@l=d!W^VoK z5ha)Wh`cxqwGi?zYJq_G-Cr{HVZfmuWX3MRsO$%CBA_zDM}mtE4zs9qpUjoH`kw90 z;R+T+$r$*R`xgn0lD0~5!Sw2l7LXRlBL)lsvBKQk2pd9-L@2A%vxOi2nc)Q0Fl#$v z$zXlf!%}PRCrm9R^E^2joT97|-RZ})c<3TUAoT^#-zjeIqL^%)b*2g5tb$bpyo_H1V_V_#X3tp5}X2bxeZqHb-gI`Zk z6ttbk!z%xiFXx66x{moA<4oH;GA+s}(}J;^{& zpi+5Db-EdbTZ)OQKC}hXh8I&Es9UGcdFMYI|0d=HWe}4oU7Ep0k|X)}`vdD%21;ZZ zdZzHQLcGFC^yftHjS$$Z{24P`K{HtSYa*KIbk$9*LyQKgS)&qk$)kPva#9D&qmLpZ zlRCY(K*24Q@O1@TE1XkZZH;y|?H-S0bXpf>^(TV9LqbH$>fQl9Sj(@&eH^%YwPaN{ zXHIX%0m;u}xCJvauG<$kR{-U@@@t!7m1|11XPpd5-z1T~DLyKKqTPPJF+fKUu;3%Yvu+C4N&i3_gTi|wC&PU!OvmGKCW=e5D7QN0Pd}=~XN3(BX3r%AflpOmuqS!es9oZ4opkbPGgr#DD=g2xb zC{q=9ze#1`<%7A#fit_^o2BpU0(egwC=l1Pr85M?_T(F`yf46i-%IAsWU6 zwD5Vzre#0NeWypeRtr{dE#YdEqaoz4YVtTFAI@or|Pny9oVw{0qh#0xK#b@9We`>_Yb}nWq)Kk+r`?LIfBqpW z-A`5PRtqnh^;G`r9^jr)l2ng*_};dKVAqx5)d;YWwa6CjiuRdr@=SoY1R8zZ0ZbH> z+wPcY>B5jEDubcVUWGn0)v}lQD#*}|+58ppfB)^y*`vXSj|QnHpTF&c;`BjrFYYsA zjI-c9(Jv+Zl*a_q2gtO0zT>MVC(%xQn0D=TVkFBj{^F+D!;2{sAAaIZoA+g&1@UhN zPe5NcMsP5$7v|mv+QcgD*o=EO{(4gPbFKUn!%RepcnX=F{i%8ugq^|lDn<1=ag8Lp zgnfK3wHkF#g315e3;0!jA1MChXAMSHYm$5${fg=s;8lZipNU}fs2dx*$0vLzB+s+E zBz}8@&s=DUYIS>nIMDW>)g&Nf5jke;SxO1J(0IJ>i5^J;x!1Z^~OhUqTYQslQVJ3#ctFOR9mQ=DtZ zNw*r7lNMtJwcg8faUt`xup96MiIo=oeXVb>oy=xbY0bP4pO+-vOfRL6pWPlU#;95# zL*nD^&Sj0268ck0r3Dvn_K3#$0-F|dkFSK<<>o+gQ-&KLnbvWN#r_to=240~nGQ6s zOjLQwfnEfU-P`@7NqvGDS5OtsMItCtCq7&;%DmEiht#px2mKoDO{s(*2bDP6)l zjwm}B0V`tOzKi|OOcp(Bo<+dC*Gl=xjB%?=hA2|;IW?nQ=ZwbBMb?`<{%&KBd@``q z5BHd(#CP?+|DfY^F})D~llxA9NtF;L;q~>qi|O;|{);U#(CDAWJ>##t9(y>C08zLZ zl=nm(r-b({21_yi7nag|h!_(-ijy@eQ~553u(tX>x3CU(T-yd<=tuM_*hsV& zNi)67VnM>V_T|a+{D!A<@WT51vlEw-44IQ(Qb@1^dJpxbVUf>Dr$ExAs1VAXl8$6M z8l-CE#}B!XdW(E*>IkHoZ{`KM14*XhxbWV@v|*Cep&F8T5mi_d6O+OXTms!7<2!xF zDXuGetJx&%nzn^`or&W){TkmN*Dh|#Mx8`J+i!;#ybqr{Ql+Hzs?>6WsMyo(4G{Op=O z>&>f}vQ72eq>v{Qa>3? zoxAF__kSZ`*%vsR-I98b|K5n#jB`u_AMQK?+jN*a6$i9{c=^2#6hX%8)FpSS& z87V6=c6(2cD}Xe1?CFa;k>usb6P6PmJb0-Q*PM!*XHCU7!t@6L1NCXEMiVaIK;l>F zC(}cY(hGCEF#b30@Jc8zJy>&&;qbskhrGE`pe(w5&}C4Zp`Ga1QK6q0?!zd;%E*ol zLza`|@p}nX3{%N64W%vjy6m3G6z$|X{FHLyvofHS3AM0h=&c&mQ);Q<-igd6wV3s= zjJ{#$h2C#oNtO&V@er=Q*kz}5*C5v`x4d^QNM?W3YZEkJ)N(Ew;BY(G zBfiPX@9kp(7>rZ33GYzN3rCfOuS1i)DSd5I)Rr98C_g-G{W6r~2^b_s>j~?~7qon-Rwi_Va&^C)(2f9Dgx8Bfnj+D;u>+4U5d<>G-Q( zV(EV~r)7+?*Gqq3t+_L=Wtl9Gzbzm{r~F(LI1VrlxGafOV)PhicKUM5X>=%d(_pc5 zTle!6L=_>qpCh~Y^z!vP6q0>kJ=m{9q>+!~g$c4wzljx8dAE6VBK;*+Ru+Vf%&{WonK=OFkPqpOj|C$VtO8Y{gyE~ zPEpw3p|%!II-RA~yem8LtzpEWakq#(vbq6Qr(&1MOZiL`@M>As2Z9Y>$(PSQQ@twc24UqW<%aRY=bbTIoK!ftxr!uKX^Z|)0u)1 zzdK6`{sXJZ)3l%)qs%Tu^aqypL915emF_j?_4hTuDMfyiWzF{DS$)0Al`d@`cz{BX1HT)CMvi7h+dRGR*NQ|)aVqL3<9GQs)gMwJv4en zI-$v4ALa;wB?|?b)XCNj$j{Tto6hIKVs6gwo_c8i0yvr_mo%<AD-bEF43Cye#Io7OT@^6W| zol6RGXg3RatX%x=oxhrgzZEY=HP}M9axeuraqTQqP$nT9Ui!mi`eIi(gLaU{p|3rh z6u}X|n);MS1f_;xbZY29`4u}(^EqcEwJZsBK{1IctcH=L@C6Z-kf;3chDj&gr`dI? z2g@&~b$iwsx{hrj8bvp>l<{7VW^d>)sacKULE^t6M7D*sMu^suqT@J%W&}E)t$5$B zwtXxvn`N%BiD_b5x~sDElWU>HwA`v%#4UVn|N2?36^-V{uwyPz<6jP zF1J0Km07RgTDj6mOI%>(F*%~KKGn7@XDrhBzQ0{vV91YdOI)IqHi-01Fuq@SRw3G1DSWOxc(DdtLyiKuI}cw%KR|2>y$X3#32Z&G-J07H%l+#6@XZ zdlBSGGY=m2)|9dj>t#v`P9>S$D=dDksFZ`oB&A@Uv3maGOf^ui!>3Eth7naeVHZ^? 
zt#$zyh<~-=90LAgsHYHE((gV$=2^Qg?8i}5c;o}r=>}r27QA`uM0-E*s@XTI(_o{g z<1t~lW4s`|dF!QDnt?rdq&?=~jENhpye6L(Q7dP68O*l)LP{PIBH$zyqdjD zm~^3sre$%RPEKfrBn|p8=tlD?0$&T-dxiF#CO+~bSyfs+RvlUvyzG832`923+2=Lf zcg@%h74-G`PFZn{$zqWys_(dQD%;Jvw6S3|q~3qbanqT~`5@MUJ_I(@x2z3osi!FOB z$&-^mcBUkNUKn2QwJ(BQYzqraD(>XHdFxCok?*SI9saIKI8A2oEGadv8drG2N*Xv?%4ng8M1F^3~(VC%%6<$5r zSe6s=VU?vAXYY9hRBB$x`;hOaZDJAWs5!(&ZGjG54|^~%sd#8;m&+8Qy4c={X>8op zKHJi;>={X95h_qCc2K*}HWjil)|pgwr#aOtcyw+Fv9D*S-t=BCJ_KTB@%VAN9cF_6 z&zjVZG3A7ErLQV9zRPZ1y6zXq$tajXDq{+R*oO+oK)63spuKGXM){a~&6iBA8)ZmH zT=vY-KFx|*newIU*Ai6xT<@Odw7Hm$-Qyo&yp)d)#e5@@eNxhkqn@(8{m7hHTHZE` z2&JCQs(W203W!c8K68J%MrM(CN)$!0MBep7n#?P=7DW~^T<*?$W>;(v#F%;!G6~cQ)t}G_(t!bH%5Odl+LT8LK zHB7Fptoi6==cLNqL1Se#P%we-)zU=iShCohUemSafrB_Rt^S(^MCkLGNtC@jrSS73 zHmh$cgR5PxVEimT?l&nK(dCOZt6_F#rTJNwZXHJ7x5P;@HmLc^ri3pjJdhX2hYiU! zN!KTXLx*x<(khfz%Ddh617_H2*K%~*B0m=TeC{8w2#?Dp6r?G?Tcbzl&a84*l6X^0|uB5RV_;vw>fk&veD#3#wY*nEzjzZ`R|n)x&( zN((-(8B*{(7ox2{fi>|=F4cAAU5`7|Oh1Who()aSuuL#CoV^Y8nHv`8i@TRy5F9Vh zY8k$6SccI$h6Xj02^bBYPpsB|1v-YGp-c{aBSeW8@ug!)ojh;*i1(*5r*s4xmN|19 zIs`4}JOss1(yn$l)JOeZj-8)_vRKt!KUkg*OMz|xviFKCwVFc)c1XcEbdkV6Os!hwRm9sFA4F^Cm&YN0Ldj{EI293MaAMo1bZ)zS5$BxgY|979yL$R?VyA2vf2@hKNoJR}am$pZQ2y_6 z_HJ3(J1~`6!Jyi-PXI%JxVF8}1CMmmP-fVJ+RiVJNaNT^zkl#nXv$ck>TDlP#=a=5 z`~z$FW~K;NAN7wHZ?pE-HB*iy_rN&hQ&!sIua+1XDME{_iA4l1U}Oh_HyhQN8B-qy1XhYGy$eIOE2g2L0~_d)9SmNC#jl z_CcNcsBHImvNhW|K$u-fjpH1pgT*zb2erg}bFM?O_XW;KG=`5}TQe=T3EmZuA$A3@ zeXgDycboJmKY%A|VIo_VyNg|%VqzvE+FS?Ld+rx&Cn){N{|%F!%q-u0Lc ziQdt-$w*q7i}4e>5-nR){#pt%qd>ppTx@I6Z#MGrd;GW` z$=e8MKpeV+L@X0+6@K2B7Pgs}$!HZt5f-Jn3%lP}(*g>8Fr>$`>h=%~Kli~S&J+=` zieIv)!>J@G3G-79;E0&f&db_oF#c}+Tj)jCTowhwCD zMAHYzGI^y4820f=miBV%hAg^t4>G#kKS*GDc^1Tc9F5;e^r=N2aJP4!x#zmq{BVeP zwe_LmFy-v5g%9a&cyf$vtD)i@@VkWr^7r>4nqmFOjrZ$`M((@>_QW;qS;C>v~u zNnI@T-R6>Z9bvLk{ok!xLG3*|%7&*U?GI-8+ywK~M2@r;I{g{8UTpOiRt2;t!#lg=15s$&Uy8bugWDzSyq$8%#NpajYDdJN^AK6x8l`@3E6 z>zdTHRBPWLOeP8{ZQT|m*x-_PHwb*+$OZN^C&Yq;Gs3!JZcqAwLgEoAeLG=dSB+FU zPGhfQOW$6K!_ibmL+qz7$r9e8@+0*N_nAK{KDRVu1~-u`Q<|=(H-x(#kzdTWEuI2@ z(e4*~`ahO=nV<7k3LIEH+HYMDK~z#&DHY6;R^QDQg<~(rmvMhhi2h3+e)suhdb+oD ze%zv8mUNm&4fI--aag8`6y47PW6mkEU|VSRZi+v26v~xPq8ASHWty9&E_p5$FeYP)hcY@94K!>i7A(5rEK06q`w66Dgl|Z(VvVrLu&KBH--WyZEu2d z{0XC%y#BrEpgcL1hpptpVw5p9jW4gW9UCjk!8Ag2EkT(rOLAS#geDx}=9bYTz(Z16 zv@(SB6q4UCL4XiRL^FElMQvPuZ6D$!o~bUgNJNJUbUvOTZfEGWTsZ*hyn8}%!BC?2 zz^|r9$Q{1sRo?`rmhy+4RL}GE6zH`~ctHRioL727G-QzMK47x ziiCZmQcXB_B>ybjd-OjF_e=)TPdu*6?k*N)5%i?h4trQnlqZU$Q6+K#` ze!;k(TwjyWbYU{o9V{6ExgQoGQU3oy8;xlu6zzt|gc#LSa{Vh40n>sHayIvW&Ov;t#CO z!rGVzjP{Z(=bjJ$VQ4}3mh`IguGSD^Q_|<-f(DlLp!9y4m96?b1aBCtH;8LVFO6?J zX7UH5;-eyvEh9q!_ORM~-uWeXWtwlWTS!!meB16;j8Zl%oi=-tgH!G0Q#_bz@3nB# zDy^y5lf8Kp^iz>enC&lo*0QmK!x z>zA-<{YXPk9Ve?Nw0fPxZX|2>QdlPeDQ(q^5joh`OOL60Nm zovEN6+{4AGTc-|Pb)l6{blcDhls4v})th}v*JQdpb}d?DMy`&ZjFG!5xfoFIi?sZKWmi~Z z!evpES>RM0t15(FHT!3Qm*LL4%>|1PCpxJzOR;-@X0ys^!!~}FzDpY&_PNAAlJ+e2 zm=u#FRL^b63R%~r#N@^jyLIyXnOCwBJXCj#Kee-!4Tt0wbvzIkM0dGY&1n5u@cr(e?Jy{l{#p9ZNm7DYNxeoc7Za^s zs=sz4EA;v`1#}~Pf4 zP$^S?sH@$qUI0!{9N2!d!u+Kt$W@8QBPXBqC!=oQ%yZ2wvo*0=S8aVc-Qi>9%gq<& z3qtB)yN}4=M(WW#^+w#6q&nLDhsoYB&$N#Y+#w13*(R=V*Xh6){zlv`ksuePcaidK zm%|9qm$Xzf2KU6cClDnbQF+&>Ky@;kc_BYP^#J|YxVULp2NA%sW9)o&Up+!%(@BJc+Wd2bCx_ejf zs%7G|gnGHceQrr#&{PZ+Ad+>1=1-COmNUM{8PDhWEG#1DGgW4;g9tG4830_l$m=={ z>6JDv%k;~3aj}yY8sNR;Yod($rRFCqlbuWAzynmI-A0!62Ht~BVOCorJ0X6%tF!?K7Q_1*0 zVRo3Pl<=Aoa`^<0jro4HN0dQRWiBmtS5>%?8{pe~)=KsZHtLF8j=A@34eL z%9sDH^(TnP9*2KE(&N=v`*`A*S!=8yrLe#@qWw(yjU~b0h7j8)ED+atf}wra85d`< zq0{3$BqTSodVp@=!|71E6!_$>`Lk`r8hr^up2N>G=pL?dq2A|0X*O1N_6VXgR(duk 
z%V@~#iqTe`HGxz+(K!PA%r1bJ(v&1_Kw&HIQL1p5=blo@4ip=b>MhJJ=~l#Nz}uCR zRbdk%-tnV|qy)Yo+^c71l#;-9oXJ*?o)OKA;g3vbg-`o2oY4#E-&@ibo7v@2zjXQx zK4p5CCko%NyvwKE<}+Zd?q|t^0T!p{W1cN|K@8me^*N-;SQ=>Y(CPkHAG)~21Q4s# z(sYLARU+E1-K(+4EKIpj5_$Uzv5yK>0o|MWteH9KdhbgZoV@W<@4oKgKyb&uxK?gn z9~23LKsa~iK`#zpdhgDzFde>lE>U-i#kI&}{z^+Hgnx#Q#V|O}UT~M5LF<$0L&IX@ zJFbHVkDSu($s^I(9R=xlw=LrMV!$@4Gsa9iN$%d!UD8Epoz|F|x*7mDetM5FWl2P# zIt||5>N(Z;N%^AG#RXnmlm7WLoU=W7#33ndYkQ;=lSrAOJZ@k11X|_Koh2`?g}-Bkl|_ zA{gOk`*e3^KF`?o!&BkU*RMKbJLR=K0oWo8_z%YKK`PfoDY->GEGw3NSR`dmN7$%5 zKFux+sf?pbHl*i$h)F3#A3-s-NZ%e# zyAl3iwD;BrJ)Sv3cPSN$X+{L5Bl^}RTXi_6D5KHc7ZR&Qt5Nva=fXTcs-!z z>27W_JVRY;K~Fl=z8-HE5bG>cX~CeZ)*Ai1adnV#K~ysS9et$g^v^PP=o)R46 zkcoDjavZi;%mQCj(C{fvp?O9;9hN_k@j)@qnLM>~x3-DtE?+g%R!(kd~saNzbU~ z?-75=2b6pnY*ZPU&)_F*#6JI$T+_(F^OALlIzJI;a@~JR^?Q#PF|YqMe;AX*F|GX* za=hV_b^h;PVDk4K&GYrJW%eq@Q2nB;HFgU*#Quhx!O_XO>RS8w)xGRe+o7?q_Nxbp z-Z4o|zs!Js`y3}m>Hpq0Pt-K!vWv^-HLP!$YiF~6h&u>Sp`X#7G2)*)CRufDGdni|M)kz^q;&hc9SV?>8{ZFGP@Cpn ze*YJ{w=}BtUo2mBY05^Ere;>FIJ5Fe*2#|?YfPQJc`n}HNZyJC=7*Ym z@i+j9fP~wZW&Cs&&EqpuIyLEO9kix2uIuq(BVJ~2Th_+cfY$4P)yQQg>EX(Rm-a-o zPg`S0IWx`!mcDqBUt#qUe7G|ooR`O+*_}io}SyW zvws@f_Up5tNJf(2p1kK^h>uOJTk`Lft-5Q@$iA}Rs2NZ`U>exzZ2Lg##o!eyTmv^6 z2dz*k;MubM)iKEqM&p|KJ&@6h2e6P73#*eWmbX|=;11yxO`*5+_ z3ZKQ@drkZH5TBxP8{R8Ehw5gstl1o}(M(|xXQ#A~L2*M(@~#GXw}kWlcV4AI142>b z2A<DE!%m+k&ktTXCSKpuJYXw57o)gXf1ShW5VguY&U$ z<0X=U2Ifn&R$rFM>eqm*T*W8q0Rf6LFZjR!Yoex>(k#0rek^RBfiQ*?Pe&~ovV>|6 zx;MQQnmW>BJ=T+!aS}t$H_C(Wr6`yp!7)gPmS-n#`;5`Idl!!@$A2ir-TstcQ$Hd=YChCZ?{Fu3Tc??*E@<7Yd~LwW5kt~` z<|X~VczesRwia$}7fOM)NTE1{8ZN=zDegrQBuFVva0ymuaf$@DAT92}El7(y6oR|E zTML!-WvzFu^{&15+1EKg&innFf08+qYtAvpc*Yp_Q|Qs1Dj*_3!at8ah0jHxH(pWs zXf-N%e!(Q~Aasmmh|6QjI=zO6e!4;AYY z*+QwSbx?J(q{(y*OP7xQ-JGrC=OUi^!Ov z*HybVB9Z%P@Nsw40Ia;of?n``K}1|1v@0&))gc3fcb>p**R^{}$lxr6zPnyW%8!2q ztUksCE8}-~XVX<;cLS!9GXXPn&WN67p$Uwig%uWD44E)GL7xpu)JST|KN@e8;xOJ# zub*0rd5Su+iR&I#kpIR46>dFr2Sl%?ipKB?nAOj-v;!374luN9f~(fDi?0yCyX(!y~br z+8R=zqOsLmy_2-ec7=x0Aw;An1S0&%uJ7KeJIf0O8r_KMP)|io=GlcZGOI-4vB$;u zrQ$0YMzE9lx%4R2fKUwPY9v}GbdJ!`Igf9H}kB=k0yTR^q<8n zkS~64DQdjz0rHz9X?J&2w_Hp=wKf^`u80Ec?r)gydKYwo_wA7SiqlHk;&uk6Y6Gq? 
z8OMGHnrq5EIr~Z*xsUaz$dAUYT^^F`20j}8F9bckc4oC8)C#^uzbdO^z$r_~f+uNsO>lGfJUxQD>pR(4&qbsS~p|3B)>x zX^$$|hA_ue73^M$t`krwpb8NOy6ETZKC>3??}>o3KIa1pwyG^8I;+raqNX*!7*1Dh z@Gu7x22$jgQ6mxMl{xm9K+q~iEG}y3aYYb~DxY;-P*!Ryaoy-$SpOYbvRZdo|WD})|y&P@@14J9flr;n^kN&PsX`Jh4UgzWPkO1ZkQY?psV&=y8X0~~Rt(9mTVi?b5DOyK`MfpGcz1VY_q zZJuvbU351!s3UdI7DAX*BYx>w)yT)dcuN&MTGk>;@p7RouL7=~&L~LI+9N43GL=9T zbL8$#j)!4mViYyjzZ3H!y}ur zI3u0)TrG~ZtUGt{dx$f;=v@&{0?>}Ex>~?n(fB~ZFD%ZlV${1u0TJ!v@1y+nKon-p z*V&M1VS$+PFC^2IE`|j_ShIC;63Sk z98lF;)(oQVL)j3!4}!@fTO7dgZ4-@Jenz<{;e=(n&K2;w0F((B0uz0EuEk$6VM z=6OLO`tA9&G%=vna0@)Zl#gJw3(9g)XpdO1zARa{USq^g$8a3fZ#qNo-6qc8LE zU595@WqGPbqQR8C3&}{um#>Y$R$BV0x*TsylOD=G?XrQ6#5gbNAFKg zNNhufw?9~RwTe9D!(94*Ou!%{_(woY?w>JJHw+NY`3;MtXNL7}o-M|4GP=8g&WX8Z za=ucpyd<~SsciNbFp3{Y*Fl$rcq&q4z(~I8G z~SW|A> zM35cF?BbBL`Nka2*%MadJipu~LucN(lgUE(0j`OthtKvL-Hi&F799Z#q0+vd$#aK;`u?9ZwpwOHtpk*-tzE2Q!?1(Fj{0c;=;?4B;h*4S6go zy3O0qo(BT)cy@C;r##V81iTKg8O49%y1V; z27VdGLZ%Gp+O=|Oo_%}gQy#(1_)pHC%VvP$M#o|7bBTM;x zs=|A>Ta3I7_%NBkF;1s2NkcSFv=ik)x4s{sTF(D{x&}_{E_Cf0`Gfd6xulEp67;$z zB#;NQ)%0a>+2R|;fr%2mv6O>Bv+`meR6}z`iQrpv9_DNEUU+Y(ySqA;L^A<&=bNsO zM9ypd$8C={Al4$)1fkWVaDy=xkQ&zjTmE|AL+r&Db2ATC0ah*z%gKJow zx3Kmn_HS|8imF;3Dm6{dz&Rb8AOJ&-hb$l!<^j7uKlwqk5OpA`HYDtDX4rEW@UVPt zoYdhN?a@W$rT+;_2q(st4L&6+=r~wNYorXRV4P?Yte~%_DFEm?bmWTn?CT7Amn_rv z9=)k#wCsg5A{i#a5-N>kno+8ICb$j4WEwBJCKW};8oVrjq;tKU-Ewu;J~a6Dyc49y z)&+me*;4&gK8v0)L1z_3YH;edH&_LEa$IGp<*+rc2V#R-TSDR17Mr2zhUVW-5P$dP9=l8OsBXa z={;UY{seInN*9+()IPLeu~zDT(R%rIoWZ#=K}_9B5r`If$H5lp!LlX{%~+uM5hYA@ zDYUl#EkBV6tT$ikHc~yfA;5U;z8^m7rwm)!(wKP}k%fP%B&a5cK zksokbjb(QD?R({u!3TrTPyon7kW_DCaLIP-eaDE)&Rx#Xj(N6;m>s3BcoC2H%Oqk) zb3>2!mDjiZ^kLp-CeB)O+~)3^DNosi=14P2t2+-2Xeqb-#+yfm3XQ*6QqkXk^!ZJp$*0OHRgA72E}u+s5QI_voKnafU1 zbVXPsAQPCoyhHO+i^jNK`dMcOSe#RME$=;Txm1!-bMJn6Cyn_mUw2Q-K4R4&glWVBA5@zBV=q0K-T|;rSrW!o5SYh!E3PwW7e&( z>Pz8$VKNmXJcUhXxNM(fPm%AzPjWljRJt!x*#$AV-Q+R!!U`Wo7;WZwE>0KqsX@9* z5#|Yn3A{whPl*(S8jK4endq_MfEW2Z*^FgYy>MF&dWE<7u6trj#`Vn`-W%EL=uf_w zhogvV!|EuZ+3f20ZCT0H@mPSCyZ?_UnQ^K35?73K?ta=Jr=0RQI?3?Igra}qLk#-W zdEefyTzhzsi>Ox(FX%aE_KQEtga1^+{U5J*0+k8t*y3u;v#G?byuyYp;CbleWPTm{ zc#yy66W>xi_5fn4j>_hhEBcs(n0s!`}28R(Ztl1iKCyzkhcniomt+ z%V=aHI6e9K^yTR63E?rJiy5Afj8W5WR|yh2zVKeD{1f^;?nu4b$8*#(7y!A5^^Cyw zy&Y{440iR8ZpOb`82ds45H$HZ!j`E3SqP<(BSu%l}!07S4 zJ0W`g3}lccGQiP%uZ#>!m=CZ3JJ9p8Cw0U3$r+)ETQlv!UKDrlrSv*AYU}SB6H`O| zwR!ok!~YB`_pftMUF6*wes66TH5>Q*vHL3};Dj5lC%zoC?O_`8X84nHq*Udw&X}AY z2WuuBzoM4iC0}FOFWi9CZLO)6I?x+a!oyNDh#{Dl%ac?^o2YkA4W!)HB-y>c*+)O> zq!dfqquvwA73Y==vvYzS3|qu!=dK7d&LM>Mn}Udq{T@8l(iQj(3y+(cGfMxb#Zqh} z-7n(6ZY2YvprcO_Xx+ z1Q%~YLFjPp1Q`*bI4b(OHP+$iXJFiF0}$Ibbav3y?^E^J?6Uy%Xa_-Od|Uire6l{5 z^|CDbwbb<=7EZN7;4Z+)sGYG4X|GVz(MH_d+<@0sp}0xuf|^sKiW(7DiRYD+?`SdZWL4N53QH%K;K2wxrsOtp5s4(5 zSmz83!8oZ*jZrSC)NJZy2y8PoqPk9CED`8a!xX=h(!4zu?x-saSu)wLQQ@f1 zOW`kmB`(Uxp zGCH!fZSUkatRKH@Chg&mglu;T<9c*yMiL%k@8G9yJr>Q%*8=08C#ujZLDu5 zr>g=|wNLlWxnpEM@3J_gbsw97$BVQHIXR9e2$Sj%m`N`oqJJ4bc|~fIAs^i@(pcqg z%w~PL(K0VM1HHNI1ZirU5>BRYksC&HEPf{-S_4g^lPJK zTq{O2YC{4ZsxM!cR!@V}6q9{2)EdJ`IY|6eIOtnwIeGMd|~tF^XA@`Ra>(D850(($SMh z%G&%^Y`K_X1}|1yXMtuEe2vcijnaI}0?(P<=|{FU+PPisTTNtx$(}`NPD$4UpK?A; zkl1T!46g6gplOIue|Nt*ZbpU4lqP*GDU~I22?xGgB>gI2qv=TJmbwu2#SVoE`VH%a z?Xf_EDD{tqKGwa6{satR{<|>67kzeGNY>p;{RiHqb}Z(WRfAtJMje5;*aKh9X6F7X zaXC`YjxnSXt}836wpwGJ)<$1p1rB&!5{+ipK-6GP0@q4c8ebO~6L6LHpaf_diR@ZXJIqFwE&6n7Nk}uzi9GrQ- zsX4<84`ti*jWl}`Ahz^s)^Ycjk2O{Ls%G7Z4zIzKZ9VdhHSxV|KMJ%tT?|jvF7^l1 zwPPx#QH=}F%PcsC=rd8M!*^S?5a3SrMQI9Z^&mzNx|5n*YVG9lE-`&|RW-O;dB=2b zd521LOf}H6 z=B@5cMaay&X3fkSw$utQ6Fa}eA3#®y(UUEon8`HH?DJ4ggwsY7DGR8f1~ZP+Gq 
zd>!Mt@*=e&Blz)r5=am&cJUyE<1@A2w7!+|xN~=WJ&9fPVW-?W=M#DXEqy z4@S3o{`Bd8F%z+V-8Q+@7(nol$aCoH0~{zOqx^>wcnJFU*lq+t>c6eHaJBw1p!H8p z@E`IZcj(7B<-0L?QW$dFA5{g;$mA)iAQ_K!M2^;AjzF=>?rGdZ-lq(C!zZDUTs#eg2^K(nh@m=vq& z)|XV_RfU`j@=um;HyD=3rctKblv!`h;U_u|si1GZgEwprOt{?%%zDYU>b3Q-tB8&51`5&GeXJ0hzR90*WpRMqABG7zf&GE%6dq?|8XQbvvgLhM z+xI{XG3#(gW^?83m;}ZB2?EnEt8e9ht|Ifhsb8n8sfuZ`mXtEkE9T)C9e3b#=76Jx~L*qcQHnoK1|2}jlI>wm%v1EQp? zTqn^3YT|3f(S01mnsx+Z7$L6Lo5F~Dg079`zD^jC~v`m18{%o_qhNfhj+YL4b5 z0}Hb%EwRha+?dZ?8Ubmij!I3N*w?Ep)ta8RieLVbx}+BIu9X*37-rfv-($v1=eXzw z;cM8B()L~*Y%Pr^kJeAp-jvYA0!5Z3c4<09 z6+elcuQ)0$0KIBpXeuqX{Yfe&fbgAAdo0R4cR{3R<{fXWHA!FFX0`0k3t6r>O|B`gFI(&2x^Rm9xM!PX^Q%PoH>|13JFeM*Z6Dh!qc4e$ zaIe5w-LU6EEy%jIfnSuB?EjBJf{o*2d)8sjhvCWJux>HA3T_4(equXga>?d9t94eO z6~ZN7>+8C7M0?O4$_=^yOQ7U5ly6~u2s0uhd`|w!BPyPh`~qV*_8;V%{o^^lSLXlJ zF5~{H_-XBmWyKzv!20<;!Ozh5L;Tr4qptFnc-M?EUBJ@Mw;SMo^_|pUZPdqiu${Sk z1T>hOL(ji2*XAx}v`Ei=VtKgkyqziqT@_=UKx|@q6!q#e%A-SM@PNK1s3sTQuAB67 zMxJ3yi(i?MY<7(AT^%M}Q6Pvs(@4F)pb_w_NmfCtxit?}@{k|W)>lP7?EKKebZHbT z1b-5iuz@8wpSbO@NOIJO?toz zxxZPuH28Sjtl%c`%IS#07(NZ5iDD^1ybY#`rc@~VRejvoL-0SO1FB{Zrjc)?wr_g4 z=bn7)|5|&6Ol=N5KT+f;wv3v)Wqv$(UN`x9!HVBRLc`_NU8;94hW1d-9&(qRQ$X8E zH)EB{HU+5iMDa8+o5XG8Nhw?t=%-~HM5StLfxGJXbghG7- zM&SfXyr{{h()-yLk{Yo1Fdd{@UCFMhb$5B>n*^`8F7d82p#gWDzAqbBWm(=^Hj?a0 zlfXIBI;&yT8VaS{BM}2$4_+OU{K#?N#n+^+QSHMIDW* zpX=9z+OavT%bOn%NoE5Ul9IXTjr0A3)7V1Kj-k{$Z5`#d!%dm*09!f;FnZ(zk%Ax| z^&4SmuR?1cv5Sn)PNWsG}nzPT6IZJA&Yw=cHL$ItxQ#oikUA|Eu9aC zs&Iz$X(d)QIFh^`8}hZVRKMhje*0O1+wE6+el zPE25E2{1lIBMvdwpJYP~P3RUp?9_eTFoCbDBs)M=wa>$E(gj+; zCKhkptoVszk@+m5(i3)T%%X&$6^H{Ei!6zt*`Iws0%*}yTN%nX6B(Ly;26ebmt*02 zrB_y>?&cLE6KTHJAIY+_@N2`*adOYu717#ocsb_tXd1=5U>_K{h>syYnq`G(Wx2nE{ z)yF_G`I&bk##Nj)hkNmkehklC$=a2v6pD{~&VPk_FznTEQoSH(YmVTq&mg1Yn83>Yj zn)wxOdDoPq@78ww3b*|xvL_qrDPLhxfN1*n+ zq6nT-AwK{3%VGOex0qS}=c-o+9u0&lZrx1@J~WJ1f)vDYa+D?0#B>O?C`Bc{7BU4L zJ`-AV8;rc4;V3c;W5C8%xrCarzp6I>P9Xue6huqeeHMj=*@p$sj&%PBG!~N@|MjHv zn_}3U1~kuS6M}v;K!B;}l=O_17t+^0vmfMc+|_^W@I=n5PUbeUMFRdNUU9;7*A%ZB z%XtcjUytbfO#etkQ=LtDSXWX3cNy^vFiaw)IJg~Mk1!(sc>}1Cu}v#o(#Ru@Xr&_^}m7Pvbe0PZJ5q2-}45cLUo9js09 zG`W_4SiRoOT~xEk%(-zaK#f7;+u(R~-9V6a7aD~OO31xOq-!AW95*^cGNHH_=;U87 zIj*fsQK?Q%yk(^B;Zh&_CY>zKa;{|?7rWZdUB&xEsYx17%hmC&7iwm7A&QRLk2o)* zlqSC4lS|v6R@v98Y9Xgr5EkVqMIArQ+{UW)(!pN_s_dKJx zEwlT$CU?U&TP1D)?Vkr`I)UhP*dL1JDn(=zEbPa%~RY5i95 zLmyt_SWUoF4@v}tEqV=Py2w)>geh+8DKc)}rXDml02XT@){H%0Apt=qk$X$`TB3|W zJ}8N~K@oxMq71v);!O8YCm1}W?5<~=>OgLVqvd#WkrA`983w<(n{iQ+2`Z4_H~X@N z8@6PypRw35TVkdJaN>@r5;hr|i?bzS5y0PgygqCmjiO;Y(R2jF>!~p~jS>)EUUk&5#7pexGFbf@MybxwJwxOG6#^^ zrjYqi?bLSv8*;=XugnQhSWevfrN+*tXwrq#F1;IY-hcb72ixZ#l{Mz@Z-?RkKTYKS zIt4+dz)jA~d9p|m-bAYMx@8k2_i=2tL0`j;?MkcT%J^>UAUhP#F~<~k0b*;VmCihC ztKpFh!SLKve$V(!NQ>HtMJr2sM1)d>0h^T#oh#GsLnM!fvRb`jT5e`cYGh2iGSoDu zKt@poTzR{+(ki;`MUjJQ_CFT?r#jb{I5{HS5!FlZ95VrD@YP~Hblv2>d%)euC%kIc zO27ETspLfDh`_s*1bWlv2a*c%b)+a2tK67TpCKQn=7dIRky)+p+^>Jb8e0DaBKvsq z2U^+`x0NGrTQLiWn6Ylxtr>UB{b(5{(N~59;o`%M z2+1NII4tI^4GoEN!^0G-@I#YntkewIg`(| zO?G$k%Az-S3(f5wYtKQXFzZ>ooo5YnpyRxP4AlK|1dr%Dm=E!^S~D**ZOyv9X38fB z#wF}=c#5@?a|i6~oI-P5DlA&u=<9$*^YntT=4(-`tl%pCffYM2&xC+|h0#zCsmc95 zk(#!g-LxK;UOIipy2&PISl+DG@L}ru+Bh(($lQ^hs4(t><#YG`Tg?F#&?S#)+Qvj$ z&=QBYTfAiW;Ru@;HMf?_UaOqb`&J*3?2GHo+6s$C?&fF?2li;g{5OV?^SdYdr6qP_ zm!$E4w;Yf-Y4+f%*<}aU`?3*G z&_Uc(Qz&|JD3Mn&zNy5LG97;Q*1`){VTf)$`E0O{Pw~-=VAt~r;ULkO%Qn|a1^-gI zfP~!Q!hCpEa*{*(v4z`~<+O`?tj((tPG=ji6B$Kuak(~R)-{{4>oP25$PY7%&b=kd zY7KO1lv3RqH_h5-Dy7t55TpGLdbxS)WPnijIgPVQOdoX{hUrT_&jK%JfkIN#3;B~^ z^oa!(43$jNDWhVg875nw3$3@a%kff&eP6Vh={*$A^O6kn}D7R4B;9BkZ2 
zbi@z{#(U>6_(PCU#E)EkXLi}s4nrUj2sxEF`zEfsh~|oZ)gyrHHgM^o-;0Mf3BWZ` zJp5Sk7=?25=0QnH+kIbDC2;KGj0(l~OTa25FWPjm)&$zWZMzyN3{@g`kT|j>82nd; zc|37s!>7DLUJfhUh_i2ouisnM4`nfgwv!J{d)xHTdl=JcX7|xJ+!FkE#nAZu|Li$6 z_M|epqntO{Fzx*3&)IgE{}BBDQ;VH{H_c^Vt3L~`=8aw_5G3jtKeX5h5gh@1J{hX2 zEpPC1j0{Wy+tb?9Djt#Q_EIn2oCEwgBuPkNt&-Mt)05LiM|uLDp5iu$d$fH+jZ(#d z=?jha80Zd^%=SCJhCsqd=wWP%5>?xAZglD+cgD=d5^=7_wFbdGDQH>enM)I(X8%=- z4^gQ$3F+4bAO`CtvrjjLE9lPvw`Py%TKFKf)K`;RQbkEtST8n545*95mr&g$;vg_S zYwqK%iyob5(36p0;#o>Lvau@Mg65Sp^kP{FRNv{fYee^GsN9=^Qyb9s62HRxT!R_g&YDr8<~uOjZwaHR8`Igy$QKyF?aSQyv;}$n zl-Dt3p94h4224u2^eDgN>7b4YuEXmIe9u!KMb-r{2tq5lS}F;7bj$W+<+ZXU$;d?0 zg@x$x-ULE7cQrjK_P|#6hXD(<=EFE?+orRLgoeD55-zU^K=i$=qqVs5=JVlIUZFe@ z#Gbop<6FI3b{*G0h{A`HFlGntt9ceiv%$?04Gzn)`eZK5lqq`@%XtqWPdsTW1&NF7 z0-COstbdJO)jh`T6C0c(9a^8Q}o8LLvd(evIFlZ zo%+fI&xzgKt{+{>a0KB44q#2#q+=5JRvFkWT9_hYG#ddsD@!p1jM_^RzZw<}E441J zGGql_3}bo`*(5U}Hra+3HXT~Im{RJWiu3Bhjv1Ha1vgyZ8%g}v{519qiuwxK)pqpj zb)!PTuElf<@z=(i$R!e5#}i~Sef8=CD8a4l&oAHZg#I_HGC9W1=g;2#;}p$ZS||YK zks^OJXuRdZr@xT(^>aRla`5|jM92DRWxWrO7eUv zlE>wHMd3IGO+-b&C`Mh7;Q=y9-`8-D!y;?s4R=g;-8;NfKFRu@qFM=gmZ6fvlgqWmWis`i6klq%%G&nHl7 zkm5z9N?(qBT^H*|tsZoRDm;r)Uap*#DMJiuY&+s6${U|A`c_wqHz6EfV!TDPd?GYD zRh5r^a@eMDT7>Ub6H}A)MHs5lxZtxMXuhW!Xd-rUo_u@(P(lNt`CqM&Md zSZ!ffP-fIvhVb6}YSD;uGslG72tN)YSOQGeNF`A9dJ<$X00a@+aYE6t{q6uNgG;s` z0TVW$yByM!i}rJL2XLX@SmEOdwm%k87gG(n#eGrWc1so$csqnugY)@9UjT;cKGFK0 zs{aLkKASxQZ6(RwTemhaA$83}CD*!r+)DX5=Xf*4OIBCFoV?xfB#H9AMPuYI#D$oo z4nK~*KG*1v8MV?EsXvboq}SFq>ap8i`Ep#-McP*P0t;Kl^f_o0O$gZfMFF$;H7qv6 z4QeLmXc&K#$!KI1I?K@46gDuBZDR>zch5z{%-rJvl36E^>H5PT>j>L7=ZpXuALBnS z;)TannNp8$9cL#Lq^T}}`_nPI!YY2V=;|zO(Nt^2ks<-S%SEd&GqVYeZlPeqin9L5 z{;P}aA;|**<@NmoLGCpDc@8EeO@&HruwvQ9SZTC1F}lNjo40s;DF6=Lt;&vdrjUrC z%OaXf8C9sJY~w-|Y?@lY3kN z3$u~g?BP89vmc&9qZGel!8S#w>GK{La`O)m<(@es5z>l-_F>A~*Pq!_(YQewWP871 z-I;e{mXnpll`W2m<=iq@vax?8w5d#UO_iPOlAMTV;Yhc1T7k+&>jT%t*nG8HGc4bC z*}BjX5BK^f;hwoQX)mxn0lAe0C8TFZ8g3HW9wk@~yE^P$F;$<5Jypp@lTt!9wBVj7)|nb?7@ zf}y?nI!gxM?SiuHz2f*?>==Z$HT{HPB=k()tq_O0&fGJX{G4~0A<6tgLWv{k>`l!Z zv;%b;Ar(RynYwG=Lu-8B)7`h2<5;Ln4DR)-p^Kb8UCItuM!57&8I&T#L%E)K=n0U! z$Jpi#)7$LRha@asUtFg=?YM4b8kY@w(;1>Ot`FI_-UBDw|Mz?uyOk964!5oq;u~dA;CC3|l!4~m* z#0jP4h57lOAdo!IL>*Mj?lJ9W(E)MNsk(|BL1amYc9>ghv-jlWkffvIoK=rIX!^U5 z#1~}jX)Uum7uDc2q_qE1-AYsEyWUJ)JK6i$}#!>}?1bAzV*euKsov4~-AP<-d-peMwD z#tkCzskMVi6W;KuWMozn?CY2x`cxAS*+=iSrkgQg%iha4RR zc_t%sSuUy^hAm;&&2dzo{?}+{3g-451&xUjXwjApRP^b|3zPJ@4}@P!Z!e+_ObRop z?CTcuQo7temSlswO0U(o!|mhNvJaam3N%81-d(xwkI-gog5Z6Kqoizsdj#B&4{BtL z_aiv#JUO_)Fg)S$AESgfYC&){6;WC**^H~Lo+ zB09YmHW!0>ex{JS%N%V4xW0FAd2z*Z*m$~i|fP1cT)hK!Mipw%rcO4a-$HUT? zi$!`yr%9UqrpNf4t)-MLgj4JA_YV4MauH52n&%B&&k%tfmEQY!C5Mhpk z*q2on7C2~2>j=xmb2>HRCD7Fe@B+jXTVA1B#kKP?C2=b*d-3W_sOhkX29SWB7GnPp zjGW3V_<_LigXbsx9;Fu(_^gMc)#AZ2^XooAQ&q?`9vE{=4>YGq4fo9ohNqKtO4E+* z(tq7pLEM794n@A_5#&E`9A7UZlCvp_EVUlORWWz5xK9&KZ(p08E+(|z!rW8) zyYUNh4{xO+hTZlDOMZ;y*T{*+8jeD-2)UlM-q?c>#h$EN)}ypycC?Y*6hvHf1hO>N5AWi2 zd7}ex>rGg45PJfnMQ>RoCMPG<$TjIY{1hSLG!?(bJaXC~Y{x>S#ibYzo=Wm@aiJzI z|E?s2?6_xwn&2m*1>P{SN6On8N>9n!;EZu+jr^@n5_4tzvR=mgo}Y(dM2$nx+^}5b z?VO~9Z_KmC#FiJqn=%H<7>>{1iLzJZ-}&=PdcT}lqeJoKm2D0WBhxTRXw`||+q8;t z?84>SJ?(|ZScO%e@}Dd{e8&n|F<}?C_)I=cbs79U=kzWgro%z~&xY}zduRVID}M$U zLx%ppW}+#q3bQE2?e0w5vxRuf24#g8?BDBGmM%-I9`@!Cq(XTzX{u`bWLr@%1xsgm zm4Q4H_OzW)XcaX>&Gmujah9?_l&s8&ik`K-m2(Zi?@H?w5+<4dSmuYPo(g5O-F|3? 
z&(#@NvzvH#trlq2FhEv1+b5bZDT$^8z`p>L@oN*BepTQS8k;hL>=&!l`*vS;2zqN_ zshN1he3@QmbW~zoi?@`q{H!E8(zo^C&;YDi`{gU1caC+LnC?u_2XduMPs@$Z&!1oW z{PjQl2l_n^YJ%}i=EiZfqp|=}7E)hWyKZDF2cbB-J`K)b`==cKyykRBwz7^IBN~DQ zXloBdcTlAy$#Gdp^pM*`>=okBD@dVzT!4254N@{BVEJiWkbXscND|Ovo_i;?n;iAB z_nT4W!%0JjZ`RqlI1~ym%l5*R7u)VS*bd|@)Uv!{vM{;ydCzgl5K@J<+slA8fOOk2 z=BkDHPB2Sxc&89Y4++IIL#LPOPr)^H4bA?T(j_HrT)F6U3DLEVW?}d#!no5*UCXt@ z-N=wE-Tq$}SoW_A85V!` zrtZrg9r*^G$TYf;P6*~qM(F_#X$lC0J|}#DhG#SBGkj6Lh?z z*n47#xsp((-UaXloxOo10pb@i4bhLT8cG5eByZnrZy&5L$%7Kglwyh~YS7hlJJ%3d z-1)>=1L!&%F)n_GEEmz-Qz$%CdB0=%V#>rh3_*3D-^b6>eWVqyvwdBH#iJ6fN#0#| z7%_h|=}Drw!K_8>?c$_?*)As~p-^)H9T4ulS?6Q|JghQ>ByYc;*tZ`f9!MkZgl#Kz z?tp84@2cKmEsNQwwIh<`*8tgEIf&Dp!dLSgYrz8%b|a384Xu2SRf3uMpD!)`tSQ^y zqkUOiqO|N-kkS)NwN{r%U^vL-%vrsyXn4xm@RM`-EUGN+8&bfbfV`8BfItm)VAJ^J z!WBZy+f^r0OJvNMYS6U@ea^`OESKu(sv(w#B@J@i;RVlK>NC!>CE~0vR3bdaGfUXl zqx(3C@P(XN_|PrY)pAp|E6R-=N?8kq(nSG}-F;{*BSVCc24415PQx)FI~QdoBoyu3 z;~Ka#_h%i!v;*Z+F#HCHiKT_fxQO2?Oc%!Pu-GVu_Vo{tu5-9@8Zx1ML_wV0S2HMD z4*k#x!^~a^97Qt9fKiI$OTAy;AEg!DH|euI&Uv{&9 znnk;8v-MCH8zZL&gBkLyc;I+_Dg7I^;>11)8=q|Wn#Qs7=Uvp3#0=UkaE$tTTyrDi zbn!?G49ljVfBYXWh3IKb4h_z)%wz-J&l+7xBnwWL*af;TUdPG2#ctf5t&o?|xvrqK zC}+Amok<$QW_Nm#z0*PDPl7OdwXQkfpZ$AK8Rvgl`+O>}@Lyj_xCDL&1xgOycMzSw;@)~lVTGxL z$jkk?4^J+|-3|M}63$Vn?<4Qw5ue(U)fWsIRq4Nrr(m@|gK<*MDQ_C@fM};OXmO4> zBv`xAR-mUYC>qTxJN2k$%;4UDE=5x2+-KF^cO)Fbg;?%+(Z{JdI zkA{E!5CdGcyevAw&#}?DBu&pie)>WEc9iY^sy7DweavK zvCr{*+)@R4k?~_#dxGlj#Y~aq2%j+Urct7E+n6cBmRCzbkZF0+a?dCN{sL^@_J7EG ztFX4#cKw$MrG)}*aczqf3GPlQ1&RiDcemg{S}0H~xLeU+!GlY2hY|?x?(P)2Gw1rI zthKLw?f=QzC;KciGR6fNdGF_aizEHL-uhvYJ28);+;nWMkFwiAq@=!4eG@b2%oXmboh@! zbGNnfoJO78x`ssxA3*VJ2AnN9Uo2|G1~lTl?t^C~`Q6I+(7-)t*YQ+aOj=>?JOZG{ zhQuKY(B>fHkB>A;#PoIM{j^=kGSrz>c~T{!PHJ-VHbZ_u$IXCoUXWKF&OCtE4ub`u zFN9Vh-X-nvaqKZa%qZRUn1;!I!q_?``MgQV?}!$ED&=KbVFIXv;XsS37kG9M$S*f? 
zHBkt@i>K&T7hVl1H>ho~sZ+4xS%>82Te_Qv5S+LP2SlI${n00B>m!DoM7g z`BxzL{=c<25B}Sz82`xxYQb-_QH43CGgtw7(f$)ttwvGvZ!VhOVRm2ngb{B~w%E_&bxtAiK>k5qK@oTm^sjN(6h;j5KCk%68apUBuV zJSmh^-3Q(sPd)m)&lE2P@=;%93HM+wOFhyn$yA7AupK%+(f2dyPsw1kyyhb&Ug)9h z+cdCjR#tX@Cd8~CHXMeerE`pJM^c8$?2JgdrJu(tV2TdXhcXDi_`rPCAp^?g6EtXZ%V^ehb9aH^z zh}n#fszh2iX2Fi3pbL!SISnzHn^!Q#SAEb#gA+@b5uaj&hk43K@f8m_C{G}bD352> zo@>LK=jJIi7qFuc)p#LLtGl_ez4ve!^zP%^x+e z!~MeL#|g)q>8xu%nRAlQ#}RIjV!b{N?Lk(~^Bv1n_}SOPgqK5|Rgvhye#E*01RT== z9M#@X+v2eid%$kpRujnEm$dB?Jno1Ytf(UL{iA}|O^#62QpFJtNA+shb9X@@sD+BK zP%14nmbnD%@w)sF6HA6jn{45$6K(&P3WXZBe5pnkmj&B$k>7R9<}QWV2<{}pfNgHb zf2~(@+SqTWn7Y>&BDUmqR5LfG89gNl7*OxQ^OCH%`-JpAEHrS)p6X^9 z=W)&yLW8WVP&SItgp?w@xTq$%Uuk2@WTAR`-3#ogLp;n(>mUX(Q$){U`F^#Uc}R5{ z`{Ri66?>V)SKSEBjR?)+B;hm^>A?KB!+cj>`s*rRp~Olup<_HhU!UPm6y4+mbu<4X zmWKp?Yv^}@OL0p0lzVgQp!!yI^K-9BqlJs#XzV)K|A&4F_`Xzi*O9>o?r&cM5H1K+ zXU}z50b;LeIYUOw=H}!1gAiY)!dvJYAuezPjQ9{2Mab><;PGC^}ve{5C^4 zTsQU%Ial%!>D>XU!Af(f;Q6$8Q&e{@wY5%(;ehUP0dbnICD%^pe&fpBp7~B)QN~YN z;eJFAh=Ki;KA|?M9*A9R{Z4OMVLJa5Xu^XW6edtH*2!n8)Ml{NAj>DqvjNZrCD84R zOP4HZZ&s66S`Ji{$ZW4E_gY}A>bipKtW+=;EZWAjcbUV31avgipAVVzS=!rSP-o~o zm}_LkT`6F6T+e8IK?-Je^=6J59Umm#;5)RO=uN94$L9fz`V_yidN>efy$didg*$u4 zDJ7iPk9#_fuxFh9DC>@O(Xc|9x{m0%mcXD1Q@X}>{FXRFL&EBYw~C+3HQt?>i;Zc{ zK2h0CaIO#$HfpZ#9ZlJ#)!Sy3AdGmU4kF=%Hkmfejszv^~N-_Me+xRnt~o!stwjl{}A<@4(M!j%oI3b&Cyp z>a=4?jRbblyREYFO0aRgX)q!*f}dACcCdVJDU9W|<7J1Xl^6Q76ZuMBaK0EmvQGZf z*YIIhH3=b4BBk;1*P^}Z%M7)7?u{I*jOBhoVYwjRFu&f0)dxn_azugjwuIODDG5=0 z?58jEUDNbx-#j^~W^(`T%^&K7pE#pE84`uv~b*Yx27SGOdK+f`?%K5|*!ckL>y2@!HwPWt2wukt)2qW<8^3 znZ2+yyc{e-i2b15_wjLcwGXGldMX{ceyTjrKcmq<*ZE$;@7L+PP`Jy&lTtWmAZVHK z$kb^;(xi|Mq9ceI+T3ZT4r6HIVe+(CkylL#rwQE{pnaT9G$t@%!gdmak#zSgHcH^y z>{e@YddR|Z_im5fpsnzzLUYPOoX-Kd9-ep%^dM z*WKn9Q9L&diKKtx?d}~qRW*0?0FmxWP($h<@-bGZB(Q6E=YhM%mb=4zZDqOBeW2q3 zTQgZ(M&%^PSGQ!`9_^D)#bbxPK)M`x!YRkc@dSipN@|WM%0RrU0{Tt;W~?(Bn!>&7 z3S3)_J%g37yyMgXIl}ap_K@i`&^nTkTn< zd$!@05R@%;w;hz`b<}5#+K;QGT(LRZ?;lEidf@)*@!&waTA5OgjqZ$m7+FJUxo(c7 zMR`;t)|w5! 
zV&;%F2Kuain*COfwkfvB$we<>f4Rzd$LkM3j8S233jg+orMI)>5^7-7>9GS=p4&-{ zOk6EhvznSy$^w{*>I6kw`>*`<|Hf=t(OFyGRkf2Re0Hr$Wa;KA{f$TA3wW}i26ZZM z7~bO|p{P7#k2haJfX{tYQK>ven%@lfQACGD@xEEk@O)WquQ?&T98#g2mc5aEq%$y2 zfNs78ftGB0rR=e*$XbId6G{cT2aU+W!i~PC)g5DAWR*bcl*BZ|gYVU0%&Iu~ zHuo(_>{;aqzrT{2eqCV-&w^oFKFBeA8-g5J|m%5-~i(R54=Kq2)aa8(;a+ED-wHNUBNVwFM5<7Eobob z;dP@LEtB4Q>C$-D^$>}u!(#*v89>?~w4K_}EBFFXdqp`C?|TJ=@F(yKY+PQ;qDex< zwJ4)>e8zlKOek1uyab2EM&CVQ;}gc`UgqdrJ`67Zr-LuMyrUzcAt-J_(sFFdGK%47 zQ!g82kTjXny;|)>r*v&{M3Z}<&wP9L8*Ri-xc)0aWjkQ*;9juaHU?JGI8}arAKGyP zHk(NJRAe{ogtVFCSkK;Y=bJm2xu{kXM{#6QQp9M?dBS@hKKV##Oem+!p*c$fw3%8i z`O|F#^HmGVo=StZzyHVf>ze#np;W$22hLj|k1~a(;x^@iQyrn?4KC^r_Au_an(H!x z=}FG#@fQQtL}{Wb2=`E*h*+j+PQEVs3pr8qqbxQb1sB$=LI-L`f2jr!V8sMD?dhid z{EqsxBeY(bCp&(vR#=0(gO0retjqVJ;*c#RnGF zi1CT+9mpXNwal%u5rJ=*X(;t*kv&SMaqeEG;Y6Wr-+Ih#G=9Nc8)b^ zMB#x)#tDQ5W6eJ(3~#z2Yd4z5=C~GnL?pn>3=R~q3sLH1H#Orq{b!}sw)F5r?t_fw z{CoZrx|BOLRZE_Ftc5zWY}QJL>4I!NeV8ci|=mRGj&A&CZah+GxeU zo@gd_UFLF$&3il&En2MOgu1Z!)Q+oL8Hd3C@K3Zsmeh@p_pw8vR-Pj)s@H^Ol|`H4 zk8<$i4u@u@QRN}L|6JYT&(Cw(NtAj1b!MT-25?a7)n;!d64vk=ZRS@vm_nfxxnT=1 zK+qLDmpwKE&hvZ!{nvfzNz{ow;-RwHDx8_rDSAL<>`Wfphy{?Dt@ZD@E9(^$2r)^t z=AY=AkaY)3tprAs6~k&ju@YjI#x$wynbw<6>vZ#DR2tp#21Xk1(l$2dp!U_e4aJOV zx=Tk^>8W{`)p74CEEV#0eKDz` z3sVDTH7ZkBd*wTj?>nO2)K;6!X=YSialCak!=`1vPfi&dBl-4<_olbT&g!~*r9p;N zJ#Kp!_^xIt^=7?pz4XjX2JaFSHg(~HWklHg_YD!^+=ZvT-C(>iYeykc?f_6yr?$6n z91O40pGc?$v;Ri3Iib4!^Z#V`t=d}7M_OmVCEYbNC)y}MUuLDpn#}=JIVT7>mC9u# zohjHZI3UCf4Z$g<3SH~r?@4-4U1l)8<@y_~Mqg4m#v64j@m6bJ{&kn(4S}I`R;>{4 zu#s~c`__wAYLOn5Vz_52CNuc&w>JMenh||3A+2E=)3WjS6PeR zWr7LR6*D{X!o#O_IR$8fF$G1Y_Pq!Ecq!kx&RuKAS7({cxP55GBdeP@JT`2SR^H7# z`qEn*@g$`=0L*#t*1$qin$&5ALoi`6d!I_(@}w+p$5XqB0Z^5YUSEG^dgv`4<~e;5 zm^U^)Hooi9bSzuksv>%>+(lL18b|4I&@bJ2{?^O4)$wMh_18LB% z6aaTh>LFi1HH&ZaGbI%CRlt=%B9unyrPY6NQXf3{H%1D;NpO(A-*}|yVN+^Qr$~l! 
z$;J97Swm!8`VULwy>9bArj?9-|BS6a#sHk@!OOZmJj)&o@mz~xL~cLKw**je-J33e z68y!BpC1Ss3q25dSPBV?RG)HRx%NF`v<(1Y^KQMmd9yrwg|L zRQUI1`hL<5ek#uogjE%mo*faaVY?AFn2M!5^RTxQ_q=`e;IyROPS~ur^>n}Y6l6!z zg(zrVm?XR%`;E4LTZ4-GEv>hAx0Z^tQL!1+Foxg6%?|u&@P%h$$>B{--^%y z@vheQ!TSaT*Osuvp{)NgxbY^03zo-Sa-!>9m(=i~xqFZH8$$v0cCx^StV$vB&&C7zAi34xt>8F;K_kuZt zMG4Y_%Boby5va^)PBS9iOJKv6Wx#`a52SF8O&*I-y>a~ZG zAvq7!6Zqls)cICoeQh1nI2IsEMmg;mCV--2th@;X*&rLbyMP$7(;3^vCN{Zp@?naL zAFYJ^MnhS+aed$OSZ4Cx6E$C{g?D;b-*HQ*QBSMy$gTt6Tf3SZEGTf>A6Fk2U_(46 zZb&{;8#JSQn`g+)Re#P(Ryy8;C8eG-QEn%f4TvUu9SF#V>1JK%qmf)iU1lH#8gg_ zQe7JYrH-`PhoQb=}{i&>FLx`UJBYCt2JzzBkk-B^1r4f zD=(M~9#}BVN(^J=u_s`gHwP}zQ35XA zYT7{1mm?_egWWDp926Ry91*hf8!cGlH=4l@8=YU;2DkTqB!^nE=ysgjs6=OZlc~11 zaKN-qK7=bkI3+u!Ev!h7o93&OxNF=###a?e-v__k33O&zR65jW%{JBG^}a#o;mOLQ zogl&VBJKRAMTRk$-)3nzur=5_*gl+-G_a~`b=@1c@~7C$wjc17Lrfw5zTy za|cL`f7A!Q{mYR@QS)?MgzBmx=O7KLN+@)W`>lng;1#-DHwF0_GBP(7&T&Y`v`oDE9zGNdwNK((U)!#f zWg0bKU7vBXjpfKE@fSkVWkaE{FWcwsB-3(=4ew)EyBd3}#9frwE4R9Ngv=*(3)}8b zN^Q21HGBKKqeiegqf|e-ng*_dQalK*5&FV~OTXqPjFL)yN5=-q_(X&X;S*OijTaoXwZuDAr#u#+jFar6V-xK^JE%YPR$My_ z+ERyh!dn1p;`@oAZk-c#gyU>xG&mzBGsm?W!rzYod!rnd12iPncj@to_|fy(kXhGQ zWmqEv;OUt`<-v$aq@8wqeq~#t29Hb}d?gXb@2nN0)nw#}3nauPlD6FvRq0I5KP>IB#qyzQ9-Hmh+1uNMNiB4cEGt3F61mgWJX9wrqJqu?^dU$lRE32nsmp(l)%Ux;PdiUwBBw&9| zy~+V*r=vev-l6gmnLy-V^h=6e%LbapFgLc6+(jpVXSFsuLoEAB``8SUv2R*vCqdIu zy&)Jz&d2A3Y2>8-qO;car8lpw$VY$OK(j~lajx>e(EvRd-oeJZa#&&ed=p(1!vi9X zoZ~4(;ko__Okj_f7e}hRgeO@{l>B zl)OyK<%#eas#w-i& zYwKxC_U4IET5b`_7K8l>ErVp}=N%3@zMFR&PkS>c;$ibXLI>yWfnZS>etMm^Ic&6O zOx^)ty>->k&Cs`!6{cBA3BKB&O&(2QLp0k3EzPR)mL=3oD*yW4Ff7^3IZ~b!u}250 zvlrxbG0`Rexe=u;X&BMT0!ZPJ>FEKGUM)NY>2f{>V0;vijeRB$3_cbX&+RyLgg{y} z%q{!o@q=$#9?bDlq~_|(*?_BNRJJ3qci79sz>Sl*t99ES4M>+Zg1P(=oR94RJ4vYG z-=CB?e2MN${8s`Q5kW;I4kozDBVYGB+N&qZ6u!C^2@!1!zw39&Q(DHC0dhHh+@_1J z=pIc5LEba_<{vBR$}&f+8Lp~PyJdb}>@2Yu8_)ol*Y^!H+1VPJkqL^2?8V2&WYr_M zZEmaYrtTIK$3&_Xpdav0eNS)wgy$t5jFNdZ8x=!d6Z`J!Nz;nI^AXQZq#G*IDX$9{ zc3!Fu)2HhsVw4VMpyA1qb2?JkLr{+^8Oe#$8p8(Y!Jy8~ai{93>-|M)cBVV};bk4Pt0908`ZKOktP;+AH z%9kapu@Xaq02lZc6WP?{hM7CrEH&xX9Qj!Fi|7_FnSA8WI*zQyry@a;w;G-v4Bp~q zuGDL#wWtPvNcGu1S^dK3GA0#WjXL*n9Y18|sn*u*=#-tjx_(hnyX+~!bwantBIA}# z5@5@)DAdALrJH&OL^emx%Lh7EzGwn6vF0TskSYrd90>^x3BndHZ&5_`H+zg!!Q*F( zPYj+JgaM(eD3+^w4|l+Tkv@!j4ll7&mb`N0lwwLaRcmVIZI3wRa^i%%%#QXAYO

hDv33hH5h{kHh#csM`Y7=|4i$Ml(i2y*DU4aQh}4S| zKytlbquB)WXD%zBaXV=!b*Ra$Y?)btrAf#Wt22xJEDO9zP}!R~sjsjTue0-vam3*X zmxQ${64IS|a4O772vz&k-%ldJJ^VKI-GE6?wa3cX1*;Bo=bTk8$B!MaBpSA%m;5_T zAWCXWMlo@tMFj)!-2%0eu$P_i>~nab65rwMl*Qcs*(IZt3X+p&vt;s7Y|?W5kkC`5 zgxf#~bRFhesEzO}Lor8RRiyQS#RXp>5+mFTiD6usc}NS>%L4f9m>Ev0|X?rfYU=^6*f|_{oS_u{mvz7ajx&VeGMC zU3P4Un6`MmiA5E&V*K;3eOMz~*3P}6<(}!wnqwdPa+spb$1dE|3Y~ILX|`+|bqL?$#?C#y+u@XBPE#G*EB z(~@isM4=(ltTS8;lJ~j$W1Yl)xw6AE}wQnm%V&aNjHj?c?d% zS2;U!7TRcYdY6d=hTC8ArGI|hIl1dgS~cX{DBm~M6Z_tB!aGusw3`>4rtsq-jKQju zIOv%2ZP&|hn~|cN`}G_Bgs)8xaFQZ6gO+Mlps+((L+QSy2&hC|{%UBUgUb9?*Yk1Y z!I!vte}%QLe6ASK6ue?5-w;ZznG(spT7=5chh$1#GLvyGZ3E0&;f=|BbfZaUCxV4yn>R zd>%#K;G5%iia>O~VDsB0A{U zP6=s=`}qfH$j=VI$iO@6!$BuX72ij7osb@#CU*lC6=Okyuv^Z3b+xkFb7dxBau3>FoR(EYMXX^I8N$~)JD zsVXC@B#+Nhk(&wpO`yFEIi@*&?}lZ;T~h_8hM)-xgr;sHFnef27cUatIT~}{`&Mf7|dBRAY;pA$ZSn_!=;N` z_8m6=Gtvq3p9kKMpAGY%MEquXsfQDB=PS9W*+riJ4YHK;h(u4Zt>CeDs|NvP8O}d> zBVo1qU*gSYeruCFL|kqw{LK^yp?l$+Aax^H7riQ(PBh-fk44#y=8-j z>}8|R6*Hy%Op)w^tX*Sc!-pCk8VAKCY;M^{f!nshTvi1Y=_C7z&a~L_vz))tWMzL5 zdJ?T@v8a5!TCZSr`zOxFV4{I!-AsdMP`Y(KcBkxoaA?-=*nK zZ$-;>%t1QI4l{Y*`=G-somW>ndc2!BzH~_*43UDz6mM<6&G6dnpCrNz#*U=^ya50V z&KOw!Mq5F_!~2(^2)|5aNKo+C<Ov}fKI1W;THf6#({85a8mIbyH1p0MtA@4_p(_tiiv z7L|jAj+T)|ccmr7jH=lI8k{s?^%g;b6>SusD(s0cZM?sT^8@wt{ez>nEO=rkzG|l8z3C5eOWh|^lgNE(DUH}O%JXmo#8AOXjywU$l;oX0|5R;AN0QV?aXlnQ8zhGe z-ZHA>+qc~Qaqr<{H;!ui`Tc+*O4gsm{2EH-@@WPn_2rwpC$7rG9^-&G&?Bq`1;DsqoBCBXSD)=pXmMgFo+#Wde zYwWZZ&tipo6pjT?)+H-?^uQF|*F_iL3iLuI69s}NnTOg5ztISV26ep|oaq!!${o^8 z>n^J^-fH!DYVrVjv0FUs(5K257+4}beX`Tc0It9sD|De|2SqdqR`j&DmGsoMpyU_blN&1?{`HxRzHk)uVUMPW9=EUbD5@ z9ERMu>(Tr9b!pbJ0@Z%;5vnw7i{IWecGY`%l`a1$**;5T#p5-=^h15YGc-lv*~iko zKqlO^wVl;Blw^eZt_!ysKJTsxnWglt1k*D8hqTGpiMv=RnQ8)IjBx9I8)19av(7H> zc$ZpCfj0Lth!VhvlgD1ZotLt}hL<|Azvhrj=dQ5+H`>ODAIF#jHje+3x`$&zh5bOTs4EautnWIrd#s|?70+MmmL zMpNIkfBj(#FhO6fb+pwyj;uJtHPL&NEwJ&u@h1~nD?ig~CmyUCU?eN#!2YwTrTVGH0 zH}6bDzUh-YQGW>kUM`)?+@2QiQ2JkSwvptn*~Q;y0Rf(b)1dj(jS<@n?mf z%g&DIUIRg4RNx>$=QTVx@C!kKbl$TcAN*6K_w#>lN8{CNgjrOB?AHVvr z_QPfyUYaCA^O>MVdOL9zEPpM>gcBlnuKfi3aBd@ym*P4x4#ucl6Q~wcj9P?%=(EYG z4u$pd;*U!kkPkN=MPZ*XLDW|d;Rb_?RNd9Cf4D>@R4e4hrARcxB1n|J=4LQ69|m*0 zYvI>&&M!zP(#nlXTF^%ud=kUi%ZkLvE7cplr#EOU%cmF2X;N;*r%b4Km%7Tbb$0f5 zIOQO>l8hM4dY-^~_;>_MG=IBs>=?_W!=SkA5bE)8lIcD*{}9;89Et5)+S-!7gt;m1 zy}G{Q+!E~E(C3ki(z*lxG~URzO+s^G7G4$D?G<|zwg?uc&$m$TDG#r4QGC%X&EfC9HFPxsBXVxYQem@tis&C z`37$Cv>N4sEgFbr1TxE-bb63!g`adt5Kr9S=)`uD;}eR3^F$2q_0>_JLnK31cjw=$o8qumy* zo?FIA2-q3tw%kkhY%^DS@X1zqrtN+cRRPZ9hN|mT!B7^wE$ClBxrLCT%8VLMPi=-= zqmbjEI>L=7CLV)!*Xln1IfgMCnV<7x-bhPVPo_y7Q>n`&SmViH9BM7imGZ)yQ5vW% z|6J<@03KdpW-1aMN?4(~QXZ)6`t-=2hB0#Sw)p|CKGN6Upl&61N_OSYzg03+$AC&9 zYOCXt74QFD9l;ly83~jy0)nckIr|75puU zP<%@ET~&%};3Z7fFO1ThMFhVd5BiTFmaI0FkZo$Q2=CK<(+KsUu`L|6_-KgxcPc@V z_|Z5e*;P&IK?-zyCjFhW(iAEmK==L67A;IHO0VEf10q$QvA7-oo{X$xqAuiXm-U@Y zX-cz-wB5gR37YDECm1lH@}~kbKIpwvR7YN@U{N@AY=ux~=s#&gZW8b?d&E;q7? 
zA*p47q6?@H@|7!vXYPTsFBZ3vs_RBxr3m{12LvW6NFLW){*#&!58B1c3+{@y` z<{WKNu{z6JG0ew!BX+}k5!$fVAe!4?S%#%+NoB^|7ssy9Utsc3Qo496vW9%=5Npni zLXNI;!-vXuNT0y4D#qiEO_*O2?DlKBpL4c$Q!7d4VrF}-T7R2i+hKfv!rRVpyujkT z0t9htBB*w$emyR6_HdH6=*UdffjohM)PR1(elh*e6YTOVhacs&v}8j{D+yJ9+(~OP zWbA8z%*kRp3wk^RG-)DgpQh*|ejdcdT3GxnlS#|G(OO@m?WpdF3zi%FCRpn>36siO zzrs0U*0;3_ehbo~?dPfE^ZR&Q!ryIYP*|}*>ca8B#oR0xceFLNx>-PF;GJEberne) z(@lTrz(lX0LFjSAjaOp*z!KIB>N(GA@vAbjGN=_etp%;J1#puthq8(*JPG!e3zPbT z@t|e%YV=Rfc5S3e{&fPOA1m;pbVXoyI}~T@5WlSJ|4~Wo4X}M30CcA@8^6((BL5FW z9nT82THHb_%s%(`lT4 zYfaYkd8Yr}L6m&x(g{$n+l@$D$sIk_UWn449ZzR;)_ix==VU={Bh={i)JLiVbxZ3Ei!a?N zWj9kj*;nGAIH4+__#4P3aX3Xpda8;0Nk*HF@vq))8I&s>e9y5L-od73HW41Ez6qR9 zoIceTlvXWGQ!hC+aP5mzNa4~VWY6_M^!2H%5U*0}FeR(c)v#}7CeQKO&r2gLM<~Q( zpDVPVf2{bD8?rJRbtPNAQxb!~qs{7wU9d701|DQkqK5(1`%GPcHdzG@*+F8l!EtUM ziqmCku9RVvy07V$PMP;vZFE)zt=0!DM)wZ*E-nL{d}1bryHVw0liV*2h~`T6n~2S~ zxoM36Wg-U3!C7&3)HDg$?XI|aZlXC4)fhufg=(i9tWT6DWpX3)z6yB$L(kM{vM49q z-0IoF1@6vuPsP6dv}6h00X%cOs%EVARR+u}iLRM8QK$yLnT!H|qUIB~KKCj4EdEXi zjlik{=o2$-=gXc#fBu#qs?JDV%O zb~n)Z>*q~I>e9e6;MnTy%x7y6^X`AXuy6rPWh^{{wc0AgZt3n)zPsvcb?$Ow{I}p0 z+nn7BLSzm%81J{;%fE$5e}{5qeYV~%E;{BR3^~V%J4wVcaZ8RQG2Zaincb`0)W*iv zB^Vzp$n$NozkVttM*eh#NUcOgS)z9&6Aogv4V9`|$f5UV22R{G+K+ zTe8~=j4sRP2k%t>R!@~sT;lU0oL|t}iM-aXNKT^Hn7Scj|A?o$(}FtoX-OjGMQkUa zOC*Lzb*n1-Upvc!=MQ#pv482vU&^cO*eS&F)9;DBB#Qm{XCWG$AarX3g}Cn`0Eibc zT5bgK#741zo<0(pRxYT_z{SHhiSAEeaLzJ&KH7)wilqZo0n#JAftJ1gBJ#mbm=ZcE zvZKE|l6W>PyB$m`Qc8-8J9*?fgYJRyDyqHK*7P>pixMt*Zh7)F8KIoR{4*YW zwg5G29cyt5_ag%>ea1qS3}pil?Mr-R)KB%%YV)&wM(4@xr(mgeweEjwv{lXx-bNEp zRm8}5auM(Bph5BeH(F?gy?#dVl_L|>6%l;-OD)X#?A)N^i`LjSi~c+N7X066;NNJ~ z(&K-r@fR&>>PCcs6g*;WZDuuUsZzi`b^gU2TTUNOPB1Q^-e~dD@uFPBW$STBs$FfH zYs<9Vxz`=5nxj7nH$~CDhR~zvB@Id64{Q#?tGfp$ES~N0FQmhd224_T1&6flA{^Y%U6K$PbMbZ6BwoHn6o>iUQ==X*G*)Sbimr6)m{aWz7VDNa4*Nu3hn;}n(6 ze2MG(UUQGa$4Yt1AWiAGnAikokNB48azU1;n#wPSd8JTy26Q><;xE5e(Z36E9~z<+pVQu6I?{-2vYL*w`+2c8gOi$ zQ@ClLKoGY11&#q#)dXNz#_pYGD3GJ6bB4ZrMUx7$L+cqbGulT6r(F2q&TC|&rv0QH zf7GT=cs$10MnZ|U=kp`xou#5eTT&RdORUg3x!la=>w70hm> za??-TZ60?V9WVoekn`9#6xxmx=5&?S10=09yS)+tbQMSUf}z&`i5MW&NR^stxk=!Q zx1V3=Kapk4-)dOkx~Sjhy!yYz6D*uv%xX_V)rCUHKiPb_ zp!NBF1|Mh;?ZUDrR?)mS*QjUE>+4!gMXek{|8qNvrCW7lpV}V?+rDo{4KA8fy#XMf=Q zFHBJ3eGsl<{35Yvr*Yj2^7T=Rpi0roG{rHRb zZ~Spqh|bhCr<`;)v%{#u^U!dXlVFdKKC6{@ z{rAno9;?wUc>tN;Z#2qMCGcyE$#GRcy8-a0yZB1{-cwXZ9EKBC_kW%nQ2a*IUoC@G zfFgfIUpIJp&hH#zUb4xmS?55ZXhWP?5uek(IDy{J&gv8WSDs!u!C*p5a3k@MxVcAI z!E#g&zAx{a>`o|#vXQ^rt-14QkvKh}%@y_9Z#8g;cB2B|n&fmet%+V!M&EBvdlE#7 zAG~N<8hmRY{>{jrv?1(kJhLrF2%1UK4<5*XOjR`g*K$p&1duDuCUY6;d-{WQkLtk0 zdbMhwUnfbXLTT2heUX+nHjFtSv69C(9`<#DAtQ%DY*%7wNV^Dg*65w#PJ`GTCx&|4 zt^DdN)+K(lY6pxgnMr}fcIN4Z4o7gE=4y?K-LRY^e%UQ00GMS$vzIC!vTN#OTo;vQ^Ls{A9 z_i96n^$=4GyM^hotKN-07>^PrG9I(6a&xE*PO-4NH1(M1WjM7iH68&J5gC#6PRPRV z_0_eZhVrzQWZFJC^-~@Yvi4D#ZRs7P4#x<&2FJ|UCta;&;=UabVv&+mxiy^hl1p;G z?%!w<>#tdve~fU+hdUd6#@KZllGd|BPT0WYOONGtbOPD?KS5Ub_@QCIhZRBR0Qx;C zH3j`K;_-8c(rjbfJCpwzK>Xh#iVIXk!8Nw6<2#j3_p^RNNd7q1cRh4Z_1_6*^~voE zck=kfLA5)?A!^t>_Wj3?=q9=I6aw2yEN?mLc zLR$+u@=JaNV_CO9^6UwYR;B97DRQYCkym>hmx!t1jOwHe&%|qe;qkIOFfL1m1y@~1 zAqX#gC66f70tI+&U5bAhAS^R0`0Xg3m2PM8NakBxMxI@Ir-#f`3*JU!^71dWzRPU0 zzk3Q)`-3`K(poNWnUrPoxip&|^GA;5^If=DFa}Kp_u#>~l)PT^$9JZp|~81(A3F8<@M5 zZJqys3c4MKckBS zL`i;!yC4g3rQXgeZuLC%gnwRusly;d{#TE=;Q`~AREdcwG^CV!S#pK+@VSbCuCYYp zf6E6j|CJBW{Z~GqWp^2P*?MaR%VK2IzGD~_*ONz=6Q))4OCILcNi^SguHY+@o! 
z2HB!jJP~v7K}rIt9tcld9Af56o#M4E$5Zp{DZhxjFD#R_QuuR~)t>Tr)NXa#;38tw zoa7|^KxI_2*TmL|y~IRP3`T-hVs~bzy>M>eChESv*ozeWrqVC+N2k=>AemYbDYg}- z3zGE=Bsxm@V}s&z?n{DmM#UTgOq<8=)7F)CrB~WfY&b6X55nl^$L3EBlx&3vg5u5u z4ZGD@P>6deo5rz?8|@?`8y(obDVnRiDBl7RrJgOEPzSXnoI2nql3G{~U`>fgaUEMN zVBUu?p30H3*KR)KaX-!gp=c=_l{DK^=x{)w+TyDt61 zUjt;cA;|IUP(Ci(IW9T`kI% z)Z}`LdbDay+8aSpXLTvr3?zzoMmL;AD>_Ld)QxPdJP|Lfo-h{cn0Ejne2i~EYur&U zNPDVdqkO^sk)#BK`8p&HIvUpqmGmjuZjUr`*e|13+Wf*`6kwyE@3 zjje$Z)y-{BLRe>x`MIzfspzVXux9AK@uWcV{-JOt60bIfL+5HhrusonQ_>PT3c{N!FfUXq~s+5a!cP z8Q*|N*A8{w`>_E6{sH~n9pR)G?vnq%*n6ia%a-O}w7YC~+3vD!+qP{RUADSx+cvsv z+qS!myL#{a?fbv?oQHc~&eJ!>8Y5$^nHdo?X5^ZYzd1AF4e)?7yTBVu zDNRmWR2%vW70<%92PrQ8Te@m-p2Z}aj3jlsFCDR_Ir-YyhAeoak1hM{U$^e69d!%q zMM-MzRZ>A5DFopc38qprZ=qC>PaSvYs)@9|2jeEIA7zB}yuTbS%WftU|x_?s@ri2ap0 zmq2yrceA!^#QF&>HvDD-F{}h)*oE-YfS2!lJL4C74Xu|)@reDV>r^p zzsUM81n(NIK&%K%jzlL{B_!Dn?orc3(B$mhCE@1su3@zq?MSh~@(_fZQV?`le8S2+ z|6U0~-{hA&Bj5W5Yn@6s>{%5Nrwkh1N}tmRcWh9uxm8;^FYy1!0l zUUINi_Z|gx6sL|kcfR`D<&ppPXedr0r=PzySZZWdK@??Pyf%m<;T)q)&B;90-o|qt zzp-AY@9X#$EWJx!n*Q%*LYPl(aq#~2s9W0X*DrKmTTOS;B-jm7;}v?2n0unkdY66Q z$nF7u_od$BuhKN|=kE6htZf7@`Wy~_K_SH*{}CelAJ<2Ik@lVMI^-@&27kc!_`6#s zyYywKPm1EEiJWJ6`jhl6*}wDkowWN@*ZzX3g|`;?@7Wo6GWu6(zpOikfOVy`1mNi zy`nEbHSM~6E#c{c+`|(@GkFLl}$g3XBBK`(?sZPtc0w%md1L1x!JaNML>j) zT)vLk=+FE!R4==?WotvTXS#fR*Kw0jc z2FvzC?nJfj>GlTYgB(mKb|?0rY-%ca_rld95lC*$gW*nNMJYEub~basTB)RmTlq~9x{owG5erDysHTw1!yt<5S6cJ{KkfJ;Rp&E-t* z5Qm&(T)<{@7}tT=6K#B2eZg=XGhuGp@5V#E+nH5XmcXrc=C?P#!XAUmb(a182pK#| zbco7^ehejQ!OIqFb=wu;Ptk=XH7iqYjfkR`s3n1{juy`r<@eRIpy3yk-v_@o`l>IN zv?u^zR19sgJ;&NkPG~qrhu{B9wf5~Qya3FN2y1nA@AmV{lWruWX?!4+EbMS}M`M5iaszT+5bUMdc8w9s}J?F{5)RQ5!^V8JZm15fKoalc+`8JQWAH ziY@b8PqT0BxMTD8%BV9G7e!7jvS56UTr^z2HPJ$iic#v*D+{flyRq4vM;j|T=H=6R zgtR@`_~^izrkHQ=M*Zj?p z7vqsfECxcFmXg2TrUzD-WdUt6OcyjSk2pEgBe_qUJ_n=`+r9yhkm;*qveYmAg^o04 zRORKMG(8up^E9Qf3%QCq8i7t_&lvFw80g)!dFz6iWo&@TS&12TKK?tHUbIgyHpF_P zHOjW4pBc?POqI7av$TEb4{?{_MO5atYA&&bts#R=L{r#3oL>1=;pw*!B+aW9=8AAK zt&_4U)R|brMq|rNCN0d1RKUKV$Eyx`19^p269)U`K0u-4dVYen&_;? z{28Wz3!YHV zPqN#!fAz4@Y6EYt(34Vy?T_Mk9$1^-!w z2pM#+TMnp5L`YwzHljGf=A8ePbi-4N^#@8DK070t{#p@m8_SlbXPI?<{3>*jI;4Ak z^6H<+tiuD3+r+Ne>V^Va$T4nG+de-SEswk`$gD{^Z&o|DEQiWC07*Gd0# z`Y4m8GRck0=z9M$;IIh`~b^`tXAnXTW)PshrL`uN-~thQAnb(`?U6!0i@Hg}tq<`e-bK zVOh0pL0jFjvp9ucfTM(VypDx@{V^kLCY5r3P87uWwu3%DOrvx6*b<%$)!NVT%3}HTbH;3|GDe`_pv_j+Ii_DEYmjjxPD%`}11#yltL$y=(bIaD_s3-;P9c zKg68QJR|>0ean6BT=9Nsc}I0ezVobm{cZn3b&F2{c)@{`?Qxa2Ty$UdUrHt9@X`43 zbnjk$Uv}bh>}p}?xPI8L{PPP{%HEdQMdDi0!U1%l*n3%=AZe)DZ!%O%8HRoVez1e^?a(K_WoDsrJBi# zuzj5p3sn4vtX2&dYUV54|csf@LxYC`}Z{Yls~`xJCm~ir7Ge7_4Dk1 z{rRiv|5p`+Ky{yuhHzg+f1AC9{Y!;U|Lf*q|4u%HZ~s4ZjXw#&8o{tlhqdw2ltt$O zE#9nF<=haZb*nC|_-W&Gi)Orv6*An0OA|5%tp(F;nZ7p8jP;@1xO0J|_eva<#Az!d zvymq;O3eUo2_sbilgGO-@yW)Sl z{IS`8?ef2OhBtAqO1ok7l@(ZZA6MBn-`0=vhjEVem$neZWg9UFjH4u1OKGAk_jMKR zwiRwe|KU1ZTHX+jW%EjuSOg)D%Ojr!d@wUwC)#a-oIhL6<_yq11G) za5bytc-y9b0i7j0`2;3mZnLAgjT=li*?C%KED#D;4f{DEIM;mz^G^H2^X)xZw%ccS zOYim`x*Q1%3$OeAZcg{}`iYfW@oQuA_hlF|-NL82>0vm?zg99p0|g);wxXL znCwr<;Y!d?2uR#PwV@JU%y)&OEj5T$UU%6$xixUi)8A*sSKZT;8tzi}x?W!ICkZD% zPluLtd4ykjIWBN@KX0D$7ccL!0o3mi%J~tGv1)60{6p}PAI|7fR`3XSz{Lo8tbXtQ z)FqMPt3q42YT0-o2gn}QJM17Y3@!CDYV<R!H3gl01CtJRVd|4epga*&h(Cyk; zR>R0kSnPcO|B_EX?$?YC4j){yJ?3|^cT!{-jgBt<14EAitK8>_qgWkP{;PUIiz1yZ zWqiFgey59&<)Wp*YP$yx-Xi%?6&NX}5D}qL+!h3LP6z$!s#8G&(mk96LjY2t=ZacE zqCGLZfxAv~ZN;q~aE4I;ZVk>{sU@+Ca#V^FTNODif`^^=s_;!{`*)9V6<4yiL2^E! 
zkQ4SQgR4lz8R9@<6YLPi&3$|d(NJzDEcJ6Vf;pDc?bq?}5yq0d=*_}}#16j|#ehMr zdaCV_^F)2BjB_1zblVrYf$JH%f=?dE6oY3e1vn%zVLWQSS}!!NdKZ=X3|0zaygixS z4(`(|JkNTwwbNUU+i?1PcZ{a1;Vt012=-rGdKRfQBERb6sOTdr!my)q?U7dLBPzr_ zLFm9EHCDEVH^lXd$mUE|BxY4}F?5S#IpX=?SKHqs75E((yqCntl?)iqWEcaMisWVE zK#Hxt`Ek1sb5TUH?7ohn7}(_qNUh?f=)?xH6Z@xBZ}|6fAtT{C1wGrpviq_Ify#At zf6GcM{Yo!Vr!jIROj7NTCa|rpFlD5wJbhs4Bf_IG@bo~Xr)}WCIAS9c%Z!2JIGrah z-Mr|+SPKKAF_4fU1H=i7hC~N_UggFLea;^jMkxmcO}^)`i=U0s{wFmfib+TgZ6>VDyv zhK*ztNUTebwdYWk6bHHN*L{uq0uT2?*Tma(!xm{~geqVGo71hfDRnp2CYyua zguG`(!RWigRmhgj4iC1eXke%HU666!wz$MIlo%suhmG-dv9G(>qBuVjlJvVas$%mhn&T3V&!p7qd|VcJyn0i`8PQ~}TRFc)iqi6kg>KuRFZ z?zSoG*^(2^4VcmZ&v9hpEKt#bV)XWQ#;q)DErw4lv=cvA)H(Z7^4KdB!x|CQiktsR zVudUZS#V65reIPw!6NlS6hG^*KJ+FQ8^C%npD31C%n@k6D1id8#0?H`h9x_sibZVT zs4p<)n!_ECms#7*i8kBJmm6Q;e#GsiAx>FufI^EyjrrDjpq}70@AGF4|Nq0_mMpG+r}VF_{AXr0*f_)n}9b z0L4Z|+^Z(c;4q)r`ZRro+5G(LA_LutO$1Ale`n{EWsX^X;mTOge#@Nn^R9PClXW|k6lx_|ba_wKeV%m+ z4X`3Zl&d)$M+45Mvz|=o_=LHwKb#1L97_A(YsgBMGS=fPH|3=1C}}u#GE1x@v%z2hf(~+~9t1QW=oi3mENY#+?-iKLKdNKK~Q775q*|2cfhjNEM zd6#|?CsN&7W^!tu;qgQwr-ZOJ)?VWN>ZK5sDtCoX*%yT^!c9%QcNIvs3{e4S+!NbA$=rLfW8Kub#?ZcJ&n&;w#F01AH*Fp7N($F z>swWOI6XT3h*fw@(}(b(2}A*#52Bl*4sCXR6-GGbT&fEyv47PhaC`J}2*E#qt^9>K zarksxKLp>L(^^*aGU=?xlSET2GuAWd&#_W*LJpqzUB(kfZC;E9IKY{DR2{s?@7b!j z7&vhr&MwKHyRgHQk`CI+dP7$&7h0?zg zlMt(i4FJjYc$LfYPmDr$@)-9-SjW&;YCa9QMp1DUurE4_P};cBT@KH&3_DQYdDr@X zJ&5e`Q4CVU)0kRdbf{rx4M2EIwiO$9jq+4 zfX0A_Jtj}uagHk7k9=yyd{HtE1$a=Otx+yZ{@Y3(9nA}Z@sxtiQc}NS70OJdhs#VfMlE4`jg~zT}F(kEybpAw$<@o}Eq^ zxM_d6UFXwu$}+dIho4nOapr|A!?)fjo(BW1q)eZyzqIN-M#uATLH4Ke3P!_`X}n;! z)`SdZ?`U3tvZ8k~-3}NfluIXJ7n>Er7(n33P)Dbef-FhrLjucJ!bT@?XRuz*uX*Q& z`1F(~aQga&W$f8t2e36k#|LgmxUFn(8CB?{%Wi|2JK(fG|1=DT_7993j!e3!A-J~U z>6w6L+SLhk9kdz3V@*4*BZUXRr-HufL{W7?`_ijxbG1laa7;!J^gn0lUxUk8W&?0u zE6_aJi@?n~!)ac&h7bt942Y*Z43pG;mnL6PBMv^N2bP)QN2kOW$?}yy=W55cyX-Mf z38QpDfk~xTXr!NTAjyC!+*%Y1;G^$vs-$GHC;<~pXV++Sj>C|=-Yp}M(C|}8;5HY$ z|p0DEc+g>*H4w=ejP3{#xNy#$NCcma=33^L}u$=IU=;mZkjPH?p_EoG! 
zy)?*J6RCv7>{6E~b3Ob1uC!PqcXq{W%8-g>%6 zu5#xj7*l4EMykB_@pznLl7sDY5M4iHLB9=Tly))=j55X`)TQi)VZRi(2B0fVJob88zfEwUVE)`WK zG48m{Ddn_U_MTkYVNV}BCd7-q9D~p=u(W>GDBqYQLH7UVW5(lKgsL2A(zUcSZxC9B z3he)iX)!a*9fVEJA6RKEMp*4?IljdDwI8!x%;$E|i(Pk;k|^yb+Ka?YgC_Mp6|Wiy zGVBVvH{Xl@r7DdkmGCao&fSL4IGQ?IABD|Y#rZCzNj$ZJs3T99VxV5*YeQr>t$O`W1!o5rK-6JnyQ(ByUiK*8) zODXJ@Ah)_(UGSbEy zS;348JUUDS`O)QkrtDlf2z;~_-riwp`arr&A#!C+&T?-{XKz_~32+I%`H_ zMFHhYG~|Cwe8Fq zRBshkupKNdvB{+~+CqNGJk2yh zIynh0WH(yZRgXN>53YCZTk+tm(c-qh>)*8=J^nF!6K)C}DJq*vdRVobzX(BK4d$Yw z*6LjDG>W@#@!;W86Am9VE;?+8eA?=;qdA&re;`+2n9(R>lq}oEvO#N33u1@L)}*uU zthZ@eb#u-^*I+5V=-=9%w=UoVjGl|rTmcMTQS5QnvrL(1^Tp62J#4He>$87CJJ|hb zEE9=U?|GN1}y{mjOZ2{ z>M^Iw!AC?}hVXJ$Ka9<$`qB&c3D}g+RZ-?Zb9q+BT%gaBCZc(-BikDIh*(@Pm)y4l zRvYD4yzm0ngZ~9TV)-BV5xE9e_rHzfu^#yF*07CxoW@cx>XJcZ-2lSNFcK~$6 zXJBFbYb>pTv7@cCgQ2k_{vT*d*v&~)(Fp)+{q;?h5ubtKZ&|dW%*^-<%zqE!;Q@eO z0EvHf5@7;#`dblN5kMb?e;A7}0}e3%=Ku>m;LAVCiLe2B{Z0SjB2H(V@@ku2Sq!5L%>iW zV;6HnV+B!wF8^ELPt4fd)XWK=i4Gu4QCi>862Rck)uB~%HgNh&_+O{;A5H$FJ*}#_ z5dbfwXJP}$`v*K^16&mr*8c~5c;4yirKFm4)lQMjSH=-U2t^K#BFBdWBrYZ<07Q}p zApjER2q%EAoM;Dk@x?meYXH3-Rsj9Tpv{`~>P1ugnhR$G=VU{>>EUEL>z&Ew`?`Df zxwuwe;j*>gDBe_wU34E8FIzf+QyVXJ zm`@BiT{Qw9!ztR$H^!+S!q4IBpO)WV8AL38FO}xzscj#?fCZT8APnjvs8(nSp*`(h zu}?Ga-W8j0P(AmtCN45l?OXnS-ZR|HI(>K|s@}rw;!dXh>G(QOwZ`KM)l4}zAe)4D za20@Z{sI(5H z*WZ2DL@FPyU#MADt#LHoTAx-4Ub2nxi-84ZrQp=8u>@p%OpTtUnE#Uxx}_B5oJ<{JYoL_OW79Ql5&T5d#81x(NGJ z*WWWTXggkmihYd$271eo&@Ea!y8o`1 zxM^&>(d`qf{NP}I%O<3uxxC)_)!Etkxe!k7i+CN5%kAMLtg)(G=X+`V{vI0Rd@z-8 z;*0V+K|GB|wi6TM?papd<>UGEprlKFM49)lZt>(|jL;?KH2MB=)vc6YJ^cKrt8>Yl zNIP>BBlViR`>1<=`-u_s84}sn#*@6n>Zxk~wwdz68xcIJOFbYoRxHI1Q1KH;Fru5H z%cbEhJ2pa2S65d>g(XeoQ{6%7g9B1-I~n(N39f1!LU%>s%q zEjFu%OhXkg<_>lei`T2T_{a%;%E3VgdZ>46liHS*-Znw`` zlXc6-(_TzAk9S*ZYimnOOLT7o4LS_Z!?IzqOlIH4O1h(4o9724Tu8i?l z0NM5jlblx_KJPEZ**-5XXVu*|gIEAQgWA}-KF`MM?+?x0fRfoFMi8P!5ool*aB?t9 zv^pJZhJ!F#fn{Pp1o&6Y!JZ{x?|=Wk*y_e(Ah5;?*xgo-f}yX}C}fTc?5hz#5(S!l{~0(zUW!)SFeoUfdSppjeEg4mp&sh~ zq4au-g))4i#J71sqNfgbGDRXyVG__;QS>2rCIG zjm6m)hVx1vmR6=#qu9GKvzI+Lgjlf)@xVRC5s6hPz!43xTTPe!9E;~`2)CkSk46!SPexR(zg-)d>7}=H z7YWdO9r2&Toptn_^h7<9lwc zP++uNm{{F?+25h~(ZEH+NdbaE;@CCe)tSItfyom*42t6sY+0HPJTy`tpBQ*OlF<}s zMn9u1^u9^9ng=HXe=d15A@%57@Wc`SZtea-qTW>{r7QZQnr3AwDLkU z(3#_#c4-%j*dTZ-@e5|WxAe(Jw89pJS^Vym-!kfFmojAU9%ulX#3Qd_+oy#4R_d(3 z|M>Q4AbwSKBx4NxA*lV$%-eR+KK`hKsmZl}hMsPldpJ=ar@{|UMSS8Xn~mt=@W(w5 zS*se_1t~XT-sR-5{nxk-va0HprI06Aqsy&01~*l>GLjZ*b~h^D?TgdQq1y?{&`+zA zFF?~f(NcHl<4laF(eJ;sy}A9fZW?boSQ8JiL|@~(XOT>~j@;=GcOK_Iu;_%$2s$P`l@4}Jqa z(wK$KZIWP2dSYzd{?wA0AXvyb+?b4=K3tof2y=S`$3xgoHtZP5wY5vsk4rigfDJ3! 
zdW&-8nZUKg=J!Jo4znTZ?c{(Ik$v)L(S+0&v&B0Wwc5YDHtG|j*>Q$r+}C$a!GUiW z43%&;XGbB4=)xMB73Yq9ebC|B+(^D&Qo`kGGb;Bs{2Qf53W;Qr9)94=z}xekeYoF~ z{5D~pI5v!u-TCF^pfluN=*)JJbh^vsdYC+FANtZx=q4U8i;y1^OfEf8zuuQpd=(&s z9}uxHkmah?A{4}k<`6`F?QFY*frfOgad9a~U-gBW@^&E15iCpF?9m~sjbcG&=_Ui4 z(18t~LN;Lq`7yM1LL(N{5LM-9ga#2U_@c21d<*R4!J6rw_0v@mn4+FCif8R#E|5^*inj}nCD6` z*)@P}#0QcoFM)cIWcmp@B9E%&&!m+h>Bu@A{JX)2WkRHn=JYfdSBqam90HWsuPrf6 zC@yBEHr>gQ$XZK6l6E?1&Z#Zt1-~G@jk6S)fdB+Zn0r`gytBcTq_V#fr(uuZgHEhB zvjKS|t{~Y%b%tX8TitM8h1Tdq4$S`LDx=nJ3^C#_<0_K` zhPlbe*Z}XBjlk}va_zXN*TzS~Y_@eo4!M4hEBAKz9Qw^jBebxUGve_FFt5oNk93y~dd5Wh;2S=QtiE z1;wp)dH~q*6ZhM1KcSlqCd&gH- zc$kqv8 zNFKADzMy07nZi6Edm%SKl&0BiBm6xUk#y-8L*Cr+a2!z+sw$h)6|V7S*)T@C)8iXF z1Y|Db;Riq`;mn-*lFy@T-;c#g&1nSsIGk{*BoZ+Z)Z2=Iet$y^jkINsy88P1x;i@) zWKnbT?|I6~%DF7lraS6ey*VFv?)!-22=Zjd#d1AqX_!J;boxKgm1DIcsSV6WCMFuk zISH}R;&7_)Ei-lrXTupVFfdwSWbAPS5Jd1p_`hUsKi%xbN!*Kmkr3eK!r^i{*`kl} zN#!TmBh(@0;_olg=E>4f2f4ptS4OrF*Ag&Jh zAbs6vy&@80_Y5d&jDC^%-RF^XaJW zhqkv67Qt%`Q3krg4B0ZeStBH+#J~iwVq*||17Z^Z+6^_Lk5p?y;lGq%8t&Dek?1p6 z=~)Y<1M!*MHawt-L;NagMqZ2rj#fCV@?Ikmk~toK)TEOxE{?8^NTB2OH4(pYcC*3}XA9nx+zjo-q!XJ^{K)&QJRvrpBV@d9fz@wr0fjpQK=^C=Xb-92kFD|<7F}= zqSWj;6c&a7-`AB)NL0lLRtPFXw1zuVu_=dJlQI;CW52M$@2KOu^rCoBd*RFs#6>|#v=~*1f7-BgdL>_JikqS6d@l(rDfu< z({3G4!r!BSdtl`^OF>mgLXM*&E8{X#@N$%Kt)*yUEwhZMW$)XL!%~etOhw!%555!? zU|O5|KY)DBqi+=)mxP1(YWvS{Vd)KDz&G9svRm^i7+=<(;)!jv)qIfesKGDNL> zAB<$m=iWG$f?_Jhz)5)5Tm^n_R}6V5#?wGzl{2XrgQUVnOG!mLrlCtBe#IPm!5Vxy zixZCYbuiYuc%#oXFckh_Yr(Il?*1jNeKsn(uZKZQ=9cx_YqG64?nHoibmrUBLylEu&*Ol;a7gaZ$Fl6yDzBiQlwh%o>is(Dt4E zYTAZYm5eSlQSh6oj>ldn+lE0quMY6s@w_Ob988AeU8J8~LAAhwD}o`=eM8}WWTb(Y zQ&=1+?#DNI*Bh-Nm&5#SW~!^x8?7zhHh*H{j>B{+EPUEVcx;tyzFt4&$~{bHvI`D{ z3lxV7hPv2pPU{^`4;EZc&Rvz0!axHgdbFDpj4}yR4oDGM4W)2NIW)kXvUvD9jvgW~Fn}+f}#=d@4)T~E28u3i+n2LMkLOY@m)*`yP z^8ytX#F%mjyproz{@8xmsMt5DN@!fa9cTRH#)$}Vv^`j?j^>3mAu{~sAOWCQlj z99)XtWk40;=Or3eBLqB_OgmH~Cqs{fy4H6%kxozds|H!oC(zy=Ctw(T*T50XC=`HN zF#`;{oNEAvgOA+kA4vxx%0%CR@A@2$;LiU;ElSRdxZe+1bRNARj3*XYS(p@xGKrE4 z4jXaMO;$>cEvQL>*A1jZZ9m$cXg+{xpXLyanBS|!-=8G);kiPAg1VqDjHxy>oKBoJ z=Lcq4ZyOu>UsBY48{Px$yvKWmv-;*f@B z*b1m~ksw5o;$qHjP-5~^gzU=WFeyt2vu|$@HNV~pym^#mOV{T{q6fvQ0+DQoHpIrj z6Bo#Z2inV5Mc@G5&%_x8N}V%*LDHy;MI|Ic$xR6#EA0p`E1E{pN)?%lMRe#rAx;Z~ z2U0Y*z-qM4rGA|84qyuHkxFBaFkx0?43ANwo+m6y6mBpBt?U8+nn8GIvKl#8+2H(@ zn(zB9Ic zm#ngqhwl=UmO}QlkBGEr2RuHnz@b0h!@`>#2%8d+_c$74%z~P3^{i-{HTcud-cgRg zN21GTF3HYqi~^@%(~n@AEn_yYeR7z6s3%?{%H$tcHbgHjSw&lYly}~Z3m+TSoc**! z3AL(8JgVHeV$vTl;DaN`2gGB}L<(Nb(;wv>Y|(M~^5WJ-ZD0cFpu&w5M~-Szw?bE< zKHX+So1k!+UPcbPsFh(>1|57K7nY?gi>*7p2jUaYyy;9~(@$<=W=YE^oy%P6PCJ2L zR3#H-(sF2f@9T`q>eDYRvz|qC(_3OR%(59c`1kT2a+;-02OHY38IS%vJTw_UneNGp zMt&^EP$Ye6u zeBN4~{Byor8e?K&PP^*uey%%C@jpG^9T8wdYHDcEB?Dq>dreKOG(|BHOZb6UkauWI zk#W&!u+ilCV*@+rN)FPQ6ea>rA(7V(-%lU0;BhQg8@?UE5IF9;amJJ$128CW8l4XE z;$!IkS5Rg<0DuSp0r`+a`!b+ijaP#f(G3nXA%DVs`@I`Md%4k$mKsh1P9HZ02$bP! 
zQ@%@r7zHNbJxjy{v!i*0x58M!EO5Ht8aYJ|U-tL|BLqP*dytfb2jg(p0SejStgfvU z%VtNXl;|kov{Bqkm-wFK+Cgg&5TNkg0G199fS5eDOdc44a*?hiTp_0e@Vs%gyt*sg z)L)$YUg%4zfF>6jmnaui|J209w@+9QBa!UX)YPmj8dWH0=&`XeNePMTcqL_J0!!jv zL80s*@a6zS+lyTx!0trB7V~K7PcOzWGQ#|Y_z%b<1c0+qbZNi$!)6k<4V86!s`Kg73qj%^T zQ1n8FH6L%dZh0SNx?YPA`1!UCVRU%z@&#eqSXrSYOm@OZ-1pFtR~Ubd#o&$&>lnWr z5O6R;Pd$nF3L-ipc}|xkE`$^=EZoNsDuB9JD%M2xEh!2pv~sX+Fce!7JdBh*Jd>YL zGZ=qt*wB~&3|XA~G}oyFyp|{I4l>rG$GH?75r}Fp{#(r`i3CtyzEcWF^{Rg5mH=UYTYqYxwcAlcUqM(A5^H!^tl<`a)LZvh?C`NalsdutE4E;oQ z7`q7)vNlF3JUTq{qprpPYILy(*>VgzvhUgzfu7vBvsb1XMt4`fGwm5I0u2}hNB@hcvgfcy^?N>Ok`}ZNgNXv_jRfp*a7^VM`|p(S-T178X_#X=nN*Dk}!2 zJSqCFx&NgsFP}mo(u8;fW?UM!!5B-Mhe#l zV+tmf{b#gj+(cvy_OAZpjaYOZ1v(~Pea~E9WzF%b1w+9I128f$6{1J`!EvGd+G)Nb z%Gq}uIp(FUM6~1JCVP41)WHlcL;CQfb6Z?nC)!ZX8fs%RVF5d9gX^%o?Z>i5FW1q{ zBQjh!GcPyo*=?t}Wd}R?^Bz8v$;$HIV6#@KI{(&#XzK}myn8F-F0wA>xuT4tUW zZn_$7Hut@PD*@Hgm#;1Ecjd~jU5bsLcKmjN4rb{X1Px@=PF{U_Y->zS>#0~AKat^# z!5<6Li#rX$T?@d#8GgOa{c2%I!GJRkYL0>?x(F`VF8aAWIX@jA_;J|+?0OLBQiP+H z;G~Do_*8kbvv}Ou`f-r@5%hZotLw(hn#q@pE!d1|1&et}r##SEcf{Ve{LU_}cEBtG{d1VzYjo#_Eed8ynR9jGa_r z$#;#zTlf)^hhM4Ibcnw&%Urv*40A|bHWnpAuR_(6obUTCurwu58SEWlH}N*tbr!~@ z*R@)8U+gp);A)yI=NKi~lUTL~S+h~9BEV=J zlylUEe2G*VAQI-%=@0ea#O~}~HnnRrnZTrP)w?IBLER;S^>2yzN>HG$2P}_XtCth{ zMZ_jHY^Qb@g$w~^^#Dtjo+mU)n2OyX$j>hF#pwS6RY9u0LbROO`+a0%C7LxiGHAh+0mO z5k^o@7$rjO;fgpBp`;klVq^~C=y{{_6;%~b zK?Y@HQ+7V(%d{Pz9Lhz90!=8Pk_xIQr*edG9h3TaiWi?291Sv}DO#)_Vo^cD zR7x$QvMDqL36Uy=QgBO50k~zEaVFE`QYuA+T*`$S_&}v*YObR?TtP+pr^g1f5t)OL0YthL2c3#L-1^#1%Oeedg2ryXo!+=|LRt zp?h}H&R*(WN9($&doeBUp`InQWC1O}r;EC9oK3S^sTBcaw1u{8qiv8di6)5=33}p) zFfu}sJE-G+y8kL&Jx!+%(!q_i@d0`OR}RpD^|T%m7SW=Gv=E{9HurUx`>@@;#`L~( z{~*qJE82P8?!II<+d6!P*h-ARL*%*cK?KK>5w82l{jd#VDhxKvk=t4}jBn(2V?5c5 z_=~L$vqg0tN26zRUmNB8B-8PJg5yVi`wwHCSDfy*{H{-nT%VRld|VpwyYAS3KU@9} zYZ7jbcAvAmPCDI>x*^axXlm#>>~O!D==xEL<5@S_K(pc6&8_PbF85E89eW(kB6Eo4 zxXbSRS(*20ocAf4=Z4dL+3k7V>%N2BQ@JVIZ5mQc>jYY3sCwS+zU6aXj55s^%h^EG z#3MR1iHINlMV1!9O;kwsC=T1W3(GcL1Wk(vhF;@&ev_i-TVc?Oc_lO znr^fzmX0@2iqMa0rJ&r>|L&uICJA3x*nKNhP%VIQx=y#9>gv<3;7~DnE zhmHe{-n41cAQ`81b#;)BywH(JMrat35Lu&_L8pVpR+bMS7fclq;nb;9H*Va(DKv+C zaqYNp;Q};A-~{sogV{Ic;Nr~!wb9h03&1=G%`e2_C^I!gkYzI9nP;9kdGaLs6!cS& zFKcI>dg>`u&gs*qaRr(nK6F4r#6hQk8bDi(i9e*F*AQO?^iX0-!o&@lh=&ZS0`-qB z09iwmlqLE30WNkB+RYEQk_|1j4cy2e-thMVvjvc7Y@E zO-)U;d{@LP21l`b$SNen5eFEtT_7wLA@PAhh2Rt~3<`y^TV^yoCNUu3JwdhODWKAD ziWjn~stT#Z5k=f@GBPrd5u`!B7>;BW56UQBE7meAS%D=67O}C26N@-dkrYm66IUP7pfr;LFL{!dZu;#VURd~v-H=L%~z zia1eN_Lln?vAT$b0SUz_B31>};v6#4-eTE35Wusxd`K*39&4G> zNE?eNEHT9ZA|?^By^Qo)k;;^6m!}|%w0GgUoC=nUi>0U7Rm37BYmpF^qr^n}yuZFa z#9k2H+}TzBVR&!4RbV0`&LkD=B7(lPjlXZ5K=w>Fq##<3Iiok{`(0*d=VX(DnY@Z^ z1@qZBvy#6nZ)3@I4MyT~<(?AdJkNHo1zP20jYauu+^wQM(7#q*b$oG-GN z$olZS#=KH%D(1L2Zq%=)8b59x_gP2OKWvkt*QV(DzXfr2b?QBJ9 z;n`3f8%gR+ryWzhiEJi<*an_vwz1>NY<7>C%vZWOIlB0nrX|EViOo3k*Z{)z9CM^0 zlc`!(4>4=lniL}U0xo_k*^0T+X0x;owi@M|%YPhFaG8{)n(i)UMa?CLOeCic-|gcs z?kAdy5lm}X!ylY{%v4MH>viOt!~QK5JOEo`I{9FbU^=@-ddK@Biei0}(~OQnV_B)O zdV;aM!MA72xX}8fP@aEro_~IZ(TNED(M9q8>Nww&*r=*FU)7k%vOr{Mq0!T3^w;@! zr~6w|{LK}{`ZS|G(ch3_%qR*h8E-61GMXSc+1HfpZ!R>JPc+un`gc_N*O&O0jq|r; z7_+7rTc#KrYyI1+j7=Hl5}uY4V>I1 z)|MOVas!LA16@h}hRmq8Y+w6$V{v=To=5Y8hx3C~W7eW9^~R0`V}kj{!gPNd)Jrhx zij1B{V-F-lPygt`l&GeBqdV4Dl@`^K?VFWjbXAX8w>2dQAx{<$ertT_TbJi! 
zdC$#BeJ_;vhqJ@8#vE7{Ke#(Bye2WUCegHb>=-w=KRvV|DKuxyfxT&=WeEez#|{Lu zhW4e0d*TP@ME7@&>Axozq6Q&$ATxZlDELTzD3lf4lNOvCJ#Zjn@a>x2S1a}(&L14g z4&Rd-e!6sUFe|t;K6poV@TE!pj~51aj0=mLy=lR-r31?n4o>&?bwv*#PMNW)+*mt1 zaInn3F5ADPGj7L;qVVRFfyU^aH8Gp&V>W%`(VB}Lu%pSAK(=(U6|%8z8o$r$%mKXn zUh+I@e%wdM^9ZlOd7M1#9};nqycgIgae~btN6B-Ryx(=Yewyp}n|9yd^~V1F+W5~F zj`{WU$X`tx^*1vjf7>1X_p9UnZe{Fm<{Q797V$}e{YQrVRlAw$L30ppI(uwoH*)0o zW|Z@1CC;Bubi5noL`F}Uxp>9yanhWFxOsBn!fawQTU5lrvX$*HtJt+nJWp2f^#kmF zbAX**P=xz<=5mxv^Z*y?h-p+Y9cj+6)5{BNka^Cuxy3;#{|$@6P46h^;#I zdZNb6OpH-6oWQtFR`-a1GDdXb*&<8nWqA?qikU?g&h+;7;wWAv;yr@lsO6D@m}2x0 zZyCsJZ*NB!0Sp2WMgXT66)#z`1Y<%-TfTg`m_cOKjL4UDLl7(9=!q8xj4p1O*U`dEG8G|hI+<;5dV^5NU5Ys@mCSc zhB%pE+KxaOmE}-Q24$2`Ne$IZqiGUZ9AL)Km;eRjD2tLH5Yv2w<0)ROE)asW)l^+f z#W;+M8ps8?Lor#2gDc|40tw>QVi{YI5yG;n z2$3PPp6VCU!l&u!Z_~Hys19l%4sKcQA4n)Go5YPrOd^OjnI_Mrx$9~DT3XvfJ^j=_ zL_=NFwTf2ZIFn|!QQItHILw@j<%l7fr_NrBR1Rn+_xm=C@-Q zO_>9OQZ_qG8fxqd&=ZeUxbbS)uELipb|io8EvI9qu>d-Dlixu`RT+AF^p;WS zJKQUHXoL~qSL|VAH~&Fwxny_oIwOROcr1ru=V-Fo*+LRYwlQQ&Vhc$o*)XiSeMQnp z96VC83^EC3Z}=auvDL-LcAFB@dDtB0o%XoDAL}~FJ;*X{`NjL?8~4TEjHae0%<{yt zfyu?yt5=U5JBBGIS~E;e(HNo)$BY0|a+!UgNtFFn(B{ZA2dxvPtB8*V4KXn*#0>HI z=buM>%p;B;KaNSgSRur51r6`hPd|O>(k0wNgNA8oLqh{vIov{1cKY;btD(Y#-~b`8^QsEzB$1#zG-B8z7W8tsDz4@!qsS67F;(8ZuBZf{Hl)ubij(p<83CxbglLrMy1QZHI-mzl`^2KCX7B`{x(0#~~4fI54;ZX!A z@c848i}I*pJeL_WW}s_E_k?;wd?fwv6(MVJ+QD^M$%6SidSP@HQlAiF`M02G zs9QV;R13;`;lc%|hpbU)sFol7=tnQV{4%aUEFxPz9+nG=Y-WO7Dk>_(W>Q&MS;3&j z&?{9}S6jx4va+%XOd*55sl2>gTt383BBln*wL|<|-2@sPnW zD=W+L4-w;rEHA>TxVS*3yT~kvtk(o4U5`>qOl!)(*IOka95(gOZ4zkQFxPmH`kt#xD)H&XB zJTjS{<0TPOp}Z~P7lE+~ULmN865*jwnKDHTR$^ojbBB0bh_OQ4C*;&}y%_01A=7*@ zv&humvU`kNP-B@y#LObL7qOh+RIE3aMMbu1vFsR@5HX2ZHWs;V?UXXoP$OeA%idx+ zmPjsg%hJ&D$q{>tly0QmL)60&|1VNYBHz+P%r%zFjb-f->x!67EYphEaI7UuVrG$O zQl%2jGO&o#i>M*_T8=9c8Cr=1SzCmGs2E<5M2tl;Ox;yc5#q(CbJzu?n%AM^vKfB? z`)SN&v(?q1D>3~YxnvslI!j4qycwD1?l^Vu-cIqb1177SkIV1Ctdm|P!Ci~f0M z>StZXKWt3=Y@zX1g6k|MgLd~%5;0rod@B#Kpoy~@w67~XW81tI^ z`_hfhaYk!qV183zceQ`ZIAdn2(Hc1_cbva9Kd@}Fv1z8UKg*b(WHcF(#py;zabR_3 zU~aLoa(rNkKXSt8h};VQ+FJk4JfnMJVAZOGaCu-&UZ5v8up}?Aq|U#i)i^N2-`D0p zfN+ViHqDroX0%TC_c#0dG6Hi-{A=#W4xKL@m>Yed(%4XCY+e{0EDUs48k>p&%b|9f z(Ozb(F^xHao>*U1V01~AF(*CH*k25(jG`Lt=v z$q9qUCxo6Z8N4SixFF`>I@8P&UOm<{Nv%j2Y&ZH>jSa3z4E4kX@68MKXP7IA4rGM4 zq=Y&H2d4SZXwBGgC~N3c@!*T)2TvCdt{xlgPY=E^W$$;Y_B~s6 z@Sa@LiREZvurEEdD0c9{@q_Qy?tZpxV0UV0nz6ekYFB^8;I+#AU9tU*{(arC!F7q@ z>VWA5)N1shu;Y!Mw%E-li^6y2gf=AJ(>pe}GI9Sm9<4cszX43(B}JvY;%^cgHhcK} z>?8Aw)Dygr2cMJd@9`{oZ`s{%JDsnS_c}$K1j)UX9a^@rm8Oq}yKC5_xtMJiE6KTy z9oO$*W6Q2#Z*cM0!4#%%IN=bv#R4NHohNvUgUb~0w%v7=!KnA;Gx5xK;TM=V9VS*7iKOXgF(th}V+#Y$)=sgvz3 z7|~%wA*+YPtp&qKF|eTT7ORVF=Yr|~iWMtlNe-s}mQ6sc1lFD^vX(~{C}ChM>xm#r zoKI#m$Ys%@MTj6K1+j<7s(;I8MivJ_CPv`$jUkYQHn@niNVR0i63dH5>_RAlD2xCu z%IYUkTWmq1Ifk;kcI}dVS}?4#Oe^S85s(fJU0ygG?(gqMABqt#vc|9nI$!`SW*;P! 
z_1)NvCvtprMEr-Ec&55i2^2r3Dd^DH%B$WZ;816;#nd z9jjN=wD?g5oz&19YyVIs~TE z^hRn#h0LRQZPZpvwe?glD}clS2Dh513Gu6_3U?77SGuWt8*N)h>kwf+&4>I>>V$?X zXvH0L$3Z%XyGY(ky=!RA2HJoCgrjC;*%#!rP|I>!zMFPK2vQ-=cG`}TLWuYX;aT9( zpp3_zbe!nC!}D&Q_v1p(OK$huW1PRL_WX9P@m`+ijL-der01g7v)k?pJ6vzaIX^1% zo=^1N<3hXSzKdKlxlK6hbbpZU{A^y}AJ-@RJlA#E>sV`dHrUKjS+ThU+m>tYGH1HD z%;eF_J~oeRAZOU?yy|j2k2c)zc{SesaklG|LeF^*nn&lkB=7&RKjWWvCBK{Oy5e=+ z80Gol_|YHbxPFl2`rbHmN#>L21#F(}=0bAw3#oNH413ArzTk52vbm1h+~;lH8y@Fd z9&;R6$|IryZdwN&?jeWk37Z@3{JrGaNA6A@RyD9gHCp4R?Cw)`_g-@6f2mi?A9|}4 z{!$=?dn*j9(o827v!qF1lu*nsUnOxk+Q_Y4eCGZwwl7?K?vNbKATq^#M@$n?Y>sZ-EF zOdrr+=Fvk#L$b^htubUG6*PJE)mKquBzfV57trOQEtOTG;`4(_K%H#gz8x(sIu3Ma zNRGa2^5n@-5`6@65l0#{?9d7&g)~f>WU&>h1(gORWd}}ij6pYnNgN7@2&hBpNnd&8 z71RclL;=x@iTMScR7FLFe4BvAA2Ma56?7B01!)&AUc7eg8Zs4^Av_eQjfYiRTZ`#G zrs1e76dAROEO5Q9t`4O*bLI@{2P)waAeCr^+CW{s_10T2zx*;LcgPOCYI%9N*g(XW z!}2E)#}DX+3a_uP$Gd^5Tgsq6!c&t4M0jMf45zrbxU8&9yg|eqB$CCN0fAy&5u*We zkxf^`I)RrH7gJJFG8kEx1W{tf5le+QhafEG7qJeBp+jVfpA702y)7yq#eeFlr|?3` zk{o$q@G9ahg7Q$(GA@Zl2s+~;3Wbu&_d$4n5VN770mW}@Y!rKoxa){R3(6}N7^ycY zpy-pFoGfM*QOv3Vah^dK4-O9<%2?J0yjn+&96`S?E>B|az+*+VH#awrtOYgJY9MR#STSx``|FC)RpeMJfJ8|#Vpz5s zv21*95tC(>8JV4|43Zicx%5d)9AfyjcAoj1IY=xdmSstbY1vz38z0NwVwGj&UNKTG zsSR;NvG#tkRw`NkLYB?M3W%rl$X#5-CL_il5jfJ`A`b;evA2jlM;5$W5)i=4wlqI~ zuem|4`IYNTHlgI1`!qUsvS&pNcZf6iyA1IyspHATRJNa#@;aJ4-rl5x4KmBfd5~NX zQqEJhIV^TO+bF8pq|nSepA_>K=xfd8MW(k*uQ_uuH$q`oeAD2N!EPSY*t&wAy_}aC zb#d0yd3{hjTOr!ay)oU-x}Cq6;rm&c^EdO2f83G$Sy%K=#yNiw>-{Lp@ek|be$^ED z>sc}Hr;WbuvfstuO>f}U2UFP8)BJ~hPVDtu#`|oh{+P;bB9}}a2gp5zZ6h*t!6%u& zG?C>$E7>Kcj+IC8v&>zEO_!B=wwZ``N-cW<6|$*k0S`i2*&c#%1!j5k$&qAk=WF`g zh&`%<=UJI#o5SCs%;nu(N_ce==3+H$`UtUPWB_>>guoN<5c8 z#;6isM3E6WAu=K-KB^|&XixGtX8Y$=_?OoOHa8pl=0*<`2bLxq(_?*A@xJOYQRS&d zYnstI&fmhmD{YwUC-|pEjmk^#)fO5{GXitQ1!g7%nkNO;l>1kYiJF*V%!v2b#QSU0 z0v*N1$_oGb^gw5d(GnjuwLN-wXLMhVu^`Q8M;3AZnhax3iLolrSdeP86$QFiB@ERC zww4>K>H^!Ujm>4o+KK+v)yAgwKtDbe#ySXxsEPiSW&RZ@{^l%Wen;%?dve3~=7lyS z53Eic*poI?F=lnSe?x_TLtda8@%^KVvV0v={M!qRrHTG&asKLD|GbXqzBw`b+sE|p zOEV24PZR}D6(2lO5IkBq_*mh<=H$@bxuK^^241WJ%a7M>m5Kf~xl*4tBq+r|ZVr-ts!3xy$M{5|((4u#S~-)yL68}@G* zV{V*l`nOEtm1tARUTb=Dnf8I5ym`yRY;rlzt88wN_d7Pv4;;=PN6@=2`*&@g8@$5k zDRXyO?~i@9pB6bj>k9nqw{rf!Yjywc)hYk|vAo~)#C())f5Yy&%&T}_A@4hpj!!b_ z!xY;JtJAu<3OV zn@tAEy_20o*w|v)VTO1m)ZSscig?28;VlAo{$aBh_kK3N+-nxow5vQc?8XDF-ZVEl zFgL3?#tICuAC6?PknNnE{6E;nF8v+ch>dV%G3_e7Y?58b#*<~Nm8>M%#Fm!*oO?fu z-OcK45T)>Va-hK=!<~Hj|MRLlBD`Ww&0@5P{F_BqLk( z_EPUkT3Jrz`IIjUj>Ibl$wh)#Z^XVNE-s=~3$?USn`~W+QqQ7UGij#msS9a!REJZ< zsi*p>G_`{|8mOU)s%ogFiJI_1LfPGAC+*xu+hp0%e44+UmfuVFK1dJlqkWJ(gJy`` z2GW+%vhB2eD{V#mwX}8`&v;nE6(KK22B(^W;0BMNUP2CG=A(9}n$j76B{-{P2 z6D7LtrMIK#ZzqrbZ%fAfw#)c+r}4|i$j{~nel>0MZ#(_}&=dW;uE1|+`F~hB`ncVF z%kTVUrRTS^{hv-5^$G<|sA80GpX+5VGc#|^LRlE?K%q~rQ1=lOs$ z=ypzHFOeK>&#QQl)5s&PJIF02-181|gDkXT-*UKqnCJQBB<~9`&byq>m*Tzuv?KB3 z8vo;LUAby^y%XvDILGzV37!{YT#wk?Cv4^hGCkbxts>_=U14tUYkn()5!b^u_jg7+FW5~Rj%*BAMvka2 z>8;%6j_&40c4cPwgdvtKhD|9LlHtl%c(B;`^M&>k%M{}_J3*VDO*-O26Mb7`@u*2M zf1bb`pl;yJZO)Byy_ezsAj5Sl-gPX(d4GiSf2IfiGY|*_a6}7?88=#fG<|#a>_M}J z`5oH8?c29w%GuV|hPG6`r9~TsrUr9-`Mn{`5zzkNBHBjG9nfMUEMLl_u|jglL3<_p zhhy$=^XAQ0Uwu`KFfsu|BZ+1l(lEbx;)y3@oeMe+@nAt)iY$AfrJgoz8nTlO zNEa|NFGD|tvg10MdU3)*z8wrodiLyD2uC!?l-~w|s?bLizIN@}Yp=bAIH-zq=gvW; zr=NZrGkuvxq4z=Vvg;{yfX=8W+(kv9@ke%O-qA1M*74)VaTf)-bm@|;dzD>lkS}@x z=!0Gi2~q9nGEhs>`1_>JIo zl8sqpS&*#Okz8;~go_md(ZnW`mX>BMAdeD(ih@4Ussv$ge4he~UQVR8&+n zH#b8=lp56~4nh(}k;O|2wS-J%TNSb4Kmu}{F=GZEge*Oi=Zgn~w-EwsYHCmwmV*k8 z&{Otlv3C8Hr9^vhD7;~{+T3WUFD?FxoL^e)YXnLQT-=R$ACBO@LQeR_sg{HlwfNbJH(ZjwU 
zZM-f97Z(lh)i#-X_G+H8O=hRoCUZVw&NbHa%Hm93L)6PNjvCX&#m0NgnzNER-U_B} zc&*KB-X*5pH29cXckUqfVw>|mr+p_opUg40UUA+<9!!PaNp<~h@tD799rNF|B>uzZ zyq{!`x)AODMOVzfpDy|JOy7rj-VY1iHx2i-c<(`*X9HVZcJR9C9^S*Y%q)WGdHWr^ z`!$dE&9U~ScH8T5?vE$?-*ma|CQk=BP%tV6zZp)J!yG8kz>K|Ho4Wj_GcQ~wRkHwb7fOMFF`8e5k(tYX2gEAfc=Pi zcs*1LJK?P2aZ54TWdmMW-7ty2R9eWcL9^JuWgbyNyf zIAdl^RAqEz#aN?0#h4L@EQS1u#_IM!|LTPBy2PQTz^+1nZ?V5O)!&-tZ=GN)pJZ&R z39PLRtjRMLW*PGmd{dK+=6GM@%$Ut5Cxq|L4ed)0J(3^VJnlfTvAQs@94aLn(_^A4 z#`#;u2YMzLy;;WGKvY?hzp=sCIm6gHC%S)6T6lf(!M51lYZJr$8Ro7jTgQbL#}2Mb z3~fmXZb=?kpA_s(2yPu08psUYnG@cb8k#kxf3kmTjelFK(N|~es4=!KjUU{Y6yBH| z4rPZ=6$igv5IkRc;GNoC|F&$|zb;wv!`hw43J2%J99S54aCh3!-8rF;r*HZ0ls!iZ z1|P}~ot!WTnOjmqJ5y1F@FNAG#|neT3WK}SOiRvPxglJ9G(Y@se&|qks4pYDJRvwc zaIiaWU~ihK@csuA6IRLn~g_ z73^5`Fu6~fOKc*(YxBPAb-W+2eGpCW$I$y@=!b6m8*GPp#@u+;`?kmVld-m6O!5B1 z`h!nLIY^UUJyncg6HMd5I#fkoP7BPO(=^n5`Cryp2qlcg*hPfAd{D#pvh# zb+@rWV><_S@HPUw*u!KyhY#?cx_9z+l#la9G^g2%<|?n5I>rt*_w#Zj*>XYlkX*_w z+Z=8r+lRLrUNY?8(lfl=Z~3sl$||}6BWPTo}}%v*fl&8D9Fc*)Y;ysz(l?CNur zt-dGBUrnM_?B9YB7Y1_HMk^R&h`j|@WP*?Jo0usqpA!t9^ zcEt4sgC8V8G+Al{fzTGG7zxWtAH+liT$lZAP&DX|i(*PbSiTyT-+q#^KwCTojM-33 z9HA#P$3E;v##GVB$ZUW>2N5xGNqku@sv~iD{H3PBQTnkL&W0r9BWXONzu*Mz&9>qAJ-p zrjjZPsSwEn6!4J`M@YskT+gN4WJOk|B54Kidi8Cl|lyY1Az zoR*hUx$FZYtB3HhJY>Y*W@LHA-ZGu0w^3UYHHnXkIJAu1Xm=LPLL6Kvqq1766-ONe zX3&gI>O{WtY5qpqc!&-S(7CPxJgz6A+*cAKUX1o$iueAsdh`!dJZGKmlP=e-6xWB@u8+z_ z{cVTwyEgxuDWe`G_Y*evJL&FUR7L!#bkr#yT1fX}MzZvv{QI4Ni zc)vT={hZzPahl_mSjW?D*IUt!BcojXHdEB)QSNu++^@LY-*q~ljqp~sw>;{0?zfpc-KCJt#Y=(4vEAcyM3~FoO}FQA zUViQ$_CG-qjFIBM(uc*(pRaIqi77FPgEY zDaQP6Le`|O;LhjCNy#F?d@oY&YnGsc_pUa;t+_&YUa$D zXl>*-QZYBbapMN&{^!r1#|-`S>C+HlNSUU}t}x88aSMME@{1+6RQJF{lZ!Ymu@ zGa7T05uFJ7AS8q)C!4MgBg7BO;u>yYIe> zM~P?s{`>Fa^vabhI6Zgn9JE4LB{l@{W)X`)N=k}2Y@pz%aQWG5OcF8umksTqA!`26 zp+oXhdWchAUS3vKHfho%94Ahkh-ZhivbHESH8m+IY23JR;sF8>M~o`gQLGb)03F2X zMa&WM+ty;tkws6qh$Ikh?JXngbgZ9*mSsnH(0Ie4sw~hz&7haZYk)qqi4jK>+3;4f z!^?^5s9RJHUTJaA!uuoZzmO0ksHsz@*45QnmKI4Xo9jaDw6rua+Mo`l>|(zX7ad$j z7N{uMK3Dc1Mnxf0JY4)2;DyA~6nA3z&PaZ;(AtGqwya4{PZ!&QeDflkxLRAC$h2K7 z7}josv9`dqe(f5Hi6_jX!@ol` z(#&GHxQyHd*IF@T?UySi8moFO9~-N-Eu)R7C;lz+Tx0?Sdprw_AuQ82r6m{yc>wmB5D7e@~#HRp7Gg&F@L)t=10lS zPfH>`TO9q_;@G1R9_SMy*E#ZB@|Zu2(!vficbH!yyKlt1{%*GKXIb=8oaYBIu75dH z{M%~Z4yU=9Fy?N&*2$ba{tm|C*0X6KpN?2*#QrjKm4Q=vpvyqT?3 z=?Zp?nagvu`D|v9E$<*71;=orm90s0c$g&L2Z>pxlow?e@$#!qmebA_9{I_qRp!<) zwk7O1)M)N3Vg6VlJ~_U5xxNKifq7$%X+_5JcwcpbZ|YcoLxR69CaQ8w)I{^All`V0 z!`w^OXdZ7Yo@#6_GJ0~1u3V!l&1f?sCyXAI6B}7oVDzL0+EV>9s{EUa{mUUC-d~$* zENBh%)%dq(`R61V)2oclEyn(_QFXpi1-Ztejxqb|1KY+KttrNgY-3)$uO<*tG|u0g z8|Y3m+Q<6q%Z$~P#)jrGJ3FHHwFmZhM)y@4n-hF>G5$$OMpLQ3cV$9&X=#l?igpT02;~;?@bF&^Y5AyyKmdL@EtkfhsK8nGQ%qp%pY8y7k#ij(7!Z(a9RA| z-t?hhR(J#dqR^(4V2yuksj+s3v9CF>r_fl|YV^&B-qjG;zH;or)5XE>Rqy_=Ve60U zw|~E8_XBwY2Qq>uCk!4kH^mL@7#9v^g>KdCey;4`-8rEri~oP(-UGUdD@_;OCo7&) zM=0kA0trPVB%y?|M9vwJa}pv*C>;rKPVIKv#()jjL>rTBjKMZeZ5u}%v9a52yZheG zGt=|tzFF_S)$iN?$EmB;J>4_6@16TX)SmD%3bIZ<6J<9y?d9W#dX%nI+B6VdC- z@ZCQ`Q)~Lt?iffLcqp^??^~ox_URv@~YK&Q3W0q6_@=wvS+}{R_8}amaiIuzgryn^F@(gHHLhU zV7+Q{To6%|1LA!2s1Qi@3Zn9$pr$Y4MvK<1y`Td3h(sh&i2FrcF?c z(Gn~8JTH*Tlj4hXw*VIdLQ}!P76Ds&1cAX)f32U`w$>kGXuGgSHu!OmJ^@nhlFuW8 zavT-}i5fL1Yp-x@ZWGrY00%c5b!CQh74$b!rhEX*j=M>4QUQ(5NUs4l|A zE%6g-zaTM|ZDwR5tON+lAU)1d36{rPgtJ5>U{EA7w^tcYW#L}buo$%)`7^7QHRK!p>q%yir91Aj>Qbtt~++@ z;2R1g!wQ_5v{=FK8YD&~(H2S^W8sH1d^GH8?Aa}ijg44{szG=Q6<4sQSPDrarerP; zhN)q&<;SYARjMjeWs=!Ku!WJ1Np~?tUFc7QlNl-_OJyO=a5bDH4dg>!N50Bu&?2{T zhpJGz0wFz9Qj(W(h)j1Ul3pmiLRTg>0ms}E1tPl!Ro$pIma0r~(LbY(ET8YGy)Fh-qKF9$FO{xi% 
z8LdX66x0nLlYWB_t}=7vgRisHEMz-Z&BY%1yd0p;Ds@msl9S4L&13nz&~e_YVd3o> zYt;f%7&khcju*on7wr0cx5e=ji&m`K?QmWi5%j~7sO#f~9E%8i(&~CC%=vjy;MY@( z4|0NDj&PoGI`=Cb_UO$B=hZ0t0gt`iaVsQ2t@suR-s_kyif~sA`x(RDVRimkInTQs zF9q2?%W`~_Y`fVK`e|k8`5@O-kNuMojst<(ZSz5!fq1I@ zuFK2IF#$dH*kcq+fkli=7$^FL7ha%W4~%P^f9k2HDCCRMaU?Ny zM~|XhWRCG4BRs}iOenZSoS%XAa2_|}7RtpisvQXlC4jnqgfWPC$jVmoJh1qrK(xsg{cVg~9wx}T|R>s;x-5!1P zQCvc&p@!^RzDz+)Fs-1@=tJ}~<}x(=Pk;JTQ&SU6d(4%%7locUaRMePy5hox3wWS- z+(?gZq5d$HV5yNpdP#hIJdW|OVCkbEOj4*DnF)N6flfGZ-~gJ>kbme~3cNEWG8I|~ zj1Uz;whXvjP*8wND2qfFoW}va4)!fFsUvhFGD8s|TgvvL!i-@}RbHyZm;@Ml?&Rg= zk*tCZs|YfPF!SQ~!w5%Yo|y5N(9#JLCZM~}fzLnx{LPy;Z``63*=gYKfKPWlJ&1k+cDD!b&jm~Jno>d0Ug$DhF_CSWpHW0nvEaS}7MG_x|% zlEG|{!Ja!N-tx#Jk31|JwNvi*hgvo<7SmmbQZQx^YO_{@bzMw?iuu(9TkiA6RyK(u zgg#JV)A`6Oosf$ufWzBO{TA~^^Q1|AAu4k3plz@~E+38A$@lpq6PbxwD~h*Jw#D=( zBJTx>@&BqGF#==tYys-03;$%2_|=Y6wq%({mWq)A`=VK(5_u`?q+m;(a6CpTOPQpn z{>fs2ASfoeUE3q}asg86l?_(L6rqB&Ng(N538TD1m>6>;qR>pS@5TwLFiNCIkbj>T zdYGp71_XZD687t^#Bat0|6=XP@5hBbX0u-$VmoZNJ)aT$QJng!G4%W8(f@F;_Gi;0 zzi#yW_Q9GT)ki?9JL_KSfw*i&}| zIG;D{zujH*QFGY!(%{buTtA-|cB3-nh{HKq`$;)hTI_dQ^cx~-|1J|ys9fyRH9Ay~ zezh$)jMl2HmbJ|UY1(AzlTkt&m?n-__1ZMkU0o%~3I#vOm$*fv{T!7t?I7$Zz7^`x zMui!ebvep{$-PlLm5@2?wRNH?A!oEZ*a#^oM-yZ4WYt8avfT?y-0O?IYlnmsMTL|_ zc`BlfszjqX#XBK7v?9h`73D7T1ZR7KbCSF*$=>nFp7G7z?bV+4JYz|*v9{E+KFu>Z zJha#wl5Yg(#CWP>JvFJ`sj1#6(Vhy;it*Ic7;O{1T}|HY!;M)9p|v^Q1u^c*u#kdy zqoKjOwbHvG%2PQcbkva0LL3wtOUHONW_eo~HhRL4ZR3Y*%{1nY^ei41wtap4&f2i` z2}VndQ5EGW4+|Mp<=v2Pw4#(DT7pJXbRs=tQjJOTqPury_--5On;G6SA*{2^voYIP z81HRF6Hv)C@2zM=BfYINqdHro`sPOTERE?~71uX4e8;lb-n%k=JBMnLNn4U{W0F>4 zSsvTBAgXs=Lf_Vu{)JJ!^CG*ah3}~Gw#|stzC87wZB?H3a${q;XVVyCuKP0OnPI%Ub{4Qp@9=$R4T+n?b(klXu2Ztug{eSPU#Bln)n{+&bn zQQ_^$S`%hXy!J>E=cb

N}U&{US*3Qt>^dt-@bePhJ>=E!v`<9phY`jH-2HzfFQ z*MlQ;$Ry-*tYGKikvpC(-1he^)hyyngcW1F;4KV=$@pK>B;@+aTY-1Xm@4l_Sw1gvd@&~AlRW!J3F=k5&ceusp@r?{iqao zN`eYKBiP7Zi4*M;a>%`cSM)2`5y&&(4&`bWq-C{VpLT2O-a^+Qy8IAq;W5^7rWgp7RfrW(t%aBg z(_nQ+4Fg|aY|(j_>98cvxqg%OSh zFM62+HZs7Gl2jZNO&ME?>yk6W(8<`gZQEw5rcj6gOJ5RQD1?iz*b|(g$}Sc~n8&fE z#qIRv!7F-97g~X8n=Vwi%8nvNM8pYeKdRH}uK=?{lj48>249P;FB(I5? z4Ayd?XbkmXC|yFJrc9O1Q}afv(HSZOWf7%8l^E5CtR5bd+>)l!a#T*aDlb)~s3)-( ze8#9Tjj9pnsi0D?>XCDaD#0gT<>MqiNQN{hZh=}bN6ne7X5)C3S~XwIM`LlRMYRxb z*`PLbs19nvkmZ8&v(&6s)jC7XV7%$cYVuOGbh%nSO-(~_W7Sxc#n?&62RT#t1vlbr zjjBPdkcN=ULbVW?Be#oo^+Bk5-LCNp3|3bIZC_OdeUjza=hDLXJ>tlNWlw{Ew8tIJ z{T3I=av0NQ$pY!YaMu^Lo^R?y-pdMjGv4ujgyq9wju+AcULWduFWh!5*nZXHe97Z@ zE!go+q~ltY?P8RDm)#*Ti}hfI%omFU9Ey9&ZGSt>c_dV$Iv77+N(uaSn)jQj#+Nlg zAEny9C=2*FKk#UPrm}1nQ2KG3^F*-ob&vhkA@+j~C*4Tb3xI<)(!&<#Ih(6jbj5ol ze&R%b*vH*MJDe%6_C)UQ)~mN7Av{MYOQ6JGj+FziQp8@AfFvV?L7nv%V2gxr)^1b( z!O8BgZ6@BLBwF7{Wp(?=@_`kIm7|Z@0IQN@#DA(s}|?!w)y0fPr~9r z=@|N`Xn{c(gFlQ=7^kQt?!r|%6LEuG^VnmLp;lD5MWgoZ+eb$doX0J&VKFCS{6vLO z7EBvh%2icWC=|8D&<#76vH*rbdA{eEvycg$aB&q4!5P^2Xa!YJP#M&a>bq~g`6iA} zoH+5~i!Y)Nkr;zHTFU7OrBFVXb`}zxKYyMwp^Te@2Z35)nnQ(Q(W7tCV~p2KcO-N& znu9Jyp=OL2WDa``?ZO;^x^;AP9653Xi80HdxIg{rPq>7hM2l)_YA|`BXxOkQ5zmww zCOCuU&{2i5u9#5BLZKH-a&j_$Dr3iv#luAfD1(NIp#tc9Q`TVb?w@<-~ayiH*em=GsRPW{`u$e zbnpn!WD5SEjdX^>y;Res{}l6NxM~KKW*lvli=IKUva&LQFm${sDJjX74ad~sp(-yi zD^wE4*yC}K7etr{8;yEXR0NOS^zk85O1y<2z~HDlCS1UiL_bW7!o(H^YqKB$;})4Z zEv8!UV8Ij9y~t#ekapvLSFQ+I_b#3o>67Du#-&O|0qWAa=6%t6VFB*lK?L%hwu znPR`xZ)q0&mRzM~2~a>9MuD(3O2nckgQY=SjW~N&DO;=1SMr74IZ^y^ngkY@DDQb0 zV5n6e=%C5D${MN3nHEOws22s=Z1JN(w%f(Nm?_RRvxUWxE5J*hfD-hwS*`4kDCf5; zQhwW+^xcAp@7BlPoE~+3sO{56&u94|FFPIYr`oQixL$O*u0}Y%EcIL;6ZlUjSN>t| zglqAR>-oV~l0rV39Q9d-eTT9SSREa(hAfV~q0YCm1Ku2hOhSLTG3BE~+ev%CONRZ2 z6>(q9O89za)OQQRZ`KBXm}WbxO*tC_%XUihCkeh(j<^>s)pENUZ)uhI(G#V;Ft}3u9!XOKl4=nlmx&SpI$4$< zH3G2I34@5wWteF16v_%DTUn;*im6&GSKRZ5x#whsEs6@Q@C1!4GuGuAi;}$)MoKK9 zIB#8oQ6KBB3JWdF@Xm@e>SH|BLyc+WTB~+xhIhtjV{MMHIL$jP+Ne(ROv&`jj`Wnq zcxvK2^=aOziQeW&qdeP~Kf&mpX!Pc|=ZA!3MtUl;JoAf;wMCv))53b{jIAlgq$p2$ zyiu3#oi@_Dc!YQE2=Br?V_Ah~<5VQ@wvEt1oR?*J7mW9Ijt}d|GFpdu=f)Wgk?t`C zo|UNTaAPj&kl<~I@RX(*ldFyPYWLQ43Ed|OdzZ%cObXkvEVi#MY*V^1BgPmT?XIZw zv^5($vWNt>run)DgEt918d_4?#=A~ zah7&FI$Y58Mn(H)b?c57bgj~?7~h_ez5_YECkwmxWcTgN=s%d-^GJ5z^4PxBalQk2 zy?1B!EspkWNa%YaSMziF(lr{>oz}lFy1UN3eR*8>V3JI?x)`H#@usMIg;K zi5ZRKXGeAZcGCLq$E|p$YQx{XT5}3_I$Bv00uvjSQcWa2l z^}5r3EkwQNRUam*cMbKrRSU4}mmtee2ttCt%$q^B>nU2E<-IWVR)F=i3V1-vBx*I1 zo%%KIEoYp?BFWn>GA7&n0xUk^b?g-W`C;Lf9MU8c*Ey@}^-%kJ;nvGG=M#Qkn?s`F zvR@c74+$%Vw2&TwM4pfwcz(N(*f;rQQr64Yb@J&{j-N;-KNe7Br-Y5(FOs|uibK;q zauNnbo1~{l(nfz^Xo0D1go|hUNkLO&z?>r*R*Y)2Tu_&lg8ZzM9Qp-ox=V1L-QxGu zCOsn>FAjz=l7P|3bn$*G4U4W#?aDe zI9h~TP($)b@>CuU__az^(W3_^8EhJRTq64giOW@avud8F<}FkUXR4Xg)U;V@R-0;D ztJdPUNi|U~7stEQu4mLUq`pj1Qz}$Nt*V`@CQnsU(YL5@wW`JeZuBYNdbJ+s8&xCH zkY>}WTDPffOVpA%Y7X%kdNZLWC}o10us|(Xq!ukx%O10I;Q;4{ z*#U2OY?tj$5)`Kju)Ix%+*bk|w_Ej)-Yvk(Bq96VA+PGV7NUyXc=5I#rL56|;s<%P zKAsN8j73qBxc0kn#U`zynsy75g3b*fLbUt zv0*w9oPoW7Q53@|MmdJF$FTPJF+S7rgsx9G#dbTA!OS>r+&J8h2I3M* zS-W;^d3iaEcg#eL33c$`L1aj$T9nw*(t>#;H8qtcKV?RUZ{X{wQKQfh%r)q6q^BAT zYK2U2Ow~2)(RR#Pun^12%8HANu^%&L3`r(L6>!Wz&%{GWcOl|If(qqlNee-JN=r+L zR1l^pC@4VncuXb<_Pm#_PrQqW4fdqIP*8;)Qf4eCLNVAh1T=8K5W|E-On;*L zBoq-XeN(&DL`Z1t^A^Garg*H0w-8LA8xipe(otACe~Kp9VoKebq!d#cg`PXaTLwcV zKk`RmHl2@5mmd>dG4T-A$Mh;1jG351yMHE5YvL`;;Xc3QuUQIV9FkIgL+HTc4uJgr2%GL_E+IA?C_an=+w~HoPR4Qw~=p>BQD2h`c0-a!lkX0rN z{?AaeB?8fu%kdO3NtkkhkT>!~G=^0CN^!bbA}@VjiL^dzb6n33I<1_~E5}PA)|ZF7 zf7_S-`Lx6r9O`tS<5ZC2e1QG=5ZhIc^TiP%@1brHu8^eR%inU)& 
zvt1ey`l1Si8CW9jJafg!DiJctH0iEt5oy6HB1KuUL~(^7o2xVpM!yiBDnSpW?fPn+ zu!*J$2-6`J76pnK9i~~_ubTWKIa3Fz<@9a!tcA~c> z$!N(l78H4xk2DrWyDP%nqq02nN<15jjn(zWwlZV=aPQn$qc+M@p6F?c_tYkO8Z*6f z%Z>FDjIL^9Q>JHXhIaRJ?*)tJMOEo5z8XM43+|oFt?cr?Sy7-=S zV`iMEKEpF>Qbfn`g04#?+a4ILIWa*&NcMR=)v7`h0)~LQ6seNaQcHEWO*PYhiBXCMv zqHj}T@14VXpUgw8^w-H@T@A+et;xD#+f(#sb5j5QobGR%+COht`_rmTfA?z5A`&iP zvaHP#h;OXmu5)h%xU_c))X=Xqk!x;5ZnkKsZ)UGkfJpxwzSYVK2 zD)3dC{oN4tmfiM_!}5~VxmTi*b_=#}r*dr*eU<3~O)QjH^XtTvSR?<1MN;}g32-Jf z70o{(S>>dFIZtTxMn^}zPxg09tZLz+1l+3?bT#vYM3+P2-}JQHa))wt3l2jxrcWBx zE5a}ImU>*6Ej#7porAb867f@4UQ~89I<(U7nzZ^>WM>C;S_mljtYqxgb|2- z5DLIuIKb^x>8Enb%9SgzNF%^PbcMPuSdMPoxDhGo=0k;B6iSI0Qd5YKun3$d-DQbv zJRsh};77zOOkxZVkY=!V602nL!?7$RC_&kij*bqjQL##-3>}t*#9^?YMM|8ZZxN+& zv4X-?!b)_Y#_E(6W2>q6ivz68>BNMuY!S&Sgs05F4eV!hI1-bhgImxojGc`~gXLLW zT^(bdVlhUCFDz5BJVS0s408Z8Z&g(lnLqf7#HcL}=s7hwK2f?#Pf;luDr1Bik*%_E z1_wDRhjE|D48eY=8j56jDz8~JV~=B#E<(t~L@k(}Zb8&Mp@&zPhpHK+!ZWx^@J zDi|Lg$EX+v*%_jSxRo0xdETqMu_`uGWoD@?6ht>D;w>x@6~HY>7OuisD-=peUh21y zv_qncS!XtZ5;R4ss8|)3tMV~w42~JD8udqeQCk#^4-RrwE^@{J&LeZA;f=VfPSwp+ zGpDQRXyXJmp;fi6R;%Z!d9|vRc*sh%@_;(f#)H)qWHt#-#N z1)*P62mgGQ_xt%nKF$t!!Qs3d=safCqtH3Kg|gkI9LEFfPaDolv96yc*{?;|-c7WBkZix~c06j+Dz8sjTnDVT z;-j@=ShhP}jkbR@!g)2$c`3m0PEN@0?j7;F`?J59>iKrgkRMh?{d_^h`Ivz5@;Yp_ z3{zXB^iwL}cF~QwQ))Fwr1s9c9A|ABEq}z~e9EdNVz9v5B4Z%w)A_eT1K8>$MuS&S z4in%A)@&0^(=C%_m2iOV@&6V85+?7J0{-sFZ_pN{RGG2{N-0UA;_^2DmjDTql&UO| zQo5NkL{oA8|G`^)vccBIKm;QQRzECs*v^bwhY=5B5r&t>#zqV{6taN1#7MOmRAHo? zJ9lp1zI_<$h;Q?~KL%A?g|UDH$d&@L$Qc$oOo`^^W>}FJTB#NeGZw}d5+6Bo1X+Xfq`K5KK9sS$QJcSopBfA{PJZ1#!1xe(4j*YFJ43|7$oTQ>C*n+(H7n|?iH z&R|PQN@8SW>jE?xTduYdjCd+)vf{`)`s+0Wj3>n%JTJU;v;pM3I3 z;(mDO_`y>Mh@L^n1m$AxCfgCdP$W+!fL65Yj8be}&Ik?A=_D2enHel>Y1Ci*d09ERWz13I%G25Y$xhoK)6^BGJlGBGZuBm!fq z8UL9Y4F0WkqL;f+WMlKRgO3Ich;^(Ig9x7%i*>2xmZgv{;Y&nbUW=YJiS6I0SsacD zazN5ct#DWB#5JZ~775$6Gm)LIW{P#_B8|EXlL$w1MZlz1@EP7XQQV6bDF?1@l9$Ms zyDtX1o(pz7r2T@m5%iqR`G!Ni8|yd|>A77w4%l6t4#yo9=P~8FF*fw0rXhFPZ11N8 zecEE239=tj&NpM-Hx?(~C<%Hp%D&0!e9&h9iL##!cl_?*#&73Fy&7VFGtBkVgn*wf zjQDkL;@1;BU(SyBxH9C+DMP-W6Lw=(6wJxh0wv9q*V*-c72GBPRc4FzG*VefMr{%} zrC7kLdH$fTwYTJU(I?ths8_hb%fx&;ssgr%k5!3uFJ(5DiXsGVOjee;((|~Bn*Vq- zjDdn=y*kLFW2Dd$=85-Ko`5!MwHe6zs8sjVk?tjV#t2NQZGi|i7F55GIn0L-F@9adQWvF*nwXtbN#Evn>sv)68 zLqZDTL&uIXRyBFIW1ntJ4GGM22WO{wCT1J+V>~t4#==5l6)uhNEJ!n^4)e~6bdT|d z=102Au$RcxI5{oVqahu4Panx-7_6UCmKyjo|b&?vWedA zrO|yq$?l&Q-4X4sPxef#G1}I~YfqntvU=Ai^iTA5t&Z#6J+yB{Z0~}o-lZ{p>k@n$ z5_~wAKBRkfeDBVT{>_QLE&)2$Ck!l%?Aw;CQ``VNGm&f)_@OI6L=U=26w>80) z$Ms~(r@6L|k}R*;9LH7QqkduAr&Pc>74U}5{7Yoj^h;ZA^P?_4`RowL3OPK|3&4UC~YR*kJ!^;6W8YE_Nf!&NwCTS8PwkP5;F2h?3* ziF7hD@fMHr;5>82?GY*h39v`4hO6NORd75wJ~d@k*hW;3A0kVPB2C~0iStz67PX~Q zb=Il63RS@<)!5_qGF65g=vu`-A!CQg4?b)!4suoQC^f1{HBDBN@xcMwK1oe#Q7sH| zU8yP;sYPw7ZL``uL(M>k;5>;m1f$57DN!YK8bV@HT#y_3jp8ud)%LAw>jJd^Igo2a z*qGZNxo&f6y31U7w^E@QFFHa!2DHa5&gTOhZ@TP1u{l;M`&J!Lz#$6^ zdz;1akQUcsf-%K%d_@uJH2SRm(#Aac+%n+Uo#>`ITz@Yl1tYPR6zC*%09#*b5 zVw@jj1@5#sXGmPO1tNl1B}8<3g&HXsf-oX6 z)&@p7j6xW2s;a7B8c-T#@7}!!4<5X77@jafVR0ZO22YHo_==><47`X%`NW4MOxP+k(b}73xC{BEunu6%B`%d zL_cH7Lw}<4Q97ks&@(t7o`YisaWJ#0diebL^Y6X)-bWvO^!ewX-@JJfzbiaJhFZqVi~c81qpPb6HAJ;ZwlUc$s2FlX zp$rm@A3S=VQPlC9G!>HZ72WvUbI)D8c##fw_yc0PXXg{MA`6IqUTBmVt{NM|UJur9 z!Q(bV3==Uh$sRQSd1){{U|RZAWijIx5yv1DKq!Vx3{$$+j5=*bL}C#p5`s&2+;Inf zEyU>XBViOIW|y4JY>1D!-5zkl#CiZx7r!HXr#=NFdSgxOcB^&f(B$Xg=3Cb4Q0w&DUpju zi?7X4vEaIe!a7xu87jI{C$>cxFQO}}?Yj(R86yfXI9VsfwTIf<0_=waT&T>}Ap6Cb zfak)4&RDeN^%J+_>)PNS)(!izAm|<&>f`LwucsaR9kxf56GDWhlN>*?0d3!T3nNdHZ5>YK5)N96_mczJ`fTHazaL=l}Y2vRjjpRDxTVw1(fDi`YP 
zXlZq&Krt)i#W!J@HU5ybD|I|X+iYRQj1iV^xx4_X5Gh?ESBvx_N+VsU3BwY}ibd}i z_*El{^drzHjf8+yH9CZ%N^*~jcQ>UOQ&Nlx;qK94p#|=sEO*dwcW_pccl=PVmeP$g zYNL(HcpYtdYFJ2NhA}PQScZL~w>d#O^^^^97Y#Esny;XK2F9NHQ^z}QSLDj?$T0YO}(+b$>?Yb+m>!jjy1*(H)fA8=4NEj$5O(QZ5eWM)pk(>sk`id!nHG zKyFWOnr~HH-|X<-jR`uMGj(8A#`%^+_jRWFeCay3$Fh5_lx}~!YSY_QZ8sX$eB8M1 z>*iHoG^{;V&~?68GiutCG?Mc~Zr2;-TYi$=w|i*+o{@d8mA9WO?$|X{M~KFmj?{kS zhOf5`>tCDDzaX;jo?*RTH?R4uaqV-XJDwfYb*!NK@ti(1p)bAvsr=oCM)n=e>;5~| zX%2al7%1eH6hFD8Nr+Ob#p!0VfPp&%qd2SrE?Zo$I2`A#&Vwr8qQ&`9isg$E=cjr0 zcfzb^gj;e@q7+?JftOX_6%}yR67ZS|c-v`x!J;KfZoj43qQRDKp`digpM9zPr^s_# z=$FfG6eLr0V;q87=)Xyqz%08&3ZO?6aT(c%3bcoGnCZY%DsZoWF!u_0Lc!aIg!Cf5 zQMWiPoBV;H8770s#vOvm+@Ul>{%HxBv`_NcDVR*V(l2U7{8kfMSmzm1?PY=ptr5Va zU0k9Dq@b^i+dIpT$;_1uw@a<)QMFAf*5+sRJgiwdj&n-&jvR?a8Wv#W zjW8HAme#P-$rYg}3h{wi{*C7u)0wOcs{fJEK>&o%2eA%nw%`mE^?6J^6J$$94f!5e z%92jQTNs>(4m*^VHBk<_9TD6h!oTNZ5Ys>IYRA0fpXf?i~CkeUW zm^wDd5Nk-R>#$VA!jetK8LY~ho12k<@r=+wCcv5>)kD*e3GU^B3JVz1|I3U|M4uok zu@H!;R@GIiDo^E+!V#z9B2^^zDJmsdCF2YwM|d?~<&)z<%@cg;R2_1_2ZldBNIX?d z#h!Qzu1->ukQjL~d@~^z;w?D9d9U)~AWVe~QA5lS&mk%VDRG8?NrFlsAcJJ6Tc*lH z9ZFOQ4(Qx8Obu&NO_i#$Koy`-XdTHdbb~@BrZO;tO`{cesylC2x1(vet5_AIX@qO= z!But7~F7^aoctDgXOJ(5zt(&5z&^ZV_P@`%ls)^_jGJMbpbJUzIYRgi!v|iPt z+9-&fiQ;fC89~jed77FwQ_Wnj)~`~l7N`X))QT?EwOj4Rr%koZP&1G%`jg}q+)IiJ z&LIB@Y65N_kN>h#?RuGH&6G9OQW+xJv{0VowItUkxvnouo$tih4p^M0Fyz`DCzSJx zs*wM>KKAR$VQ(Y{9P~KXSoPAPQGz7;WVFGM_^8Es(dK+(i1SLEtKXtWn;m)-axRgP zN2AIX=hIHdRhQ%AG}|{*jq9U>UpH*;1llfJoz%{HIm~gh#r^XI;Wui6k>J<;DL<@^ z`7~dP%)Fms|1iz|u<%$`+)|fv?6+tXx>u7AHMg0hXbf~uSX`Ht^IEd&+Zo35kpUPF zTV)YAQKBd|-O{hq3#X{R!X)u1HDGa^umoVVbpKmuN|fLpRHyyRaL?Ai8Ib7T#AiY! zCK1Utqx|kggVP5ol_8AcWMy&vTY3MrsMG(cTWV@5Y&wiN7~x?MVZg-51G5pPBE~Tc zFIerLJbALayBh}eQ%^lbavePaVAnkJ%rm%(^9K(eeDcXBVO7EUe&K}|4jnoK(;F59 zzEb=Oqb^b&K71JVCd>y6$TXrEx(Fv}#Un#X=3?~bSWR{Z=^sdT zy%Zj&(Ml&BoFV;V=gyrNMNv=G3gw}=NInSzOjCg*K0clSeTs^Th*VHR9_=*+ z>d`BQWDNkWd*-X{DxnD z{q;+iF1`5Ti(h{Ew}$#dteWk2`QLrGbX>~&*&?Ig9HsmR|eBsOcaRa4yMWs##>CniHVt*_{m@gDBg$z zj4Gzf67$EI!OWKX{5rbK%4F}DJxTb5oH`aw)WyWac%BJN1`cb;^rlx4;}gwHPTr=} zSfy6Wy9o)AV;?W7t+hhKs1iMpY-L?1L83PbEW@|7jHfeNqB)NcC!ks4T*HXTn35{R zX{bsf_|!>;$f{b9770kCZz)o?BxR`)#>gnmd(ly$F<(ERT(1WP-0yVW?r_{^bG{jC zxsl`ey4m=;*0@v~cQMlbtBo;VO^o|)Y3$8~(N9E#J{9ErEGOWU&9%#FdpauQy;ASR z2-m6LpaTJ}OQE(W?XC|})VpEo`cU;upzC34!1E6E$Kz}N<;se0S4KP*YQH`y>i@ZM z{~wQ*{qFw4|Lfe;-`qCr>xtg~vL)$Py{UhEBJY!NL(XY^&Hz|&RieuXm>@t(GV6|h)W4ZgLcUYn~Z}5Z*w#fgqFs5 z#+DhYTf(-DGFIms%c4TY3<=B+4=&QWE}klHa9*5I7cbf`?%)wAo(VNZdyCOoV{ESV zwk5dh5#DA?wyw9omcDGT4!uWZo|EE6O1O*&>LD17CI_H$8nyM?wy|M zomOP5D)cOm^o)u1RA+dn#~JlFUuCpqdloc@Z*EK2*^x5Pl{)aF5d#Amnr*TqrnfO{ zYmR4GiZOk(clC^j?wv#X=SOu<4eOd5*0mz0zb$Ft;q1PvWm^yB_1uxERZw*3=a}BD z$^A3Jd$uHNi59*>bpDB z_h6Rq&5HJy%eLG;tpADJp35cMzHVOidPVzv!*x{a$42U8sPm>IO{YPlwkG>})A~=3 z?s&Im(@)FW@p-9q`#qT`vHx_@j@QTZK0mtS@4#E^qVkt5K5;{Z5LGF_-5epAbZ9>v z%~yHoRupDUS$QYK`f-}&dW!Xp5Zh0KY@cM?znonnL!-2EQQf1_4Qy%kg?aJi0{VrB`%g7?9{5 zIk{2Nv`S5;`-O1l$N<>lS8iG0=jV{DvPW{DqzvOzFBiVhDsjo2CLb!jP-J_#ekHB@ zRkUw(3p6*4A;7l$Mx@*#;N$4?( z&zNoGf}HO8SRfHd!AZs>GD$&L{nGKDi+Pwf$PJfpfJ-ecEp+HcGZ~Wzdn`ET$cE(~ zjX;LHRZyd9aDKcRPX!X3#0MwGs4rDqBKJ_Au*8_e2@V99F=2w z7~z8sB;Hc5>hVF_kuAQW_mF44%16(1sjeMrN1du8zlER;uHqQ^prep}g<7#rtwX=f zR5O>UWn0zOyVPCh)%nlW=Lgh**=ja=8~GzKYl!?SRb{hk#)Cjt;8KfL3sDoqDVRv_ zN}0H8WH4E!?DyF0ug2RyPPH8i)V#YhcIPFBMp3?MaQ|$Q`({JP&zeI&ulF2J32d|K zQSojaE6pJu#ClXb9_svQlJiZs?XuHxTm_u4xSn-5&IZ|^bK4)VYOH&=a-MfPZ%*`n z(-L~cV>@ASK4)=i0WOQ{w8i;)nB%LOpzm8FzHScrere=)iz9AS2VZhKF9+B^$#T3I z=X}`W+9G4xRAs|xN$lmg)wxhY4z-K_>D3752dU2YGn}8627Ny#{A^0_TESadWpcSk 
z{6JAvtoqWm^NvO^hKO$tagreM?r9e_b&Q$DU(8<)2@jq5R{#~_9sz^G3H>z!jBQW? zC2^53CtAc^C|_AkPbIffsmfBKV-;CL{%7af|KyE9&8V_6Ffh>5(}TeT)*^~AE$aPx&6o)mvDeR zPM$x19s?{!Fc{+)pLgxr#kcJk(2(Kf%a<9Z2iA?Ll*<4mjO>hiu|InBD5}ap)fgLb zjJly{qCjWPoPjZm5x%##7Y8VX5-x`iA4XM?2FDooG4P@q^t+*?7^VY^u@pbSFgtea zSWE=y2Na5l0E70papO39GI|}#dg-N?U|6Ba$Qd04OPO>X3dNv97@QgvM%yXmiwdA7 zID^W=m?Aj@_9=P>HAj~s4JI{|g?>O*2M->^@Qzkc!X5R*3<#qY6~I;W9^V(BA?P&Z zfDWWL6ZW_Z6AEQj$Pq$iXl0Xvg82modv0zn4hTEY$A>6Hc6K&tFpTd*&Iz6r9s&9s z4Mz>p7E_BA=Nag#xVRXZ6VsqO4_O(*hYu%QKs{TWOiD^3mxkB~`7KzhpixxfW%NMw zDw>Q3!KhO7&>{DO$VN#?37INneUQOIcbmbEIVOvSToF`;k(&9H>L}Rb2Y`-x<&{^^ z`RL@=UVH8J*Iy?jf@b28;TfUl(WUt5qUXtPp%gI1dr>;$H4|>aG4dqw1I-~l2|sB( z3*?4wByxoeQ3MXqpXdbk5`C12fThcZ^Qbv4@k#v|-eP*b5IiBYV&W|(axyq{4pP$0 zC%!?~B$E|mdf%A7MW!?gQ4ws&3O$BD3+j(w^39t!|M4IH@i)Ku%`blOi{Hq0I+G3#TZE++Mb@CAjVIRao$$n5HHbt$P~;mZ|8Z_V~Z7Fwu2R5Oy4VlE%*52K`#`% zWsOXQqeP%=gb3Rf2{w|atb|JFFEdThfIMMbRLg2%s?ab_1YDvmwsY5dmjH0)Kx` z*}t3~`w#mkf4^kN&!&5SHqG-{wdd7n*PV99lL5}F=|M*vuypiG_GfL5V-b$q0$tCz z94`h~&RZQ<5?t>Ocl_b;nSXe!{<8wh=M@3}e4+K9&&|A1XEahrkJ7XMScz)1TI|F+u}^1<^22ETm^8noS*b{OdAz$J(c2PlG{k%AqCJ%n z?vfZ!by%<_x}(o*zGqRT zcSEjcaiOsy(lf>g$@K(hBV}x8l_xkWEVOV)=%^S^RfM}N%`+k2SdwcjN%KyLbdO0f zCJyt?8t$1j!kC-xonB(Bt?;ysG1gTXo3acY6*|$|gp|X)v*SFqna0c-V~b|jcxR*< zlTy5shI(hD8&lK0(=v@Y@kV2fcjNdWTdKV4W<~VQj_95m(J?VWYtElIw%MBmPg z{w>Kp8xp#F>Am+4zlCz#H@t6il8)>%kl{OD-0@23)~5@*alSvJe{po*(%8PW@mf}; zJxSA4R>$|PjO*Ev+P5gGw>7$VW1`m1tv1@q!`3u-w~qIAE{f^dl+=&&YqYp7O7tBo z=z6ZWbAL|nBP08s%^LC^D}I}hacbfpdqh}`aT#U0NU z?%15D0iR<9-Om*4+CQ@IK+gXh-eQw?d5Q9VkYU+dv^uVxY!>bx(BV4+E(p?cNd>&= zuzwJvJ{#rysy^gKMZjl;&M(IX{jfUv7i(j_Y7F@#*Y;7IdON^+Su;#f`S*WiPyke6?dEP1q#9KOa z>c{mA*gJY{23B@jkx@*@*^;7Z=pb%d($ByS??6c=5ssg>k|v7_;e3HoS7@4w_FAGZQm;g#ZuKj=%oTuTrvDEE%t9D zs4xsAWmV{%MCDtOTgXVko`I+Nm6EWORiVTRPBQ2Zi7sX&XaX{HP(lK%!LV{>lxKXU zThQjsn|TRqV4@|M(6LCv8GJ>W&dyFEEl5niA!LGMMl8Zy$ps`1_=cA+)Nn7UGUzL; z@#qSKmANU>g0B>`!R=IZVMq(q97|)M5>@!tnm8yb8Pp6o6)y}9oOVpA&RR;qf`$AQit8$SK^TdbHM6oKSTTQ7d zEl>qR)lh1)klbP-DjwxwIB7!}C>>vky)X^aqcV(hl&q5JO>CSL__*iF$5O8}3v0%~UfhR0TOL zxXKbyw{dFRVzszib>lNtO`W6WpjO1V4yi+bP=9z=y~`NTXc|6Ln?V!sfY8q<9S114 zQLB9Dm^;nl_`?W{o#JpkO!VIvd2o;6cq`6!&hC8H?))&pddA^=3X7a1`;CgAZ)X_a z&h-9rWz>iH0nY?DUK!#%8|`|;rs;VrWcJ6PIv^pW&sp?KqCVyLk@oAy!=0wlurLIp4 zogbw;Kh1G|F*@L)Lyu0a@_uKVj8VI_z_>IH(v%4e6|@qT&8FBV4}a=XqA4gR{x zeI?4V&+a@V5#JsWKIC4D9>!WHMof%6io*S2JXRmnJ$bcqj?t_w;|BzVh#GfiA%;^7Ul=VfUc87j7%-0>JqpVa<1;KN+;ZT+ z0a#2h{Er_$j`Sx^oS=UYzT(oUQ>S22!lJ>bg)t3795UIubt~*6ObW;WmIaIs)bOaE9(tm@<$p zY#BO%V&o(p5vC9-0CSigRT$D4w3-s}7|${Ak}(2n3#KK*UY|XC7PgXUDWeW(HA=)F zkC7kEA>f1N(CrA-K>evXj4>QvDY1*X;Q(26b#)sT1{8#+fXN8+4vCuRC3FHF z1oB5)V8dRzbcv$G7cN{tt&llxgr!eMUCd&P*^Q|U&xJ)`LvDE1^dW)?PwyU+`vF@X zzl~x3jq!09ycn|*`UEXP-O#J_tihvUbU*YDY(qvpHu0Q_NFCY*5q% zS24|D(ns@|Iogg#NyQk{A8om56GAU%G2co6u_~Y9FJ+R+Z1!A50NPmJlF~AUcbCB6<~PMghz<0P&tQX4h(87*2xr1u}mFEP-B$bg1lPv}K`$uBvmu_#HL2fB#WExOMn&M4V_a|LyWcAedo$MljMI9_ zWk02yC#=pVY__vD`vI5Ze5n1c7}u+Y{fNtU1SVKe;FV#He|>eszr4KY=ktwU?JoK6 z-~adz$JYL`yX04WBmdjW3;%ec=GXmMAC&~ZH_CH5J@{I*<;L8whk{+#hS**nu3xe= z3ZT@lobyD5_pl;LJ`&yA^lJv$jzt}IMUAG99QX}Wnq>E-s*I~^tRqVKI z;i$|Jz>3CZg`{Z|eV8ig-fHR9Y2pDrTA-CAc_=dl!-?@r4RiHUuMsmfRjCT4qTJQt z?vhwzY`S+whB2L?nd6Q6WbcGHqb|uiE=~N2qC8`w+~q+5snMQ_u#o&fXVQ?+qOqR# zGGl$b(Gcz)jr}n19PG1=)&!#=##0^buE;Xx=NOB^LJPw~M;Rd_ku2I%=?NK`;Hl5> z&P?-8jrEMp^v+H)TEapLVmwtiS>b6b@vKc1fCZHicOlema<+HTP-A+cx2edpMtd81 zmn3)^QoNH2jg_N3E8>hA)HBIDsV;2ovRGf0drPfnYmH}Xv1fIRr>@+yX|lI>Rjf}S zi|#W;-IqppUMb!FWM1#%xqaC0NbTR4pv6+&uWq|Edi$P{zOGa)!!jqLr!~5FO`NYM zy?=SEZ*^S%+=$*fcgK{lu2pfqweh}n3H`Ifdu9yjSrp}46z!W8-m_AWnKkh~9IVrM 
z_Ptorak8+B0M0#`z9SNQXmgU5Te*L@@3HK@w<+|XLf%;q8N#)kS^=n8u3BfDz0`9No3@EWl8*!#dR&%dd*|` zAWB^yX1P%r@Lg-fFE_+}zdY)@C6T}CN&4Mg>EA7m__8kO(>(jThB_x+MQ4@kEvM~T zsO9}Abt||DP^mGIje8QC(?iC<$~?dcd|$GShVjCF`r)L zxJL*xcL@DvieM~tx2mK+1 zjhTi>0?r@-!#xvZpq~*En^ze#2o<2jE*UbEx25h1feVW9GAyu+Q+&T-;)*fF%uAv#17AB0U%3)ITH= z0Ea<%zJ(<>Q6sdJuo4%vIEmF1s*QCZ&R|dRNuo4prYZkIfQvDS7(W7L4pwV;1gJk_ zWK5Vafq@KaYirT-W5q?4!;k<|z_f}*>~xmMLu zxg|qo3{gYER2WKUG$QWFX<>k&a20Mwm*yp;p%5-ZHW#Ww)CYAV=z-c|->4cXLW|FA zHG6?tuu`pDq!!In^Jc1LX1rjm2Q zfJ)dSafvD+-og((lCZ}y!)N0vQZj@zN?fcK*QvU4RbHcN&`XomWTHLD7G1GaExldc zzD})cQcXyJI-Va?KUP2PR^8jxc6>z-qk0{x<8k%)S#|b?y0JtpL0iVF z@y)84co*_P4@_6n*$Ftm-ln!F<&&{xnke5+mr>|J!MbZ?X;djw{CcZ>pT)V?;=C5; zcqh^RRDd3u_86{@3bnHC4{M@tR0q8n=y=KP{50G3Ub^#u)%mbUZ1qU^=Gl_%J`LeI z7AnUstMh!c^IA^e`$L@{r`q0j+i~ODG4{`L>|Yc)uX=4yS~bh%n9X_8;yPh*p0POI ziFJLqDB|m>-cJj(>*$x&K{uP+7h(dtw1$<&=MPz&r-JS0gEXG3wf^*gX4|Bk^OfUb zl=I7c*Xdx**?TS0{?%ybPh)kc>j7a5PLWm0QhECprF0Nwy9m_|6-gGXb2600Dlka8 ztWK+BjbwTcVL6)l*ClPO5_iu!=wqg|_|X*;ISEk~x10=6szkrc()^YHsmweHa=lQ3 zYzF@?OT_#qZPbV$w}dajVccOvf}zBy&{+0k%!l0pI~^7i#u*HHIFIoY!!is3*xJ}* z_{FG;ffP4lEQJY)arNn^pQeDxi4!Ntdx6#b$Rm&J*|P@|2P{WMC8j3=$r22&L*xXu z=fQ&q>6-w{l<3XLlP59mk&uaLg<`GLZ$V91N!Ty2YEe4HUa&uX`ZNV&(Z=1ocf)X| z7Xw967{3@Z5mm~OGg*<8k;fPdGkN^@@wkLh8r6V}{Oq&O!kk3waPa)|&!3YGwSE8n z_hF$w_uO-&yX@V&_tK?HXfK9t%nN7EoMCUEK1hQug7Jl#Gt3S80mtaNS6+Dqjl%4P z@gIBAJcwqX8psMa(y1Zmp&LLq8xet#C={eIO0eS-#G44X! zakZ$Z2xdEJAw)m0r&kUYQAkaps}8kuae%o7vm4QVl!7AAbvW7F+>8vF8;K&sJt$+6 zoSe-3$tpn_s;OWrC@8@15bdR?4!x)-CXAm1s*OgW2QZT?c`$s0 zW7eP5rnm}gMN|a!p>#}QVj`*(A0LkcMwc!tE5ozK)4>l650pW*@Jxw7;`f7-s3(7l z6%`fv`T3@&5-BJIkuZs)Lr}?*k`mOJ0x8Jk;>C;j6Co=+HHs?Zx$t*R#}nj_Ujh~q zs648TOej0Uh)ZN2kuSuKH3JaS>Nj;+iMJ4kF@b`?Xo)FZa-To$GeLmCJFH%8fZxnw@}4 zW|(RGkqMZvmk6X_!`}^QOw5W8i|7iyu?Uy(=`&j#n9-E*uP5HJTxPUp(RZyAbY+G3 z4UyH4X>_=_{EQZ(tX{&G%<$VFu=iJJR~Ic9(j+_$m=%*{=7m84o25*!0!oWC`|*bc z5r-)kHp?`DUuq@)apI9wF2s+ilFxF7<8oloRTX&Lf=n#;+8rmO9jAh=Z%5lM2RMGD zoCgCOr!4``+wAvPZI=S=Z@H}xxgC#(1-zYPzn*UYFwyozf~(tNzg=5T_Alo|{ofz| z6?o8MKNsS7G1z{m$Z0;})Et0=)>|)nc4&Vq+el&TjMe?10jmXqgrsYL z#ab`Tj}wFh=H58%Kcy92ri%L9I3cc7`J=;#kfw%l>NTjPjWyV1UI`>!F6*%tG1+Db zK9eJiqtWs@rd;A5wTe^N7|oB>kV~|?Hp*QaYt)8^7AG2wks&3aLBpdx)lr@bZ%BSv zNI|qwjY~#wF23Ru=_!lSkV|2dr@YWuUTCb!HWtKr>Qjx0sop6W#SYx8FU?*PG`1Np|1%)czf* z1BY_E-mPjoklVc~zGq5!XT7m?V#L<<3H=MBduK*;%?R&V5!<&TRfmLLq?s+gO^I4W zWnF@Ab)1glyey_~K~(RC#Qx368gJQ@G|-jS|8!v&iaRxG$JO%oOQqZP=kz|D-FKG= z+g>i&`udo*S4MC7ZtC)1&s%i4blb6l?t3zQDDgtcmQS1491$Ss?%`T@cWF%T-5OEp zd!}&5xuVX8vwMB%zB`BY9mwlJhPdnYVg0&FNxtq>->wYby~BO?48LPn+Q9AU{r|zW z8CI1;B&tHRgwYI_7loPfVp0C-TH5+95ir3A3-t@ibuHNPVZ6GYp*|jA{bEeOcS|CF zduRIZ@5%Vx-5LM1Kj)th=l^C`>JO`Oc*|pZKN{(6pN(>S7-MP z6%=KM;4PF)84yL6o&KQG4+`G0&M(!n(JyS>T$AN5yjtW7$Xb~o05Wr1Bts${k!Idv#b`ijg1|NNCE04>=R6>N zvQ?Z1mx%w-+FOhuyHBZQg0~PV!JYw&$sxf;%!GuNnf{G13*}0%@|-kj5_L#OwT!9FvsC4 z$=k@2pb#624fmpMXb~Y4oS~WxqXnUh(3wQ5NNb^k6+tJ`SBPlg7K-R%F^^>`u^RLe zIuKvk`7nHN9t$?Cv8kLhdGchOp)VN9A~1z~u%x+iq zBk?Mp_z6xj_Ax_9;}%%=EvjXjnl?dAXi`nE?h8}_<4QB=4$f{aZm#H#*Pz`zs;e4H{!;+TJ6m4yxI4?x8JL;KdHLv)=gD?@BL5x|2ZW{IaAFXs0QY!97++e?E$26Y~%#^EeJ*nd(z`I35Rm0IJT*q zY0ylh#|QbKh?jlTE?>37=^*!by|RlWnLaK(B_46p|IMe)vQ$RP875n^&3e+;`ifNt zQeLpxZlyUs?&tcrum80K`&obM%^2Ig9NS^LZG&<_Qv=y+t{hGlru@ zCfhR>+nKi3i|s6DL!9sB`+Qj0=5DV2dWgEDnNPaxTkVd$cH3#qax2I8>$zdyuTS~y zQ>h>I4R|-v{C;=8j|X`!i&utX5R?5-;C5zVk}+i#Y=Fe!~r!48gs#EJ7Ck- zOSh6NS7L1IO!jpWxHQ(JkDM5U4Z>TczX;I5{}^#8v9-)uJ`!7mgGtJqB#?_uxNY10 zM-!ak)?yLJ#r`*uM6IzGvv9>HDpQPr;OdPelL!6btD zf)Px(1T7*td~IzlibGqnva)b1>EeP?$d`+<4j(>z=gyrs-gpDoqM5i6Xel9rjvYJ3 
z#>PfPMG<&0kPu=Tu=ME@LSG9q=$4d}kZYxjY@`|{M=m*4ke=~GRBFbCo#^Oj6vV%z zek~{{z_pl>>AAFh`*ze5Hal%tz8`t`;fIMe;eO#Dra~e)xw*M?k70BY8-l8`0_X?Q z!r*SQ+Bm{EBa|K)8HozuDuWk=AO>39xpQX}fgVG@Q9KKskH=}_#*O4JLS^vqbno6B znKMu1#!x113uV~&U9%9Sg4=JCwn8AiLXkH*q}hF%bOc<@M2F^aB7 z$eiLztS3P*!bAq%vZj>^k0GY>c#C?n0hRn!U{Py!7lTB1O^XsV(F_A`F=%ED#D;ha z&Z1{7U%q_n)~!!J{q)+kYlIo`YhoZCo@D%paPRQ=-M)Przg&Z##DF_m^UxaXD~4+g zF_6~8GR9W}foYAm82U#>waF}P07?cVwYEjZh=`4G%8)w3Gs)5)YteyX&de5#;!2WI z&e#n97K8H^HyY29;izK>Lo*7D5E;QSL!ZcycEVRYKXX!27AZAXk6Y2<4oogt0-Kby z7!>_moaejg0D_GS4>e-!SBmK|R;M4)v6rFZt&ygOx7c#!jrnwm=apz09ZaBMvRL;6 z(Z!<&37nFx)4=Ky9$j@?M)$6tDay$7)0i)>!Y3)~YszP-&|@wZiP$~L`KHZ `eV zVtd3#XV-t$Y{OFIUXFcVfKGt0PJcUZJ(uYJMxqFaro({V^%s`Lj~$$VbdL@#~JDO zYp%&$YtqBG7*7P7x}P$^4j->ndsojWSN{mDvW-t-k~SnKW*HoLPnJ=Z<4RGZyHTax4%hEJAfQo1%eTI-XqO^eg2{QXj*+`S^T z-Y7A`-6O#>ILK8P?(UxHnV9MxrTJ&Ja}~tuq`v)A-J|0?147+h!?m(RZII@d?)K}5 z;=;A^aIGTTzk9lCWIxaHo&k%xcxIsVH20X%LEgtZG(D5*-ITH9X#V2G(T$@+mX8iu zQXDX&y6vI`QBBL^n(E@39!b(2PR55cbayW*(-!s*T=ICbuE<>Moz-#;sY)4&^OAl(+P;RjuDWPdaeDka6%fFj9`Hv4y`Dw|dKQ@i~!@|)YR!+N6T)QgCJEU#H zj_f6Gc3JR52R;8sll8i7&aB&#S^u~QLm$jtwlk;hflzN>t!{2)77ruzksyZMg$Mb6Jgw*7Wh-1qA{{Pb$ZzZ@<9m*bVc-<$v4Q%PTpYj+Q6 zvhDW^9ryC=chW8Q@@#i9tk*R2Ig{g{@_AMu7twdNQyaQod{O2IsxU?14mz2P6|8lT zz!bCP)CjRYss*f=ASvl5GgqX7CJS8CBy1;(1&yh1i7k4lh16xKm@`Xsltzbi=1R6C zRH+r0{#yO(&!R#zB=JH4XC}%!=$?Xeb{0ty>b!T6VU50L{pCe1=^}>;mQvY*7Y%Ol zdl9Zk{UC+ggl*eGIc##sphI9QJ*ym4WTW5UNa2K;B6`pd2!9Jhc(76>Hw$qR$}1B~ zVE_>ooQb!P`js3nj0vLT5S^t7xxnPdzL7047G%rtCBE9Agb?u-EDH%QkX9F~P--;K znl%eMbW@=wH74tEvXMuZ+6Op`za&msym&Fz!}N;5${cN^ECoTB1q&AN5|l!rXF9sj z4F+kjOvS3EK{l-Iaf+0=1b-OxM0b?&D+_`gD z)hM0ZBcIG6om8g`l|kkca=8#dz&`%sYo^M~Qdx%TP^l^;1cqy&^4ux>N7*lcuYMuU(GL3B{+);6Fk8uRiz>g@<%ed$Dp=U9b%Xd z<9(2(yXuZUVe3#sG?1_oHMPmXLZ23V2 zSIr%(#tu`%@OOb)uwU)RXOUWjQ=`@Bp=u}+H>sxM>iAx@cb!`IwtD+_>US@zmr-Hl z)H{cVr=t#-EOeRP8EJ|f4A_0NMMUNBaK<}t!w@>oA8@%gZq`);udJ9jf|H-b!e zlC1CMIL}7e_O@|sbvn*Q`F>Ot`0G_kKWypr)y!}V^7lGBKO3lBOwuKdHk<9wTO6CM zI>GqkCfflKo30ZuyiAl98%?$s%(mlB+tqOEyK$Bc7JYnPZ?T^WvOZ(AHz-?h3;uzB zVKNh?v;teWWk_5P5_Hh@jLbo@3i>-T(56-b)2;CqGQ)Ue28>ju4x)mTBu<{063P_X zBGM;}xc`=$Ls;cwC1nReU~qYx|382i7zk9cAwMU^5ZHseckhP1fA;Lz)2C0v65O+A z&xQ>fFm_?E?bWLn229v8UayyuxHz?W^JW~x0E+~$yy)VxUp5Bv7<#FE8I2(h10ZZz zjHNI@cJADX;TJXqQo{~CNmxnfF6?8*fuRJu8zy;WWhD%M4BhBDR%ze9eM@D7 zEr#0Ss=B&5^gsF>rv005zDe~u)CZNMRf~xUcdAR5E~I%!XQIc@7W5>iE$pBtVcw!? 
zm@lw{-lp|ULQ&iinD*DNUw`9`H^|CMaDWob6bxc~4UbdN7$!g6Lb#Kam6el|gQj7^ zLeDU`m>N&$E<8m&dh}o#UQ1@$(9lq7Jriu9kTHr#Pfsr{E=Hf@AwhFUy^H=t2hy(q zx4XW+9`_o5(M;?h8SYwsem=1vR-VWSJzMA#g83IaXo8__l$n`XP*6ay0@u>Tg@I0} z49R5B1E>;`p`Y57B0 z=);4318>2jMIQ;n-G%so5qe`p5*aYd+EyY&29b+FX~KO%4Xax=BNKuW*l-s2>(;GX zckkZ){`>Fo^y24$=NXSH9unLLJS})Ua4TPU;e~I$`G$UI^yM%rX21~yTMP#q!zaXu zJfd%j5guhEqtp?}s9V$LGAqI+20;z_pDsN{Cm0|PGbeh33mMW#M-*0%Ia9U(PX(S0 zJdp8+kfhqtm=#ZW=81OPU^HqXRNILKA_B zzMJ@U^b%JY%v{sNxacZzG)Mruskd;+u2zl(65xPjUgdxV^t3Qou2YT!%6ZZ3IApcG z7H-7~WT%j;Y`6NnlkdC5t($DHT%I&LE@rvjOm4H!ZrP}OHtF=0j{Qn~y{zzm{`TOX zzB&5Gcbot4R?QEa3;t=c>${io@0NIWS*>eD&~}$@sX1O%&Id)0^|Zrrx1;IHq3!k+ zdrriguSQs}6lym*I-gMX-O710UOVL128LmTPL^UJ;b&(d6HXTH-&_f#>Lo5H{l#CX ztGwEtA+)+k7A2N1TAeJ&42-Nr(n~@qY!P{G-HBzg#Cdhp=XyQBxkHPZcCJ7%UXyv1 zNjLFk3oM2e#bjk}Qfij2Pt-5UrTd};_~%AyIulETr)Q|{?xO2K2m2QW`FD=edPle` zUA}3tTK`Vklpt4^C{N!MZB(K*B-q_0(>=bYdr2S9GVCBhnpP9wpA*m~JH<0v$7i(u zIqpfh+LR*qoNn&gWNk=f zk~T2YGcLqc9HaGZuT=zN$K5wt>yzdlouZA7_Vi8m4DX`Ni*QxA+N8Gi&kJ%DqQrKt z!c@=D`H@Zau}!nuH|GURN!4nKJu@bS*RSi)aJ97dNdDrAz?pHbq3JH18du_(wIHhb z5#g#`m#T*X)dV*zi`BVY8seJPrZi0rZ$J$fN9%qj-uR|*Aq_n|iv|SNjSbPcaMz{i zo+>L68<)m;o8lXvPW8@c?;R7|_(X?>ce>3xTeRS@4vi}knm_0}^{b&1&llA`*`Z;5 zTHVF&^L{mR+F#DE{L94$|Hs;CKP;N?`OwMNOXr^Nvf#l4@Ay#fj_kT;(!90NP0b1V zXK8HHYuR-#bo4%+-1KCMcSCx^niO4DX-HrL5^v0?Ki_51iiC!K8fsO)D`)AZ44n%2 z;lx$mxaOr%|0v#qHELUBYNva)n8I`pC4KdnuCpioECW45ygMEg#@D@Aw_9yDL)4vg z%lln@KkU)wqdu-Lr-c0G@q{0@bo~9E?BBhX`{OH_-#?%FjW_zUk%3I`L3x`y9WC!f zs9VwMdZ2pCWPeNOb6*l2=Lf}aq)vblsuay{g}2aUg{Xxn0qKbAT%lVT(c&LcBTgw( zTJ(UX$%V68M49JHCQAgH^$NDLPJDHm&EgO*7k&%72QYX&Y&mTV^q z{6dGlaveI>xvc4oSJHyF(3x#yi|Wuo8N|xP6Q)Xj?2IHL4%1V{fu8cA4~)op=m5(y zv49>|jw!;aJG_;6^e|=ar_^xKCo+%_tSU)A!Uz|vV2P_Rq{wj6pq4SCSIE{vMP{_!GaSPGB%3dCUova9q8hMH4?8SutOzlER(QGM_Dipkc^y)mT#3fknA^{FET8dZ=`-)V$^i;vu{{8!7 zQH>^02Z{k%Sjb_Wj2uYMf(Rq3XJ($Rsl)U0`G9{!G1V@Ih`C~=4y zGFpuup+?}yBDJVa)h$=cm#8I^)#PDn*dywZ4Qj(ywe?l?>RPpSomz*lOV!fBYA_YI z`>B2-)kyRP@_bG`cU4_Q%0+fHSgAQy+habqt$x-w+L~W?TVJv3FOn80`%062o7H~W z$9l`F7TbAW>+v?W3-P|6_42=$YrkLY^VQ^_ zPX@QWk>t1)Ykn`y{85?j2gQDGggBmZV1%?ZE9(in?OdqkRIu#@lOE2wP1$d>x4aW$ zzNA@?``OOMIc_G{F8kWva5>I-Y&WB=m!m9~!>wmS?052gKN`~R%Nb$6ZjSq8P{0MZ zWw+IVk@g{zZI{`;UOCPbBfYFrV<%$5kA8 zCD48+!M@I{D=j*NPb^rtYDFRFuLF04+FcJX^4EP`^fyf%CF;l6{+15cpky%%#uA`Z zw8W|x30jgOpE#YQS66UuBc3qrbPis9Y;XJTbM_OlB;%`15SvJOPmv^?hyH(83+ey$ z_WbkD|K%@#f!z(W7luEKL55D?_*-wib>hSce%-fkAI!t`>(>_+77|{-unE%xMib0K zj9nNoVKrc2L{_kFNJx&swtM&Pm{6q8i5FmmdJVI#*H-nFmhr5ME=-E0$3%e3~?Abw-B_!arzP_8Dx2&Y|ob zqeYk)`M}aA)Q?de(*Z_xOc@x@(H!&@x)IYB>HrH1raiu*4lufr2@1tjLN#afBxWca zq1Py8TV#lCr4tHy_=+^RUuY4k=amiS+qP}nUVQOI+z1$zxE8Yp)sRC& zLt)Wl-|+EZG*DVv8fin))6*%GgRj}y+2!Tss0=bkLFit_lrfGBQxdwjxVShkFONAG z{w?&7X&rhK8yky*m<^E?C1B`5gcji|x&>`yT<}^^Wx_uC9DR!^n4vo4;3XjpMULoT zlarH!=Hp=_%!0>;!bYe(lHt~*q@+YeMH$4i#9oMF;94ZWB?ScqxPz!3+Ka4kJ1Q$H zG0XSq(}y}rj6Y((aU)ePW2Q)Ei+W;0N2BoAY~Q~9?Af!oZr$3ucQ2*8NIi$gl7d!e z&Yan^XAhnM^e28kcv?_rWXqT-#$@p?`V$py?cYLNg1jH}#4w0l{x%@UfH)Y8V~~~* zs4*B(3`rfs>4jJg)AMqq#8ta@@4j^D(l@eUANPn55JH$ii;$o1r5X~@XXBhaym?I;osx>NQQ09_t z#_(^U3k{CrH-(=($%XLiBUZzZB36=tLb#PHR;*|p%0#A@)hz->Yg(9c=^JC{6R}W( zFZU6pR*8oRESDsKL>^TRtn-6)>JPI>66>5TmGZWNGQ_pYUMAG^gf6b9Lj7W+=@&~n-S1?6Gf=0Pct{Mi!#3LymmX-m zML9Pp=gU^xtw43kY(MI2y_{&d+tGe8#pgnp{X&raT!zmN8}hE?2CosLsqEieT=Z_4 zF728iI90h=VT*+iCtC>ldkU6SCTo&Hxg=l0iw28*H&yRUT|s-O5Wk3kk>1aF0=D43 zj1#Azal$Gi7AUMm-EJK${JoQOCOUJ4QUi4VmdYsCfcSvH@!G&R&%i+cyhQh)3~g+L zyNAoKW0b3xz#TopwQ^6J>{R!NuG)O0jPmpi)w)G{`o?R6(zMa#?nPsR8!G~5hig4L zYU9FP-2+^mVs#?i;ThV5Fn3qvT%b+Q(k4Z_`^0Gdl61VKDnlFBS(_E7>p^!<)ahIX 
ziH}NosH-$vo6^g(INviV*j*f@^~v!}$=9ZrxND*l(N@*|aXjyE~`urSyh19dy3j^IdAs z6fMNxPx?>&&A9RZ^TDxyd3N%TtLA+^efE}&MK7c_tV#B+>wpUALei7M8g}Q_A@lK} zjZF!S&GC&5@l8*rHa?WpB)Qc;lB~-;Hz();O|v6(&X$cC-r14fkwNw2LcI9QjA&dI z+cZCN)wIy&`H}x9-lAk->6RCHu@Xs?DSWw9V4tC^B9Wu7pZ6%|S%>XNkq@ZUa}^t&AyKfRj%n^keYTomz7Q$s!-7Vtrt-}@y#A6EF??PR|Z zqOSU>tN!Ye)pADHhW6PnazE>J@tvO`mz6EyMRZD;*8+Xal~9_|;^H-3z?0cBV$j8f zvOXgOh?pc#`}7ts7t*{?p`%t@S(;_U5EW@1ZxM7whgp_Mp^YMw@rYF8ajC-u$#!%L zp~`f@TPBK!$#8i`+)V~UGGkY?`r2Qd$_BT9D*RFR6}*L*$vDZgN)C!&pYG_=NB^qF z{q>Uz7fNu!a(Q_@ti`!yl3?0HmAR`@1I53E&QMqSY~6fM@JXKn}q^N$OM0}@FUYB8jfy3DM*YJEPkQcDmz&vN26fFKL%Bha#RjVp?ov33*SWqKS5O~ zHbmZ)G?hjrBmBj+nJN=)oT8?TRbz3=ps8W$giUZQimO)Dy;W~|%a90{01l&ya3Si6 z4{MGcN*$3{2JJ$v40UXZYged>eyZODHDQ@rwp1-8s|;#BUX353hM+%5!-6!VWI+W` z$>nOfS9xcs8N=1^6>7!f>hYCoB@QlDix;Uy)7A8mY9#R}y5$U2Ly_lvHUA0q#D2B^ zs5)97Y{jHJR3>`;{fOE2oQ|AW_sc7xSIqXUCdWRj{ZgRyltU+y!g6b!-F`B_df1{z zM`G~YiY1D}dctMB5@gwJw(kmZT+i{nn`ymYseQXL9%J(RrM@3__x*fy;4f?2|Job< z)%392CGKk-ectQj^M0|<-E`~eHrCB19RNRIwH@-YT@ST9YsNsT4~DOZXXEQO+ZA8) zSzqgg_V)L?xA|;j;M?)ebL}19)W!Vfk@#!rzUO?bFZo$1jJQLm=elN>$J@h0wq1+2KWTSV$q?FA2wOTTQ^bFSTWAvC zLx+=m@#$^%ci|0Fi|2_=V3rbDadl9p1o4e=DU-b=+Q=f{Nw|DUM3K||H!{!;N%^;% zSpuLE#pNbi_nA?Ak@1hu#s60}n64PHF}%QhgRuaM9>zFK2^a{l_;&2rarEfX|+vvrFrn+LD|U<4lJ)jhYq2_IF1rgnd0JNOtokriX#CFg`!YrI)$QRsdj{xVjuSc);+Fb zxGv@wOm=_y%U^h>NY6`q6SohyJ})mXGBT1(xg;|o07Aroa>>LrsN+KkX3Rn84YUQh zp{lr*w4-U}ED6wRbPnI1 zpwIE_kzoTpkHtV%R#s|iDk*vCSb`GKd^~2@;adrk$YHX^O{B{TiC>Tgbt9t-IcgYH zgufj-b|m>N&SD=KViqPhFFKZ#Ea*2%X_G@3#UURyjbygi=nI4miBUABY|=;L7Bfr_ zt;0=WC<;0ar?zd|hAzc%JWjZAg@uL0lqg6<#U&y_#yz5c3q2wT2$1w+O^f=Efwvej zI6P>C(hzMh@D`pj99j&Fg$@|4*-O^6QZgdma_-!@AAb1Z+i$-;apD9XQ7o?LDoih5 zG#2Ui?c4YI>#u+I*=I;#L^BzM(uahohye;2AwPe``fFep1{P$H=yK>_dOWCvVp`)f zh6oYS9%ILdt1_aQ9v6imcB9dJhEP!_8qY0$UwB06MMf;e09y=A=hl8KykB(MVYNw+ zYw*1gZz05l1o$;gNJ;6h)C)pvJ4N@L*I(53m%!lyAzc|JwtO$~wV+fqSqu8hDVS+f zL??QvP6MmID9;n^p)_S0q7&Zg&a0TkgmPbZXo;2g3%$kHC0|@+1`3-&_mB&A znH0aXem<}nck98zwzD=YX>2Dn>zxSuz0S7J#svQH`uy+rO!{O{n@=al{o#|{zuh|U zNPFvQr}bgsb6aV$uW;HnD&0%=DTzmDHtE60yB&^8vHl+o4Y}39^=x(U5T_u^Pi1Y1S356IXRv~GM$BAV(LeI^dp;U=d!EL$*yNZPE zc36^Uc)DkNSNDQ0?%H^5K)731SPplWwe!yl^v`YUpBLvDkl+~@7tsP3NHL4kEq?#gIS zKjcQRMc>zkcG6}fX+x1X!rc?cQ?!ATLz{;L*5_-JP(3uNNSoD1TVCy{8|Z03&VjDN zf`FRwK`Xlj%o!fKpgyj7US!jnl*W}wO+7t}`UNao6y2ngrF3k3Dc$=@X2U~?jr{_a z3=CX8HQc);tKnGwvK`rVdvlf_$X&j_)AEh!4XcwIA53g&jBnbTyX<)WlGn29wr19E zO!scjs(Y_u=Dms;rwbNe>{5HTZ01k1YyPsT=2tb7&UURmp0j9M$K`XQnsF^YD-xQQ z#WkUl)7v-f&Z%1x(>SV~90Bkmarw)#Jy079-Pt-Bd>s_7WZOm@ghF zQ(Ks3Nq#~-`cdM7LjRL7Ee<4u1sIv!LVMC6^tp!93&o6Tw^lyk(hXuJ?-5xN;CL&2mL|CXL{aaei{md&7QY0Z&KOn{^=bQdA13*A}>9gr#( zU#X9SgF}W4AyC6`AqLA}T`_3TAoAQYZiwp7{LAPeW@Y%UkPeqp(G)*IeNa^_I2oXX z^&44g8RA4;W>WGJ2toz$6;&d(gSihoSh1iutYomH!4Wd2$d`^FK0ceA59@qqFAgp@e{&CxYj5F znIHjj#;rjnSUJy~I~SSYX5lN=Y*n(6WB@fsdP+x8avBRT{5+FYGCqkaF;iuhs!~R~ zbWj~|kl+T@g>bw`6(Ira`d+G6ch#LSKJ?$f@d8yq+{W;4300wt48n1GiO@L&7a~Js z%Mc;tjI;DgYG60jjr6>zN$W5r>=OtgQ_4s+a;}<-#K;GiGZ@NXp`jxR zufi!DM2WpsZ)zklGKjRmbjU%?kp|5iqsGw1B~_)e69~Cr2NxnYI^nRrjGT&9v5dE3 z1QUaa7+XYs8&q|#+IwD|KdcUKR-0$28PpO&V^JcNjF7Ug>Wkw;)R3ua>LRshvYL!* z=c##5swZnzE#-=k6)q%s4lcwc!_{!2Hxtx^b!y#f>b0$E>-KEtyOsXuv;7};*cZzi zt7-|8T&7dzI$kw7R+#LIOgaR;&ZLvgHtS+Q7`1c_ppDA8M0B5*=;4QstAXY-ZLJtQ z>rA%uZ7o~P*3F^LFQ$im++RBy>^K8 z)4tKK+mBl97_oOK#}$wLiw8o#SrK=yGT?p} z-_M2zel{}T{UYak8MeKC7$d$5JPVGTuyBP1dSLCzhZ(%>tN{DJDLgW;xF$Na-N=OkIYfI(DU+Z9^|4i#y2~H9wwD5l$ zd}I-37`sS6V;$91=i@cC)_VRs+5T--bfHO*W$iz9(f_rLA(J{ez??Z$F~)f4^?GRx z!t~p+WeffAvFyj-1B+?v)~);Z?>~P0I1G(7!c2?wFeG6;9XN0RrZ>i9jD#31aV<)2vYzoR$NG4mpyy-UJ(+8F+otH`QX8WID!%W 
z`RAX1>7|!;?b-$N3PU3GlFpntgA!4BSbq#FASOdyW)gL7*sy`Vlo+=$?898*+k8wS zNCRsZmoPvHwOzDm5pqU{;47MQ`0!!03ynpeJonslXaZV_uY_n|N@Hinj2SS8F}2m# z*CT&sh)ibAoQYEyx+$Xf>Z`AAkc|~a|0B2N=4RT7m@-OAO2}7?_M$$>1b2$Lp~5H& z&3W;~7f}ry$9;r74kMirzL-<`^yz~NAW!rU`k%lQIZiRTVE!UXz>psp_2J>+Bsnou zm8qmm<`C>4Wo2b0YEF(~bOo|CUd%FUZMj~qEd*niLNQdz#)h7u#}31q z*hb_-oQkbRdQ9S|1KE2}CEQJFlv5oHzcTzHa0+(^k2D&Hr3h($sSFh<8;Zc4!firj zPz~hYzI}TfF&sq<{}#i#U-KVr^x`laKMc+mf()$*Ra(=@8e+-@jV-TYq4?pPKY#vr zzx&D?`JtU%!rIUwrWe1`Q*Q$-s8jw&*Okjx*vxG>YjBoP@1w zjn5cCQigL21FQ@lm^Ce&x4f`5Aq-tv*0j*V7;#J_fHGWYh{iB*3Hg&<3O_@7urRcW zo-?g+6~m#$0El>x2vHh%3)y>GM-{PpC}M)}w=5DN!XjlErw78A`Sw->ZuIxIFjBgR z6K0|AFJUPV8NOI$Dwq9Y-DJ?MwoKu<%@V%WPCATW8Y?s=smg@ec7?oOPS7QgO#PH~ zmaxnA(9@eJCyjZE8q>HWR)}GX1%yeJfTJ-&p*ddcpof*?b-QDm*h8*p9oMSdg3yHb3KMd)nqaALR44$FyJBw>W)1sB|52TJMzj-Hfo@&29597w3Gn zwA1@VzHdeO{r>KYf4s5$LX2&_*>ce0IN+knXNyA1RBdp)HULI&gsS zk>)6Kuu{TAq1#z`(xn3>zJOsA#lj&xSxh|i!!Xf$roT$AsAg9Pqf0+=bP;D=-HWE1 zL||3P3!1_5X1u3(@~mm#0llbB~GDs-ivp(zLO$ zT7S(i-Q}Aa=%3TxQyJ>+8s+I7=c$hO42tmdOxA{X&_+bLdPTOWjCEHrWJ`gSWy+YY*JYLg6O8jF->)`-iMRCPp070IIF#`M&yn29?V;IqG0LP%=%r~ zI-$z?w1yWtde^1sEGsJ$n-1hIJ6*8&wd}e>ot7TSTYRQy!NHG1YF<%d64 zjF%e0Tlz#tV=01V7$t|On*+<8PAv2fVYm=M41A@aGbN3v`;2|2p_2@b&|8C=N7$k7 zC_{(HvVy-{Vd97p+Jkj6AsXaE-a;J2a*R0%zal|SRK=v_>{Tr`vFqCI-_!1@Z8_v+P)HAG7C3DW@vnd5Ko z-o0@l`2-kYL>3(^8cRz{ksHdz5nMtac(#rj!_TBh6~VeMR>kyA$x>N3(p`1WQ+WnNLlzfuupl=k!>3G@RjDd` zC3wO(Bgz92)<7o6e2f}1UX355234v`l!!FQ$Iv`tw)9}Z2MKzs-gDHPfofnE)rAx^ zgj`S=Tt(Q2zzAcMSP(vRaUnEBR~+mh8LmZRYgA2N)t7QY=puqG)VU_b4e=Iw$lzLh zkQ>sF--iA?1fx*HUaD7xszB4Wscj#q557`gT~e3Esc|S`m>M=*4JY1$CR5)TU(o|d zKwcMo9i>KLA4ia357h%lda9n(gT@ZNl0z6ZpR6W7ubzKeJ-tC~IFRjpcYy!b4}^SC z6ZCF*+h+rvW%7=xL^KMAN(eIN zeyjbs)4I}R+a2Jzo$7Pk-}1CY=d)cc{)*?@+dr=G*J-%2thYMYUR1Vq%JH&J8lxj5 zE3Ec&@y?U}wrv*MTOR9$0P9-;)>Bs7UU5o%(_*_GXu4nM_p4=5-!#U5*wgj-*mfWF zb=@!a`J}SVyG5=iee9!U$a-GlzH4N}tC3a8L~*zk-Yx6PzV?8B9}>=#G%V)n>CrRI*WLd@BeOg`}zKrKJowU{w>5?Fcwj12qyowZQC$D!$5#R4RZ;9ckI}KVVvVI1~Hhx zFsLzp!m?ll1dK!^qbmXiA{eI_W{EuD$d)ZzU}V8Oq2LkhM-0d?Gf9L(CN6CBB!sO& zEglTN$YhPE2q8U&Xbj*O>nbWLPzTuNCr+H0KYu>*-?eKOp__gC_Mt57;}nI8&>|ec zTrg|aELcV-Po6|3D1{IKDvwKWoNkl!w4~HB5}<}Jzx?uo1q)ywp)Ie!{yG}S%uk&< zb?n$N96WH~z~;@Hj~_pdeRLgKhd#ko&p-b>;dIP>3}S`d1VbBTVTwV{CE(Li)KCMG&y5N3hdPLABz*jRFy5P2ZC36TVP6rEFa9gP}>|28%n+qP}n zw%yohY}>YtCXH>|wyiVgX0J6j`*z-W@q85qd<7TR*}z4>%?{Ds-w97UTlwrKM4~PD z6Cs?OoFd?`1zWmHSU9!JQamG&Ttaj2!Uwr_0uDK46=k!(K$V4Nph@ z&;JZ*Ucljmf(N7R{r|R`+qRF}^VMdniEwHGxD$O z!VO-)va!VVG5*7b!1qGn%lcyZanJvKkHG(7e>z9{(UZ=uCJa zifBZ{6RC_*CBZ*X}%io#NA8Bu3;H4a^UIKhd_&dd+KwgDrUek-W zLp=6*G?gKR#$TaAl!`(eLxLhzxc~zCl#Opy{Vw>w+@3EKOA@!t zHO@J(bWBP)rb6H(gKPqi123u`I&S^0mz5C`gE>6KHTx#lC6fWl`;=bKCTf zQsc(OmTg1d0i!M~YrX?VJf?GB%5j+|b)FJ#hm^*Q&p(3=!)D#t7fust6IMUB&ET63mv{fP3OQdj z#M(4p%Q8(3uOxZU3%uCM7?~%~m=~m>S#r0AX_B6Ipu6dBX-jHLlT790%n+$NP-%nD zQ7q5Nu(6e@QbR3%lzxhmaf?M&O0A(>8v%5oCU#C491ve0TP7;;^!GPMFxl$Gvo#j7 zwFI&;>7C_Md@Au8D>cok2wK$wlIAhO4FLnQ)7w#_KkrXRzAZL* zv$hse9{05K;?Wa~#)pK``~+lngsXwTXVOiHCemKBQr$uFM2LFhnY^2xcZ0#GaJZfh zA7PWBmivB1QHiC=+obJ6D#>g4Rz*C$;%N|83~CG0d2v$u&upqp-{={L7>u=g<* z%K{vxwxL7f37I89>@m=HPtP>h#k85IyFMJ{4xaBJw}e}yj2*cXfGlOj?NnZ8=S%54 zi$6FT`X=rS)}yN@`Zo%h9UdUcQ+AP=d>`ca&$dy=trENUOf0U$hFSlXb@SBXvn}^L z(&6%7jqUe2Dr0vV_Lpz@G-~Q=oueHp3YgzBbuNtB`Z`~VG}|~TK5yNLZ6s7Kd(wWK zBc3uJMA9_cp|{+Jb`IL&Dcs&kg9<@vFu&8{nMy}=fJZ{BYy3;@jK4{cdqNyd@PvfV z;k>v&Y(5hgW@j6WN$wO?qzQo*!37NLeV!tm0jslMDnr}IkdYlU z#N;_b5YyZq>EGdao&pR7ntO#zWrxgSJBPt{Ql1qme>+6$yQ)aD?9&9Q_KQS8w0lwp z_imW=*{kf;;4FHzCD??xbf6IwE7F8`9z+S*gw_yovbbERVgYU|LOuKt;x2){X#6r5 
zwUcKWLq}-e7)Rd<4&Ajw!~w%YME(@~7xIcB7RgWtytH^#4W?({1k^hsNbjKpa81P! z;wMKLRf-h_%DDSbg0V+9Zq+A7Y%Eb7OR*{cYi5KW3Bta*^M3{^FUvRF_V%tx-X61d4a*^BgchDb@YPxoacD1jz^5U5J3Ry~rg z$08A4+g4OnRaH>|t)B0!6L4g#8HR5rz8GKB5M05aemKV;ltdQQE2$C=9AA+(MJi;- zQo^9`4?J*@R4uopiq<0y!aBJSG$>L@X3_(<>{HPQgYG3)5+!0B6FnUXvKgZp6`_NyXj?4a^PN>#Ja3IO7a;1n|mjrDk+_z9(Adz+3RK$*=# zAIMZ-64AD$6s+i~F85m9FtEZ}V!#F~+u;6+I}9nt=z)PRXi?$r3-uQr$p`i?doxY5 zR?dx9zEZr+`j`5JOR!3TBZmP9s`;b*Q3$R#`R7w4tWd0iMjxW^C!{JNkQ(V!#C(C$ z68eF5m=z)aD)f95yV}&MH*t-#OBaMfZN(F|)gym(`_O#an@=8RX}PXnVPC6u8Cx=GxNcaB&A=V5egekKl%Z3Re+5@#UsmYA zv|QKY_3(#XwJzv2s=a>NsQfJc3`)A2 z=U7Lv#)<_iq0R0QayXHofwM=Ri0Rk6`}nq->}_&Su8QHFP!8eGI}I(;7jeX2i6We& z5fmmV3cVJNBq!atzkBV2{NeYZz(;u-dKE}FAug2Dz%dLN?s^Dth)s zd;>8!DW!wzk8KE{9NPwP!nH?J4Ni~kt_wrJh)Z0e-Jtj982Ij!;(d&xdsO1!HX!I`eS*!r zgaA0J`yTCzkgzc3qwp|n$WNr;9{3tjG62xe+77sh(Bai(zuulAeRJ)_f#MCt3 zXhDq(C+H0n0)m{34C_`T3d$-wt%zm8h>gc^hGqAr>X#RnT?yK!R z2jj!%VDd;u767;v?Zn^B}ifYgFgcq=XfWa`wLqVc~c#)E%v`g9hXOj&EVa zVb+?@#NZCmk3gCb0@=PfXF$XNxgu~ksyo7Bl-e*~fVKO4zwZ7H<^R|P{5J_a&Zl=IeXjr1^$J8*VW;=1jdbi@FzzJ9KO^P8{`6TAhZ_ zhz(lRYvz!%dP0!i7`|v>UwhNNJb1Uux=e_DwTu5QRTip6eeJ@%;IU%}Fy%kl5*TRjUWwYE*n`zU*f~@tK0#rw4J(wjfpB!+OZF2eME@RP4K5kv-yLGfa zGF@JCV<2BV6$iwH*Z!fNdaG_l#V+g#e3mU(q2-tJN3jJWCXh zGk5AVPSVRU%Wy^%SyQ}c6f}+=;jY}AM`zjrQ5EC1DDe!BT2H>aH^T{BG79FB4W*iS z)%GNB7b5|Kg|SFnT;i7(XKS*%)p99d8`w`py`YM@hYg!`_9=j@W}47NTOn)BrrzEp zW8K)~mBr4?Kdt8v&&i+F>gKW4B7Z&ra^k8|VEqhitfywlP%V_Xy-Hv$|8`OtXoh%s zYxZOj+MaV;H#%-A-=b1VH>I-*Mt}0Ukgjz)+X|&E@ecAbP4hNtWUfnQX4OOw_liu{ z0od=NCGsid#mz2;Dc#K}-L#r$$)%9|knCp5u4YrztHIH=FuI)6*EJIy=hxzVPFr0t z%{7o$Wc#3Ok?ET{HdBy*Bc8OG4a990t~NuXwASA}d&tipih_^A>$KQQcJ1sn<&6If z%(QV~6f1S$eIK9LcWwv-7KZS$1PxW4LduzEc z9XHBM5oOPz(Q&SupBpc}|A)!;6>7=KFN^O}?*4Tdk=~8Lb*lF|kq8#qYPQ6ivGJhf z*}vOaT#HV_ml8|-xZZHKoMLz2$){?3_2>(A7R}U5H)U{QGW*6>nOWxs5RuGlOUE+aDmaV84D|QrvC8#@!vyYF?unZn7UUEI4 z;q2_JPKoH8Mpyp@D?ZXS5GMGFr40v$QzZ&YA~fKUBwHI;fg^`zF;k#(Dkr7ifCXmO zQ4Bhu1W_3>%9mTHfK)0}t)HF;6yr)7*O3j}p@u7(SDlvy4=mL~hR727EhIJw9`s$# zmm(NvpEw5A0`8yyZ~T9w1_}n4QGpuIcaVehGyE&Nv<60JzYImAl~p|ZeCbyeh}0I! zupRrU&^7aE0#7i|6Vg)i4a*qN%%mRj=2fs*BTw-Y0PyxkUm1>fuuE>~oqdEIUfnrh z8y;3<* z;9$FVLaH1Za5ZX+tQBS%86lrasu5X=zqo4R6459XDJpdHsQ_Z6NiS|e2&@8#f}81Xd`(`IS5I5DCj5!Qu*)A+&n6 zTPi$1C~d|bXjubu$J`qg`NhOrL4?ZC){5%s3UhqL*a;Qp)1kn|gJoJp(oSf7uQI$7 zh@xIqAWVK>a}>ypQXoEY6c$Gh`J-qTzIamkR-;sjrCO-&(b}qb{BsdJRPMZMcKNoq zY=+;4_Vx0tsxkN~bfX3Q7J1cVsrUz;YRo9nk{YeBNb<)e8J+{jzagV_8%Lb_mDtsb z_s1Daa`4ngoC|y;=HZw5+q(KT$&@KuSxNwrWREeIskXa zXv%;1oBOB|!PDdli%x4!!_JV?Gr|6<{yJefWGfuw zRRm$EexwlGiJwkK6XmtF z09TNo;7hZ>cn2}YH+wv@#_%{B)RZVSi@J{72NNgJ47;fTEvsMPkf7q}BJ zWl+Thio&ps6kz5a(93fs{iHzhEzZXxJd0m3_qm?XhJ;T+~7Wi!J!Uf ztqQdb+Tx*ocl(09u>+J0(h3n~y(lRuvrP&sB|Sa4WL!Ej5VZ)usrs!XP=&S;_6SYj z{de_sbY!4N9{Pc7_&1Qu?)HJXa@g2*JR4YBSqU9dB8V)d0z4k3nGht#vj4&r;%F;? 
zY04_WDwdha1HauZK=Yr)M#(BTVtuoJf4a0l&w=wyL!23jgKH$YB6~wr?EuhScEioq z%f4@?rGS_E2CPf~XECte4~W=Jg7<(kfhT!JB2bD6;e9g;4nve3C5Uo@;Va^$NQVZ0 z5L8o6miqKazxs(yLV!9RF`!W?DuF4uj>Jys04uT`rf&Ypb|fXhHx5)ndyt`JTSmZgu;815L!wqs-rY?-$G)#6NSuJLkFn zgq6kD|IVYU)NWbI7Ypeg%SchkYo?vMv?h5PM%3Ci8A${E*|g+2S4E?|2aMm@jb}Y|^nh`n9g#b;Pt=?Zi*RDF^bAH}%4)EqeOUJ-5=Z zh1PXF7T12yT?V^+>rqJw|uhA!a)q!WGV|m(@ zb@SP-UO2eWJ+BVYZDD)l z<)0F+~c7X|W;L{w=-*Aj7_tZQ1OHH2?Ka z++euX3ZHvJvucEH#w^qBWFy#OW)Q{BD3pzId7gKP;R$4Jb{M?!KwT?vl)sD$d-LIR zsRdMB!p4KpaL0TwOzw#SKev#ZvgY>eU&bz9YqQVQKGA2-iH}aJJ9X7W_+whVETpLS_=GB(@^?KiXCg86K8AJ!LtKRjKhzcx#YbFa^DvhNOkY5MRP_T7$e zp~>_AF4#{CeU%Z8Xjjv9pS9FBr!Ed#J0^X*?)$_a3$SyqPkcxlzZ^$DQn{AYo3$5I zGx>Si?5xdj=lGLW!iXN^ey$_9xH7GYG_Ble*N5x;6!-6KW1p`hJZ#A1CJ+)q9$#+gl(Y~ z5vN%z^b!29#RYH`XvpjEsIW!3e}G|GK%4wHw~^yzLba){bQE&xGhBc%PgUbE2VRO$ zYWUyC0rpcDFP(pU#n#xI_^796mU}wvO`^Zcx&=zW>uJ;b$;0)>M1Iu3sij+CK7@p& zj?3hil*hj{_VErdn>TgtR>vb51T3KG-Zs6gS1npw*s55?+#F(c*8U=sVOUYYxD!_R zX`N#^Ut*XG;gLcmq=HVA5}@q-v-BUcYn@|{5~4Lv1s?H_U6e7#ds4Fg;5vCkkaIY2 z3PQh#=mIGYxmp(xzdRWPt8HBKW02Zrl}-v@#5TH{d{Q749o_!idugUlARgj{J>=bsYp| zaG~55it$rdxk1$xVzGCpVp1htLM9~fhn0Z(VzUA&mUF|4LKCE=eWyq=;PQ1;UbR!D zCQ;^7HDz0&7X=8ZjOfQ#GH{h^sUj%%1ODPlxk~a&y^s;QApG(k z;c42gwWa+ihOI~)A&`If(9^4Gv18Ag6`){2uN4Y!_*>ySBMpHI4cNgeFRG~c24pZu zPkL(Iq;MmpL<%lly1oObntwrT__ARiEtGGW?Yqz5t=y=)jnm6-39J947IshhhB5kA znZRAqjJs7z-dWMsB|~j%9j_7v9a|TDr%byLP53VBF6UO&v($gmT#IPBaypmx#&`5r zKQcBx+GwB0Aa28i1KRNkTd@V(aH$n%=|1DIEcUW9_fRe1(Y58izsC34*&YX`u4`+^ zcXhZ$96$16J$YZO>2Z_fTzrv`|LW>?{rHam>iWw$?YMC3EBEQKJKn6eStB&9O8x{% z{_RGCT-Z8irqLkCTlCp~A+-6D?46tNh}eG#ee!8J$tfi6o8PR3-)L5yH7zqhB%@t9 z(i<-&rl{u;36=1YKa`b*mI9i+@}{5svx=pfUNbOxv=o)hIE5wTXY(t6ul59y3JT>9 z+0njQY2;0U-K=S(T%2J)=iTpfxPBxVwh$Sko8G=0EAF~f#yW$Y;wywxdIQKFo`Ie) zMmS;;F)fXlhqge&uv=(h3L1Ewm`a8cMo37^K6_ulWn?%@NlE$tD4Cd1 zB&m`CI;A3h(pQ0v$$0=PD0R-56o$bp#ZVssd(udIlwe6&=JL5Iq?MBs^B}OQCH$q7 zm&YW}`?vt)+#5z3aYU0I6dWbB2w}w zf>w|=Kx44LBeKL$OHe(rx)QkWmn?{Dug`#Fa3u8UfR+p_EJ6Zp6n{*hP=L*c`lll* z5%38V1)M<3xDUNxRJDhUz=ODAh=5e$%fnouctSlRWy4y(0ReLeZZsdcjL4fp6{$V7 z!{5zPapBZ2hvbWu^!5kzhe504db+j5MstsY!)zNnf4EF@0^UI1%^_W+9rB2%TJ zE!F}nMMXg&pq?kiZnx0}FlzuEDVM_&-tqY`FTd^cyt!DZ?tmj6_ur?Qp5BxO36)yn z`jSCjvAR3}=(!_z3>bi=G2%h8bX+9_CfFvLgbV?144q+9=oMiE29A^~NAYn3h>Gw?P#XQ=^FY^xn+mEe8p1NR8~(f@Z^^ z1b3499bOo={l&=Y0N)YdFwz|#vK|eO1_fmBUtsy3`}I%?n34$WCdLFSsQ@|r7;tO# zzaRAhmgD?->u=_lt6B>sSP10)%t3o+!@;P)kW|Rq6jtl6?2%?*9`jmjJBV!4gFB>J zsm=HwHe?hMPjiWA#M+5oe|IpSs0AcyZulaD+DM)`qw7ee@c?x55*3q1{4u?SIfD3S|=irReas462Z_2_@IXz@mggYmf$Nqc{zX zaqA=9%~VI>K*5|*rb*TifRDZ{SY}Y+VJVYtl+<$6&lnLg{;Y{mEXhKX3+HTD!Jyzq z=ml-mrf+WSIz8@8I(jqkUb)Y>FB^4juD%rWng_$bN9Bh>!wCJX@=Eq;?!HgTz1QJB z)>d3nb{$45{q#=S{!w{v73CvtkeS&Y<0d>w4g!D@ytm<%mzlCkS=R^c*IyOp7fIeH zd2GY&+_`d;r)=?tCH>aJ6^6YAze+!BbE76+<~Jd}A4a9W9X(%`?*q5T8+^DJb~L!( z2kpl*|1CX|aqG9KLpL!AALB2{*6r@0>JWS~lm%m#W;$O`ILHzT(rtSppaGtk(Iiwv z@%`@1e4EGx6AmUd#iWjAK@VLbBEu%HWlj*&Bf7G4z|@8{nV)o2Wkpw|dQoX=w|ppt z+ck-;d(sm3NVk)hn`sVPmyq^MjqJa5dewVr`8(LiM_7uxi7I#Ym2hsmW9ls5BH3*$F%x~$oM?IZ?jNou(Hk5`y=27=D_X*(eR za0ll(NkiVntGXeQ&BOviV&dgL2__4$}G*rIO~yrsZr{(yT-J?1fC(|x7(d(|d%u==p* zjr89w+2` z)gmRlf-1M4sd3Q>R6u%^mEmwTN|3pNbQS(%Us*OuTUoLkjj9o~qDuyY;x<}t){j0p z;Udqf3!3G3!P>Nk*iXOPr>$94oYl7zzVc~KXmWxe&;$Vq2?~OwU?OOWI6)%JI8Y*J zg`Y`{0N5I&Wt8(cYNVIQm|@fa$G*j(Wq6-EC!L$Qn|1lRa!rkY)5Cgm`}0_~w$bzP zw5DcF)kkh|OaJ=w64hiyLWTBT5I~CvH3lb)%F$#+Dg=w9wFccw)5U2kDk2I0%F+!} zhObYBDWvmm(6PhG?9Zaj0?+u=NE1wr$DDEnos5V_lXDpuW4khCe-sP^XH2Evr_FkT zeBt%Xn& zyKyP8?Pf>P{%{G2iT&O!7mk4mn3$LfG`FZ@$VrAFA_lBQ#l^eGa*nQqS)pqa4#H8p z0ybI4ynn0m7*H5uWXu;~tVqB?Rd>;J0?0yYWuE#zy5d6H1604!$OcWT{`Bi{REbT7 
z;7Iu6jQ7`zW(Ald!eL1ght4u24Kx0&?fEbV-}Gl;2v)N1feK%N07)gHOqj=TKtb52 z%Es?2(TDcA2H zO?OzJ=o)2MpU#KQBp~!a5m7*3NL5@wl%tRbe(0McQ;gfGhIL zLd8_Sm-YmA7&<=(XUwn%LLW22@Sl+@d%ywTLI1I$0cfaXFrTp=>n z6kFXBsq@Rwrxa~Ecs43Y=VJXXiH?&N8llWwhgq6_OQSOtgBxXD$*}B`Yok6-e{MDpjps=i6}Vc^}@a z)93Qs;2#~_cf+DSxzos zVF0Qc6f>zxMaYM;Si|24DSgQ&hnFGnZ#u_v$;?=R%svOybpmr8t||e%NsA(E7X_$pt*ih#OHX4@gOy>v0fMiDTO%^9ZKW5%iOGz4 zp{Le4Gb3Z>3UCA!P}R`nEJ4o$tKB!oDR{X%aQM72T2POR`ir7DahUQ*Qov)F114l7 zB6FDX>8f4^BhiI1!b17_R(75H+hEM#JwX*6a^}kozrWBy`u(!nUFb zpkz#(5R`g(h7Ev|-M;-HaG7kjLT+_odC6~Yp77>7A`%<2a_9Y&_bb-H_eb^}=B^zE z-7u~khQD$gK)x{T&#ZW?Ti!TZf}zX^`Na_SyPT2*ulErQ3=HJS!NUu=r7@&8cI<@bIYtqdZi_m-j2gBFNzjmpe=@w%UJWY$ z`#*0HC?1RR;UO*Df>H!~GH&uYU& zb^K4!z<>arz)(a4bodt|FASC`qS)bxDoCM1Lbitjc99SOGRR;1a_lFKfi=p4kWW;m z%m=@Cl>w&S>|+AZ=2Cq_st$F?XNDBu>WcYT@uO{X~TC9%<#_&3S zW>FWDgU1=5{nu1VQ5I)|>tR#^^eJobfQc4x-^lOlJ{QyF^?m#MD|5sCk9eTD1AwX| zQ$LNe{D8@nI*!9v)k;1~5kzK%l>(Doz+-HWh(M{&s;G=w<8H2)vSXtHA8dm_h+I2N z#;xkhI!PafxJQj4#DgHJXCLqWPygbfp_5oN9!q8bjN?T*I_Y4F>6-4nl>{8j11UOg zmY2DxP_#f=E3+>epdO3!>{ghC-!TXFYXRhw82u8D3D1#L&)&hEe(W@B(S1tRYhBu}y7U&- z@)-+mS!T#pY0XP}>q)hSI|Ke4*8BkGflbtdT%dbVaocQ;-hpis*1k$I_XLltDx-WKd|Sc zfJRR>jH!-?b(~=jkvy>sBN=^26=@rtbAZ%0v;-d`S(^Y$C9N+ZL|)#r5lIBKAeNg# z-Doce@z(SRlj+EQ_o_><$H}1Hj$)5XAO}}qmfNHKk%HbwGyk?;qJjGkV~2J=W0&Io zSGg_I#en}Drtkuew>rIVaFCK~hOfNUN2SPCtEG#uUklYFd&f!^vPmwi{wS1F{-z%O zzRvWaKK8cB^q~d*zVQfjg*^63i*>n?bxH5U6npPCHg}2jrA_P+rJZc~6IgtU5dZrM z!TABkdKTzQ^d7LIjhs%522!*pbmTx`=6h5_wUOU#ws zxJ*5IY~3<^-Q<-q=D9iU`ZycWL{92zvwM`bX^^ew@9`TBYr~3SM~l?6cskbJsdi!A zNNk@>eK;!}q?IP-s$EH~(S5-|_Fw3G_r7nPPW7tRftk<2>>CnVFWwJPu_5SsI0}z) zX0!NC_y2MkE-(I!U4B~A4=4NaS{}tRn|m{r-*oK;cPF8IIGrwZFCKh$v3+;zWq*lI zT}%&6e0kY!@vF;xTh1-;A02(OSdNwUoB5@&w$gT(nJO@!<*s${ZR+WrEh?PM#;sTH z>exKCD8xUs_x*hp_lt(_*tqhFgf?xl(6Zd`v(?y) z2cPv`MrINqv%?Wa2jfjPda@Mii|Pd&i5?VPtvAp%?OK>@Mt;$@xq1BA8MnRr+}@+b zdTp3i{~|5cZjR}y4w1T^lgr&lWZjpDk%L> z7ZduB-7X7}QaoM!7hsQd(NQ|-C&rDRH#}x?ec}dQ{S&=!34QANdL!?0DP;*zD^?Jr zk#f~xWQ9AwLBL!VEbcRzWy){-kFI3ATQ53F#i(YjeeSflDsa#=dc==v#GGy1pe%aM z+&7pcT9aVYKriKroxw1pXfci+SJ6z0##AXZ6S|Zdi(b>)d_)byC7tFqso{X%Gs>B$ zA}(z@2_8I65jTh-_-+md3R+c)#$xq)ieIRL)?`I|f#Ope1Dp~V4y0f|u$-h9I|gdO zm?C?sxVW(T1>cQM(Ne@RPtc9;z$!P_y;FCe2OW*l6Q#d`nsSthM`IXa1ZC^3Ab@Id zg-#2wE5qDeT7;#$uR4K9LE8EMM}NFzyduwiw zG)3OUMN@h^9fDU!fugM?GLyZ>;|mkjeGbp9s7OT@jTk_OC{A+%WNLJc=L@~QhxVwl zp1J8F8E?}%((Jig*62BmYys7Y!vHdlXe4vPq0mol0+1`J;yLIF_i#NeDe20x3V$rL zxjvUBJJjk7(MEz5L@cJH5HsZ`3@i{rn6kV$=+gNI@3CC5d3&fy!)ZH0VoMV83bFZ< zur)Av5hHIXqv?FkG7p|;eDVE|`5T-u%OWv}0O>=OEiv11i&bQ;hA_0tJlxt@YYXb| zgc&z)43}LBDpyZIc|}g{Cj%1#5G!AQJ%MIs3-Gr`vaVO?uAlXa2xmOv$1~hGh3$eY zVYg_>nP9^wa^R*r4m+^J=JhKtYq4(*h-gFjo1rV!3($(}vt&5N1116~J=!Fd;fAGh z?aR7uONzZ!ZNjs^Z<)3wukP$xhVO-9ucw7?8Sk1;5S>SS5z{OT2*)Ts73?>lC0 ze3@&e-CcR};t;=$Q0>p(^7i2%yE9B|llk~&i*{I1xm%Oy8Y!mWnCx1VMNI})G0qeH zmC=|lLL7HbbvoE8NdsNdjI}RR?H=5!FK|8vy4vwKDm-Nr(?wTuUYM&ZDsiF%9@)zQ z5#PZkr7Wy)K&E5FDE*F9U4OKyz?sI6N@b4o&j^vngr{_$HbVH;iHIsz3Cl#ntpXoW z{KNNOO~2`@(#CJMgJFU1E8D^igR;IlUgr};GW!X|v^m2S;3hQ`45)JQ-hX6NCC+L6 zX)1;{eK*5Wy`2p_vW%pIz)omwZH>biXdrs6t*@t-peE-LV!)^QGj7uAbz-Ctyc-x~ zblLCr0jM6=>n%8G+wZSWhI&O+z!!~*si>erF=hreXV^CsJ6zEyc@@eQkHr(UP3x}* zU=}7%auZKLPxH?#9Mp6!f2{EctfbsH++7~jQK0Y`< zR3fVrnpx6;Gp7a@A@)=!aEK=kU-$<#vJ)%XPnju>Kaw$qy^J(=G;kQaRm5ps?T%F_ zWj~Arz%@QKG-v3qbai!chi*pVi8V+)Foot%AX88`rY;x(ML673qx%<)xW_k>0fh5- z7Rbo!nT%NK<_u4Jp%+)V1M*R)9QupeaL)Q_u-qGDCL-4f@j}9^14&?6K|w)-LPPhd z@t{KjqSc;OdUuSgX27DbQ>a`C4Czre`lj$!m?8TWZLGp%Ei5clR8XLIT&p%KqlCOiETR|aswd{(!9`y_$EGp4(03B=3fq{$FoHM zsI)&E#kgyMJH<7y(e5t8aYfAz9!5YIQC}$SK3rmEuJ#MnaQX()m?J94KETp!{t#B` 
zBCL;e7L;qs0A>i9ov9h-$-%+lW$aUkH|rLY7C_o3W}DZk7iMvwvr&Dck7w(Wt(d7> z%5!PIjdLrZGv?%#%xZ4MQ+tR)3`a(^GSKwL{oF3jQ8z?X>7$?^`^6LX{{uPy_gKgE zzwUCsUvvK-=XHN1`_X%=@g1Dl;`HC_7<>rkRq$D4!QGqVt~xhqtXhAz-eulT zGqz;lDN{|?qK)BMJz7!e2WIk-sHw%NPP(!G?i{YwJZOH_K<7j3rj zzZVj}Ly@fta%8WTn>quPVKMP%(hLe^r*(p_ZbE=;ZbY6Djzv)F`}+xn2xSnszCsqTZk5Wr~H9 z%O5$-<{1mr-o@u+GHD)S!i|ZyEw!(w|CO5e)5Z#C{%su-5_05&#EMnz3 z6H@?x;-o^?!Nv2$p5nw(}KE?bjQ65}0?0zO9m573(N2vTF z=H1d#CsWR=ywaT;1m@Od?w=7$%@UPpf~Qeq(MAv&=qI^6@lL#nCF)U_h>%fIQ9aod zLfk@|!_RAELZ}%TWKt0A6r9|0$FLg$qXEfDgE^PfTAKpW_4P3LpIPNZe7>3NQ+siTuV_5XY1LI|u#i6cj593enCX>e@WerunPOR#^BVk+&MNW~yE{yWrz; z|F5<(PnD&QO(BTwFCSZDBb)isHg;JzzuVxNYNVEOyd`HpNrQ7E%pI%n$Cg-}rDCS_ zX0kpm>KQi9UNx59W(S?+5HMct#tzhXtzl;eSeRy1ckK`lEvCe(d-ArWSeI(JmgBco z$3C0+_cgMijtQ#=OI_TBy9U2=qV5#d&$Coe`UwYJj^)N(((V+o+!PPIN#PC^(^YEn zj>Y9Cy@!PFNMRkS&T3+a6*F09Tbc)pH7qSRw)_t*qu0WPhF0ZqK>Bqe10=hhk7uT% zO=-Hny!O4~!beS_>iDp=KHSv@g;ku}G1*Vnf>-aDqWG}6_xDX7{8MRnpG5)ulUaAE zd!5@YsGmz}x9hvF-!>EP^BW)h6KPMvS54RVB|bdmH@kTWzlY?%UG%=l_hD+uX+G?9 zdb0(K&jT`DgV?gd%XR6IF<4XGS))(2xQ7cJa?Iy(VoR7otd@dVQoJI0rnH4JTAqIY z!_fAj{pVBT)s}47MUVf?Q1o@NpD#bjxCf~~5Lu|be7?K;Q)2ox`7OOUq1efbehmj9(Mc-! zb%@zOonVI=S^Tq8duA)6CjPWV%r9nw<6jNq)(q7$%UHi%hTHo&AI`yxWy|G${uDk! z>(0Gg+*ggJ*KpC_-^H2WpOWFN0=ECERPi)emLEfOO|%FcK21aaaa$8Wq$yNg%ML9Nm$e8gDSmwiVRVzR|2u zEBe&o=>y=Qz!13pTL3??#5oFF=ln!;Hbya4(<=+Zp&M9VC|Bkb24!=t$K!FgcXeg6 zGvU`hF~GU&=VCDT{?l|p5|}YO9=-oZgF(zU55#z}AJaKSKh|impL79s^KWx=GOwNi zmbFuMgMRc+Uy zW4_#FnB)%V_=OBoLgB%9BSSFaC(KTf6`_SfJTN1F7<(^ImeUxGF=mAhvvSL*T};`M z^%f#^$&4t`jy@2v`Y@w8X_4q0r5n%t)dKx;TdTG8=AL zv**&^!JZ57hU`6+eweGxjgeXPl?F*U zQljIdP~`p8nJoR_855qjK(TcokVhtU*uwV~n=M0;GEBe(*Ct>i>HYB*Pzhtv%cRwk z=~|ehB6Y|>>WmZINCDHA2YZ$-@oT*N1J%=%kXGc7;?M1F(MnnLrrC@v*#dyc8$4Ek z_hig8OI4z@(-)+sn?D3`HUOU#5TD6@Z2BVP9Z1)z5N*b2;)pOtGg2APgCkEEVl0gp z`Xz(JlOa}!0UcXh&umF{mL)dNfOT)Ng|-%e)Ni1sbO_*}M21oNMH78a z*2Q2eUdoz(hRzNR_lAVt%HhX2y%NlOuCITDs9CzP=$zE>$TfXz>^yE(*=?sff4kPa z-~VhS2f@i1;`U~&87J?*n0D;=x=|X@ZxQuXT)Vd~^6_6xqhsx$!9?H_bKeUewGY*6 zKQnwInEE(Ovh&l}yI*yGPo3}hd?}cYt?s%G;=W|2#%2xa*J3nZ8dVXAa;zmV&WWnk zp(g)UQW6_!cWxbOLp={(axcyNic*vr8813F4h%|D@GaB}$}I)1@~L{>yg{k|`cd(K z9v5c!CrV>F4(YbAsNcSRp>X2c=XKuMfYNFPk(dEo(sU`gj61&6#0ZlV)3Id4JN}bP zN%7tc(YlOG@y(yxtwM)<(~{`x@-=U@J2+dOL7h1!+U3VRNr|USY}%{hf3Cb{cqiZI z%eC9U%4nKLyT!(zvqz^b?bE;zpffnIX(#!=+P~cF-XBjJPeVl+m}hf3fxF0Mut2Fh zB-~%y*CG53$om3X0D5iqh5&s52`7X5@w9FTdWiY(ajUsl45nwO_Qn?LAHw1Rv^e4J&V|Y~5Kt<8euX-;i z?W?Gs9Rd~>6d1H_;vIIHi;@5?Oh>^Rv?VBQS5hvReT6c87c)-qe8RV%h6NU9ylC9PA<7EobF-BJ;e0FzM(3(r)r)|l;GEm*4UW8y(MOFztR^M3t*b#+$r z=(lES3DVJU@8~4A+huah#Oz0M<5s0FH`T^~yR?IpRB>2nF>LuqZ0eD4 z7|Eb)^7xX1s@Pn5=wYMk>pOW;g70K zJtE0^cl|h8O;=NiyX&HcMw9>Gmzb(%q2@@k=5IYa)Mm5mDad&yr}gXjb7%Jpd1}dL z;V5JsrU`njS#kVk#9=zS2Fv6NW7BECs%gi5 zc4o|K!Pa9uqieIy5yjeqVbh6f-auf~Wjx|C!Z%`h3?h#Hfgd^ z%2=~_p2vTqeLd^A!NkSXBZo-uBSNE^-UK0R(sY*xj};;E!!?{GUbv`Io<3X+E_DK^ zBNHgKd`P&V$D>>3u%w4dZtuhX&{o8nW#~=KJT;TFZzcUJ5jD7(u+X1!WQcuWhq=UoG`4MXH@4l_wrw|QY}-Z~8;xz7jh)8E_k1%ulbQXI{K)LH z=RWs!&QU|zA98n!ylaZPt~VR4ak^~Aa9A%_Vq(Ya)TaK!KAE%i3`+%6f4yswX32`%P)7t!`=8P)9$qs(AKj5Nv-yI;FCiqXKb}#8-Lp^jA5t z)3dhITDc`udh@j3jI%qXF z=GQ9jS}S>@m4W8o-rcT2+0jRV;pt;i**ly4=~2JMk9hFEEbzPo74rt_oGYl~!%=M0 z1eGTF)y8Ox8Q$Uz)O{6o5|Vc`(Vo-#S6g>b=PR}@j^cfNUO0z!T*bh(vOnsY3xC&} zqL^NYhfS5jc=OCq+6#w#6iqjV zh8JbmjiT85;YWN&-P|9u*s}alyYu;*LWA43FN-=aoJ+492W{+z25Sw!W{TQ=dyJ0> zsjhb{bf3uR9_);?Gm^^DxRW-YnyOwje=ye|S!(0Xymc#0V32WUd|}o0L)+hW508cn zj`=L~;c)=v6WGrt{1N#k@=esUy{lcAJIB+5tt_zT}jfYDXe<&s1~Joduw!}OIl`FGOXI6 
zj6;$Z+QWXMq?$6+s8E#2PS;H&0=c&y+P?aCu@b454A9Lg#W-{FnokGsZlVy+DkRU<(0C@d*0(-aTAMZAP0fx2&t3{#gHf0#0?T>?beojxP3>|!o;8h{m3f-)uukS8>e`i7W5ES6wQbX?p+@uw5@gP>#+SDcKY z>tBTVhI~IHmB8w8VWDi62p2Tri-9i>5zavpgxJ{7h>>5z^RHQ=n9H_?NdD?uDxSYX z45HcXV3p_r;)+3N=+)tbBBGC{ON&M{eU@AWbEG-ihuqH~x7Jyfib0ob~ zEXOSIawM!KIQvo_i~K(J;|{sT8<3yx%09G`)r7vOSXrZDP8M+AT77`s9?TXP)~ zV98y;so_0k{zGpQBeK21`0)xg5%VW{jN3m9-zfHpqtW`kJ`oBkYw`69pl>DvRm)ZdY1jLn_Q1R!pAklNo`?4 zcHO6Jm?lWp0?$cPP_`qRfOHf8w8uNe_({KOS35`@r*nKL4~AKo7_7P!8+=B#9|e-ReQX|JvZ3>DbJFJlA>O^cJTWWx!c-y z1MTFc;jtQL)m<=bGINKw3>prmP5@}@hH~%kXX?rEb{*Isz7%7EW8!AA3pd@|3C)Gv zN5nYSf+855*d(ThVkpE2zmVc|hhP#|d@JAnIt9A+A*ESt$p z_F4hZy^)OR*BBGkx}~1~R=a-iMgl2$2f$|4EL%w(M(dXj1nm{TPMq4p6^b7g zuAwBFSaStKYph!Hx}7cnStPKfot>R5d{Ck#5*BAJe0zU6yx3sv-*ns{aI_{vEdi*F z9{RkeQA10gpP%2TtzYCQo@|O8VYDu?E{}S6x)3cI|MS0m#;Yv&%6~X^u2_KNdvo61 zAA%f_wG1sw%FdqPqdpG_EM~je@OiIej%%7b zafZ#$Z=$k~uSXN{dcbH|(DUjqa1@a>@O^u_&T@VQib_A<0EPOn?x2t-Fjyp(6Q9Aa zw(uWuwbo=?G^sI0h%hF`UOv$Sol#HZyP6|8HDg~9P?%dy3DFW(cP$>@F-g%* zbxN5^kH*93|H4l9)MNRdfthAdhO(-xL{;P><68~xlA;muTO`+vM3qX2%tCFx2G5hN zS|sM;*J9RhF_A5C}SJRznw(|=)Lz-7$#D{56EC6$=9RP3P4nh#vg8} zJ~wkpbcsf!<)J1Cim?*&KQPS|C)<`~oQlhAk8qX3;n$JP0Hl(?(q7%Q1WZ zPXYV$whZ0@d-!s8mNe$NRo)B5_RZ3Bxr}s7OY#RHZgbK#7OtRt-R4=T1G-@p2(yCX zj!hLWB4N(PvT)xHhnf|U681kC%~<|HYbIy35UIco`{Im+gm(@VZ*qjR4o+5zAp1xm z2Qj*f(HpyESF6fz-+dA6y!s``=}4UG238sCs#Cd`ZOlDvA_qxFc5%-4|1yh4CbnxE z^V#BVGQeK43^r6ys#Cl>#iGJQyQtRM>6+DutN5@%uDW#E^^4f5P_=SIH!oXgRzPXd zOSG!*w=I{gVpC{+m-Mibs!#D+my}#Lmv?ety0gqZTUGt5h3=4bWQIRCi5I8ty&C#Z zcNpzbm9^Dr72avx`rS(+?c2^$AL#7-OLL*-dPg+Vtz~51!_tVL+cM^@%2UTAaQKgyXoq|ut2heQo%^aTcEyLf%r2H635oTeq@Q-~+`smpIW5Nw+>oQ*pS`jZMg4BW zGnggPXksaETd8o0^fAd%7=O!UJlG<8oH_iE!-iJfmMY&M+`96PH?-u&Rl8NO@73(9{!p`zOBtBbUs`;v9R#=vRv8IDr;5Z}~(rXlelo zfFx%Rbn&xuVz@KYk-iTho^Z8;x|5)%;BOT8#pP>b&}?^jqV|hhl)WdLq;XEoGV&}l z2Xyikji$o`1mS2(SWtqnM>ZYgF%-<7&l{6)m&gz;BAq zwF5%)hUS=4S^XgRhVFVKpgdJ%>dRy_P4*Xr!GZ-d&K115xiD>woM?lj)A|gVuk&BMNVAUtgLD*|#)C<}X&3TvNwMB>` zR=yASa588#D0f$Shkni213R5Hx(Apz9@}gCi{L$Z3694d&e@AdcT6b=QiPK$VvDUA zEo9JquYF(T4H4D$9bL}Z_@l(>t2??F#HZ@KsTnC(ketBg%odX?n{?fKZHO2Be`8aG zIFu!wGc-v!E+3U$wILh}Q&E`1>`3{NFLSs>hzKS~#(V~0q`y~&D@p!@D?`$YCOR=2 zIXuWokt!n*dY0r|g%%UK-x4Q`M3~NZuru2(W1glcl!Y!!M1_RNe+GNXI>xB?=_wvL zv*+i&B9Z{QCB&&wC|3BeA^dAeKJ&33r5ctJs=&M@p7$L@XHPDNWlOM(`%J9&?_o-W zDTYk;w8Pyqfi;hxNR|1RqE;?+)=!V_r2Wyp-M+R3PDAlMXtgdVlS+E zlW9TOmmwK>ce8?5bDL*oT&C*Ujk|VurS&EC>9VXAns!Q;65M{-mrs7t*ZrE2Us{OGwR_Roh>|x**Jde_YfJdOsZWz zGD*7rG>4?h5LS_tbwr20X-KDQek@6)f>yd_8}WKC<-w?w=rL#+Bs8KV6uWQTe@Qq3 z;uXWAFYtsg#1`Ucl&Q`p&`BS5H4nN|ivR3W+dKexSxyU6VjrhmdHoA!cNo+AE60!y zD6A*56}XC@%HhqxX)Y=G*6b@S4SOhY$P&`m4WxQjKF-4f{}^KS(U?bly$5YxKHnZK z1MNcI_tx2h!0yiG9}l3N-1%mI)MPlez*)A9pf@HJaXF*w_1r~KMh8VW zS^<*PVmg=K^NPeeh;XbeX2yVHINb;VIvy^SH}fZDNXgFeY|*hmR*w7t`*5R;S`EPVJx6e!Su7kiZbXX?11m(cUridtgi)%&rh#oviJGu)>g@09zEhEOp9(HXl|3XjV zhjt8~3QF-5i}6wlz(&A(H37*6NDdY7WP>vQMaIM&Y={y}1tE3|xd$0>;2=dCLnFI> zG0sjl5TINkg|+YThEt|9AYSmy?7ktI2VK|}sH96MC$4c`#! 
zETRmChZ@^g(k(+A2I6;d$fJ#nv~lh*e7e#$RiA#&QX3c;AQdQo0aN)I8NykRV7i=u zXdt~h_J?Lg&ru<{xp`pm_ZaDU54Mde@*W8$>I=!;3I|Xm7muTME7%j<&CU` zhTkKir)Y^d-3UBXOs`>q=}(7pX>+{NOWNQ{NIh*UcuB1dw*DIp7h0?OdwujK_FSB< zdJZ>0DItE!GpgM!R7KSQJLQ+T7xX{5Be`#CtsGlC^MmERa>oK01{wgy=k2$6fYjpV3E}3$r^QSi8^%G*z7l5w&_H)E6!GRO|#2vupbk*c=rKr zVqO&g2*)M8j`%7oP4VI((~f27A^!VDkDOx_r{*DT(y>#Wy_ji#jOgHM=N>un1v<0$jND^$%jI6erk@}Ci&6Y_yc`Yw`Y22h zv)FIwIV~yB!QmL^{Z_GD0?8s9tkB>nsT~=mR^sL1h;F9}WqTi3>+8TP_LmWaQyhz_ zU=~>s{Y92q8YyG;`8!oP$nTSv7&a{f?K(;Cl_u}!CVzC2__I}fbi(eN6ppRN`<^eC z0+^-%?6g01Yn+E`jH98`*UgELHYbWY7rN7>P}`~I+e^X^-;A11KAP+Nnr)Gz z47)BIvRGF1Q;wE2cNbX(JHcxlEj1S*TMr2z53tuubJ)80QJvc6UTl*WS{kc={H(S^ z)r8$@zHwCl)9?q?-voghh>;lAW@>f}#WvpqZo6~%_R?^lv5iCahGPaoF0L^xOn$HL z(d89mf08U9@>cm@V1Q&lOgaGX-CA6PfnXmJA zfpNoAethLmem8IQg`t_OV0Px`mP!OP+L%L6jv5%tj$FkLnv!!PnbmpWK?SIk{Cj{0 zt@*~8c5jViRP*cJiuT4*MJGDV^zYb@}1OF$Ms?n zl61dnAjF|8HUBp!G?OObMk7`0uM&k@5}iRsA2qXosp{Q_&0o|W=ls%+-hQ5%D7lsY z9#kL!*}Sr=J=*^|UhEj~m#%O)yZQME>!4(c^o@89Qrh8&>lv4Z_7}HLy)P>bo1Oa= z4Jt)U-_AZvhP#w~(m39pj`>9v)ux-czsfBCX*H^BTR&so;YwHb>5H3m-2V|c&-8;% z%Y%sS!-6c5g-)9#f#X826*(7Wa^*MMcn8C@QJjqXn{@QAo$`IkE2Kr{O!3#T;*a{l zJMTCL5qTp*@1gm=r~y*!6rR{F2gP8yk2UI%cdioS8e$H# z%@=hq%UZ<=M_8L55)wm8RXR$Bz`c9e zR;xLRh5n3KZZqUqQYZlTl2E<5xv6#EZ!Zs+t98AJQ{Xa+P(=K7VU zL=)PH*H!zld}rai<|_$>MXW@K=Gw|O{u+Clscr~>5jP9!jO@SWuQ&J7nCE(*Rn_n{&R={ z2U1eL*9S*GGJ~4~p#ngN=0lqQkt=F6VvFXNeH-15>gX8d%`7xp2vhNNIK7amX4;>N zX>Uu&;%(bQxHLXZq>#ar5xtf!7TXZ`2{oXI;FIMzgE+hisRBZhm>YR!^QX7d6Q-B> z#g=ztkdN({7?iF^eJPH}faa3!*osmdGEwdbAvaRgVJdU@AF;Wa5r^JJqI8B zS=(`9dqNBIIa$>x1r9S-6{fGugR0S3XXW-}a;Ejy_U&sG~$qcV9l zp>^rg1xAQrP<*Ajzd@k>S&?+~xQ1}Ci2&_NHaYV>bGyK8!*j#1|;_)~+jHvzb^=v!BvGvTZ^|WN;LDl{bPBr%_AVJ$?z$e6m zJpL*(_c{N)|JfPJ)4#kWK7-gEKX#rpY`x4Etkj5=lBmIoV8n(|)$-pUG3`nA>@n0V zL}gJx3&=1w&ZbKi<;SWt990a|y`umdl`<$%IHBJ3AAqBXNP*03@YPTJ~J8ABp87W2e z7t-~SjrWmRj6tCHlTn3<%c4;BtB{lFp1Zr*vBRuLaz!B}Z~f(E8*2M>hr*r^1TGSS zX@gh)0csE*kbFRwCXk!YKnj5+1y6&-^b+-jXD+~dQcM9(L`1BGBF|Fmz2c07Q2+P2(Z>*RPbOYz~q4%mup`oDk z>{p+c1;iG~yFwN^w}OWVat{8ZbR}+b!oeP9>TL|h!Kp}3$4T+l5Xa<-V)HQu$@c|_ zpWmPo5KK@|XJ3c8Jl0af-fpw#7KPr{;=`g(-*~xCDYaRXepSVb28Hf!w7DEj=N$oa z(m>Gx(7pu(l&&$ZX0F3=ZH0$%VTBLQbpVI#4!<87`_=Y}6$Lwo-dyutjtgMaj@-5OG4p$$1Y@dlJ z88f6J-+rOqPn+{mUF04&U%muz!6R%AIoU7_;t4%B6~?4NjY zms)lbDZFY0>iUf;<{p?UZoyYLFL<3F=e+T8{l5D7@-e~-|4uK}t(h6`(0WGvxR~Rj zk!z-T*-m3Hb^b8SPP)B#_d>IN-<d zeC&B$B8mcnb|HdhE-CKo>*z)}cjD8xW*pf)*=GhWkpg+7Jzr(>(cXz=*p8CakwX+S z&SAJ=%<=z_bwXQyh?U!EcQi5daa3fnZ_TH!Zf2g_%bk-#goi?{Qd?-*1llI~tV#y2 z+=Q*4zOLUeP65B~i+fplfLJswU9UFB zf_m1n-2O4Gqk~K%CUU*H*s3|T7WRu)!=Kf}mz5!k6&cAt3kQGLeTPBDp_!U zoO4Sz*VpNtm3cVeS=U6?W9+1&Hosi!;g_{@9{pjTzMO3*FSgQ=+S8q0S`?NeHlKQq z>-*(TpZCW0W9y>$Mj;f#FPZ%akeurgff$ef%#uEoSf7{sI{FjX$04+l#21)P);kz# z541&|yTaFU3ho7EGaur{M*SzI{j+uFu+%(+xk^;Cqki#VX?gIJmJZgKS#76^`!Rai z$B64s>WF4uB4&RY9d0OuO|+$T7(-=-|Iqo}kyX~l+4RzsFI%N>dl+9TM=A%% zY(Z)IE&>DnqxDFMY4K$u1P9IC?H6mq@n7ku)1g>fQViW_8qZEk+5aRId@_~p>iu0} z6u!54j2|<(?fv1v@QJXM{AV!~OzBJ)cfNVj@d2)72_u;|)@b z?Q&=h8ysi3P$vR-r+2GQOa}XZ{McJwZ?!Io&A2<~HC|V;wjL6R9lUqHd+51lQCf`C zu`s265sTuvd!ssWtPhDEhBS(LE>-fMmcDH*gSz+So46TJ@cBMOc@ck-ZbNaINqv81 zMVL$aOc+bnHrE0fXdVt%IG;8wKSJZkH+Y5QZjoB zbL_Z&zb%&9GrdER#Bq}Fl)o2{4&}lejg3x1vO+g&Vn=gGz}PLsW|po!kn8M{6A0`i zycV19=)7jz+}tck0Y~a9EBAdcW)p?4Iu+E2R0Tc^elRM+6yQ3$Lsdf9ELRUk{zr%~ zBp@&1AL{+zHv|jA8bH$zg+*U$uEqi;W*HOni6qQ&zw+(v?95f2kW4@UdE-PoSmKjZvrpkHP!Ks0fZPXQ7^70qN2SMyd-h~RBVO_ zH1_~(5%9G|gb~zOj<^?kGZWd+&`=jPG-cZ!O}InxyDpGxESxqMS6U zzK}LENzw)F$Pvzh3u2Kz9xQFWQl>klB)7tac526WM6hs zVYvwKvD`&U0926QkS+X+*KHX3$b`lm1yQB|qcmSSKe&J`nb7L07yNL{B|vTosXc(3 
zZ1l0k&344@jo4{Z)O^B5NcL-^FS?zfI7H*b8gbF^j)X51K953Xmk0irBh+L%U>{p9 zB+qAuS(3@^zb(fTKbETSAl=Z!o!BqUWL}GzM1ZV1s=;05sH7aTWK-IEOtW9%6S}1s zXnY^Uq>9ut%21;fT+9}X>WUS#=$c|v0v}`o5;;QdZcQO1r*5clo4+wW8#NSkMtwwv zIX8zn7lhpm1~U=I9oH`lx2}x|$%AVyRg+)!U+^V15&!Ho$35>+3{S&vIGDDbpW5>j z(pa#SF5-nWi0WUMdsd`mH7++{jr0HRsWPIi}kmydC53s}-9WhZC`0u}?2wOZsiy;Z4x z(M9{Ot+o*rsw3Erz=g(k8k?RO$ut43JuX!eujB|!en27!q`FJgDP2Js2=p8R3p*cg z&)Cy>{*Q0ZoWPIQx97*{ywAJ0XXKmP;Uk0hx3`Layw!g9Z_mrX`%iBbfneQRs$83J@d6VNPsc32~UJ%IKE1Ox~O2u#pa)c>u=ht1pz z79d~<+M5cd0Puj5%|D!BLm=7Sz&h%PmW_0$w6Fd+Xge(B+)PYJz{$5CiVx|HquQX` zZ*K_kZ7-M4D{wdlHFQOjGXoh>@Ov}}$+P^Lci`Ex4g6BDG#yyS3l|IE3%GK-SQ!jy zNmzA~aKDZ3puWw(yN??=5QJ)J`c+o}2ml{&w!+hJ!nW`-@5XaQghpTkD!0H<@Q@}) z1Xw}BtWtd_kbZ?Z@0>i)SiiCv2^I^Dndv%e>*t5KOf9@qrvyl&P0H^Y=t_HO5c5nq z8K;=1Ie*Jj(Ww>!L~C?cbdRfq2G1U;wZ~I59L+C2&~K1#C@HquQwrf|<@3*il&SlQ z{IJubhK`_SakswS+y=8>8*TJFg^ddZmjt54_ST*Wr#1t*tfvisZxK!$-L1O5$52}* zdw!=;suPnkF0PPnYJ`!HG7gg(jpCJY9<+dun&^Q{Tx98${N-u_w|oer>;@i2+GgVIn@mLm$cFJvc(!;-EmF}ub(Rh zB#Xxe1xrO^(Ky6^bMUGjbCRy^UyyZ@8Rl28RV(knWe?uGW)(nCT-&%q_Q9J@I;rgS z>@DuLTq1Ph)UU;rUuoiepU|)yGm3!-*~#%d%DQaFuRa{#jPWQU5RmuqQ<>druV%?O4_-v9+K>wT<*vb_|bbHE%^S0RZs5rRd74^W%$qctxJ=&{mS)XwJEOS z?Xw~u&Y8O&z0U)TmKyW6x2Lt9?p*xq;imU)+6$+Ov^{~M|1t{?vN9o2Kgd08=gcOO ziF zT}&WZimWK;D4aP$G$%+ohCkegVTjkIIM0NL*J=FMTXUO9fk0J3^PAy11@bzHf{wYv zD~!SVG0xe!Uo$Ph(Q!Sr#9r4UA~hd8uQa^dngCHKgxX2OS_QaTxpRwgVoQ4Ix-`Vv zNzolcAN|TInqKY5pn&E%ob^hf*8!5(ef?%qQVu8DDQz^HENpw`V}~rB9&G}ntQ4z? z^uKd#gH=xH<8OXTqe@G-(o4HsHcquoBr80N%P%6qC62z!Lj-9r$~8kk+NwEtQD2@; z5JT;@-haHVJTtE0(3ji$keT)7Lp%4;Ee^@qXXL1d$_D10|AC!z*1BHbp2 zvcZ~4Z6yJW0~e~=Yd^dSu^DD3*34Y4=Ev&Q>U8~K4xwUalmA}k-%`YPJOg-Z?d2LP z56%9up2JO}80x~)=6i?odn+yc4Ns51fnl^Il4LgSp&X4b>_Dh1eYNPNb@^!k{Z z8H#dBQGmwW#^v(2>!#Ax!)2q08-! zJ3!r`s6HM%!W$A48L{_sX-u&w%lv&Fr60%`8|=Hi`x>WBQ@MJj+WM{JTyk4oYMmrN z^CK<^ATD$q6mBv{5Evw2G?m=s=~HfJsKZV>nqlKICqBpOf0pC(Ka`fe-vheXKi;(6 zVtMz|N14a7gKm2V>-$@?kO=(*Lk_(X_K?0|pXw_#f2ZU1>iyZj76Y#(zk{R)`swu= z-%Y=RtOxq-W`pP7?_<~(8ePe+|9^YI?;z-b{$EpzKS`Y61=p>;&nxf6g~Hl;USa6# zBen%nhR}dk@%Vn!9_C!3mr^J+Q@qsopgVsZiZ(NISHp1x9Vgv6iZ+W{%YbaYGMQZ} zCPfC(RIKr9TELMG#M!C;JArCA&cQ;M>^Uw`$i20tnD^AQ3~aw>3XS~vY9C+B3<8j852&4ng%kZHI!_Z3 z6djF_9Doyj>Uaq?H4eLo4Uyl?)n++MhE&z3$G~@Lx(|Gqxfc*dov3u|Cw3ulNw{x! 
z3n}(kQBssp5B-b|aS3kMWz{X~3V1EQxkIwv)oUOJA8C}(M0}+QwhZyobAhM6i6ZIrvH$OTSN5~iBqgC#KsFO@-Yi>tAf0~MGXAf+csQN%1JRc& z&X)^^yB50Nfd~_@=%U~^qwFux@nH!B?hvp=Aa|~Sa+;oOIs%yJ4euaP++%wCs|XfE8;U$j=X;FTmRRlidx_o-_?)rvS-CE*V?vRttax0ASE}IfV9em53F2 zyN?EOiZzIS2~}1Oq-U}p%KZ$~>espePU?s`k}TAecMxm(5~!$NxX%C6@4;%P_#xdK ztWl&bJ5^Ox1>y&FgSFtMgoq{z%VWs1W|*jC8Il)R1f}1PYFFolj415Hu-{Ix$8M+J6a-+4s92F?=tf&4+#(JDJ05UX6>m_yG1umPCR#sksY%YRWp8bJ3R79Af9!>=b{;|Xxz z1X|D+3tPyWS_FN4eUz`Sz~Mv)t@IgK9_>LliiTkqf{LZ?LkCcVpWF_(%OW8NoKE)S zvE?FavSLAqRiI_2A~hh?`ZWr%%EZv;;e7Oi0|y+NYtjhK0^;fsFr<^6U8OhC8ZLd& z08m!167({re%cPnkf)R1Vl=vmr0^+K;vo!ASfO$Y*(2u&4ZGdc-s-fv3yxV+$H9>f zmB-0+nb+}C|Am|)7kQ^U{-N80+Y_{2vI^Bw%@(t(U`-xCK?%Dm|KR&>=I*?xCy7{l zsvC4lH4wt}$0JM&(33Pldi?|L(WY|-_!6vc&efw=fH!WgOLK;S>#iQxTEGO06 z(yci#M@e;aL4{(w|2fqQwVB#ykc>r{iWU{qCX~(A%))OiI}tjGHkh^_K|Et(4a@BC z(rsbHEn4cZli}_zAi2N}ZR(z6Y?C-;DOVXK1v&oBqh+E6{i%gZOQ;vG@t3BjvBsAE zenlDMq=S5mE8J(eVWG&Bai_>#?G9$};1E2fWI=1rxYx*W!zOgo8TFkreBv7NT;D!P zmnCu?mwqf2|0F3Hf0d4PxS!j8WEVUy@u!}-S~2h#FkYz!^VQCURIec-l}cW|Agd7O zL1j&G)0atqGK@d|dxY<1h}aNqzl|MgA!rmYxMi&$w=~fkvK9(^05C}JlO@oWhhH|`)DyBSmWE8_v-FJd2&v3l8 zfQg;IN>sPXDYrSAs2RiHHQn^JkEX)W)v1N*dClcz#hK6cB)3_X;ExW}Lo>pe8DS){ z%+UJb^IH86J8szKLe=#?K%5e<<1p)(3+mVx+nHB3nP%Huv{t+IHh28@x=*rREVX_o ztS1AAk($e+Gbq|oJU*>WyIJr}O}%gwvO7yuc7=5V-NZe(gbvEJ?b zevMfJ-2$Zk+lUP6SR4Jcgl^Az$;@uQ|IktY#(5D}zKy2Xq8% z$e_S36kyrT~{@bY%Ss%g1D;nP5yRr|58KvZ7P05eWbN# zsI$DbNx6Z88)?zZcX|qE2_eZb$#SKF8#|abXU)#zX09p@;D^&pMe@i^y~o8LmD>Bu zitLCH-CsP*Z+%chedO14iZ@)aMn;qVn=~TuwN43E~qS^{$-Wx>myVWUr0~BdG36* zX$bAiP4D0Un<*HdW(+hYMOY1^sZ3Lr>dDITW~IB6f$vR?>5$y1N~>jMwX)MYXu!`Y z1IhOhv^Hoesg$K`lvEm2NV@S{wNy@jlWwsev8VdCE$V_*^iyl%(nf?0vC@oTd0Sj( zW>vpud!@RnWKeCcr2o)>B7hw&`#!hneX=#3Q3P)WWV6Nz`sjHfDj!A+tKDww?q85P zLiQr^DNJHQGwT%X6}j^*g%$e2^7XsW;z-u8CP>*WFxHqynWnWNsAvx&I}~DEuqA2R zl}6J$BIlP7lZ?Pr^etZ@PB%mUMfPrnmT3V*U1`{RtJ7Y7q~idhE2#uyz6uzZ@Q%!! 
z*3=<`R5bK21y4w*B4#7ApT;*zNOOn*!$+KvAv!Gkoy3xol9_ex9aM?`%y2|@To<#u za8J==Gp?|-f&V`f>D1-~Y-qk+AuNH5y~?=~X{I3+E`bTkj2YXo1=lqj9B7mgGM&qb zOx04}Fxb15+ZEl6ApuNha^SBRO4Nk{* zzDFc;rIb#UJzILf$$2H{^NE+q&qN$F@&&Q1l!eR_yFic#1btoT31%foz)-k`P5w`b zlzN!hN!@1Z*FdKzB6sqIOC~c{) zwFjp%l$4lQBG@~WEHR&q4>o!dO$HfOSh;;uJ1^k(IAQwTmk%Vi?p5=?w*hc1`mE!N zl`G)!(iDbI-`F$fL)~3Cv=AWNcg#<-Y6-jc&40FT z+@odQ!&8p6OitQNQR$6sL9^v7%&rmV(L?ljGfaHt93;v!U(ZE!^%Weu6Rv#jhYYg- z50&|~a}uPmVjW~luU4omS*4@}lgAjPCDpGN5;W5s@lUi=4(Z{U)yz6BDETks6U@+s zA&!0aZ}FY5=4ccBtAvr zJhf`rlWtjC=VTk^zRo|-KX24tW-;WD*HHDk!2j;u35?JB>sM(v*>CbZ7XRisdVSd3 z{z!%;l`Ep}TEf1#Fo>;syr0Ogv0!`gmY7NFhuF5WUO%a*s@1BPTocB%rqj4*7uo(> z(T#odxOf&|fZ*?d(^M1IoeI2@-L}cAn(D$&w$|J;P@pB;%?LYqLE2_ zgjyP3dE0Wyfu||Q99Rx%ZIrsUJEXn;y=ndNM&QE-@K?p z{V&MqJXyh9$|^3NgJWMgmnh zB6vgb!D9p&0Fkn-|2x6{y{FLKL~h2+Bzh=+zMh(r6(Hhs#9IbTLX5NjqSAb!d4M>= zgTAV)(fcF8jeTy@3fE-xV~8&1qX_ht2@Jm)iuUUNT$0W$sO9GQDQwg$=`c%E0E>XA zLxia(b=X@S3^$AJ8;R9ow;oEE6ph*lBZ<#CG_V9r$W~TY59tCU?%uJ&1rq>7brNNC zUrGeOTHJwqJ%sPzXe2qm!c=!tbn955gusipWsFWiihkRFfffe@)13W4*=fBmLe4%OdNcy;AS@R!l% za6&O1y9w+azKE7?42h1)D31g+B}H$2YkZDF>lg2)@@q*qJb3i++JHX2yBkPA1GQ8leE%xO&C5m~C1X|0U9xelZ2 zOxPJakPLX9ONpnW&ncDfd{u}uR}}7 z#IF<4Gp+35IiBMFLm}mtMfF1Vf_rA=a# z!>~N(xzd00w$u#Dp6CMO;+yrGl{vR{M69dRSxr&q>q-X0gG0^Z9C?$Osz079Yd$T$ zZW_eG`ED#c#G)Vv4cub9b_(VFLA>}kU}+HT@G!YD#EGZXe6sdBIXPV@%gpooV@~j@ z0v6FA7P-?NOsG`7>sQe{O=@=h(8f5;+t$Ts`#M~)=dU)m|7REJv%YGw+xGiQ0pEGj zm-`mWW>jl|un)c%^+X+IN)IV2BC_86pv*;xD6^ldV&=1+xN4@-38PsQ=7R(?Khz=% z^?&;Y6yhAvXIqEE37=B3qD#Nb2pMTt-|4)aGeQtt%Xbgp9L;c4|C+trsk=<@M1+%a zz=>a)5p0TR(D?D`&UN{BjNq6~5;x7jrp5nE=(E`_n>A54sbB9MO;1Hx*M)3Tv~aUu zzeZ!062v>p-W20vo$RB@x;&Fx9f)D!F1dW}`q<$?{}V7GragIkU#jKgZ25bnpG!MnBX?_@DQ!MVNPk2)#Qsw%)0#BPsu*-}d=7ro!iZ z7V8JQ&#s_C=G)$Y)pks#A3M9hhO(C2`W!rM2r{d)9}ds6MA=3OSj}P3i`kW?BO_C1 zzWrRt`MFdb6Tbv2^2l3zE2x?~ENR@GI%QL4oOibQP9kli?CYVxG5^2+QFPA1b@yu! zf7{qjPTbh(iIYZc*x0shqiNhYjnUY)trOd}ZR7s#O#aWz31+|h?0z+BdT=Nb~|OV$k7(-q8U|m<2KWyapW~% z2q%M9uG15yl8Z47q|B11yL1@5w->XXz{EFV$nTU#6lrECzt^vdbFhMEXRZ;0wa*lN zOC-t6J6+(cf;913RWfa=QmBMzmv?}}ephCt@orn#Ht}psVx`sAVM2Ap5T5Gwhm-bn z<;#I=(o?0U*_xEzW!cK=!?&+RwkpjvqI(7IbnvgXp69s{X+va?>h~F=U;UAm;1AM8 zqXJE~>1<^uDC~92ia;kNl?W}pJ2YY~LJl@@2Ts)z_A#xzW`-p^l`x2c;=ewR#;Oe` z4S^$i6>Wj@?MGV3{STy@7h4BCHo=mw8IWXw5DtC|@^zTLX5wp^%85=9wQE3XB!{A+qNCS*37ge%A69o(&=PVjG%O_}L$V6E4p=ZW zoyZs)O|Futn(ZwhW-~>O!|&cU9G6TSfVxMppa)|sVA_Rdf|b~siKGvOnc2s|2JGQi z-6KO+-#>xqjt_?n^$M${K$EfCuno?PgYl1d|m*4cIOaXe$Y<2;L@2dDQ1L4PYk>tz)5&e5!^S2|_H+jHtm>&UUgn}Vr?f^=0IT1O3D zAzols8;2$PyGmq}jRfs*=;NmSnBfy+p_b}#@&)-WolrdMKOVv=eNiY2#*b+UKi7iH z?`%IBv!vuOq>Iqp;=&yYXVaEpa5O^B4z_t({MhDlo~^F#kgg2>Ec%_RgdpCwDaYGl zt=M-lE+RpC7-Jy%fj>lYg2u+7g7INw=y|Gbgg5r{_)VC zEFl{3(wv%|E-PS-(k#k8i)Gv4HPrgdhufa;k}a1aZ`jc+e0W-KHqH>fx6r>n!12Ca z8BdUg^>n68U*2q;n=#$}`;;if8b4H^3V!=Lc^;&toA)h)vwqc0OWQ%^}I}989*L!r?@}K?v^lxYwTGM2D18gC=Jf1_oQNIw#t<(PWe#Ay+ z`40(`x^o$NQu1aVyJEZH-IjnXe3^mixFS#^XghTd{|_RuzsvJ^c5k6t*>OdlQQEP$ z8I|?nbLWldN2br;&!4`!00{;4f=k?}W8kl)(R%4~CN}ebiHyNEwu@DtTA|vm?@kut zX5j78*ZEax5HoVYsyc}M6>4ZaTpH|Z-+0zXiBAI7T}`dh0{1)s;H?MeeH6Tp>yHx zIYfq$xWp*w5ejREwW4byL=LmuRd=brnwy=?5+dr27PGl6*;MTf?LYVZ73f$7M2v$! 
zRCINt_N#vVrFlA>qDbQk6n5b0_dn`k2{NfF*H0f{Ne#G&Sl%qi#uOs|gGy`|kEK9| zO^wuZa+ia{3I`_F{u!7VdIl!bX>mT9E5-8+Nf`jKJSD5&elqH2khKLWa{Gt?)fYo* z3=9n8Y%pR!x{d)6$`j$$)fExJ6A5yK!7GLbqj591GdiXQAI%i0hat6J&wCg4r|w*-ZA>`@Tt0 z7aj~>tKY!ueCS(Af(0-9`EKW=mti0OgSy9rvPC{L37c3rCfFWPB`C#lQ2&#IW=NfU zXOc5sKUTITJ;II@4G^E@OqvW^W)yWnLk9cth2gk}D9Z=KBRAtSoD|Jsno`>P2~x0N z5oKlX)oTPIBcl8W#i@6!5i`eZ?X!9kd?qB3(s>>Z)4Wpep8};e?uLQ@cJA7O8gQ3Av9bH%P<2I|FDS2b)Sr&!0|ATq5cme6>N^b z@tDqB`K2{Z3#&S{Py#|aT}0=Ib-TgT1iW~$q_h8+bKYmImU$uQB7>411I`j}{-hhV|!6gu;Un=G#3w$%G1G>%Iib| z%ZFC&r)Dh|V7*$+293HA)f08{8(=N{xK+e?0qKedNc{Z7HlrKG z+wTGafh&S%7azZV?C5pk z-;LdhJ-qf=#El}!QyEd^9lzs;Kj9hmyt|jYRH{+_;u*eMvJ%+9b$eSYw&)a0yb@NJ zSz#(OK@DYe!P?hA1f^s-Z(*vab}s}{soL&2!fGPSK*LD8#BKA$&QD8P*di-usm4aA zazGf+!yjd4}%5gKbY2bmYZm(S_}X1c)>k4qB^)nJ4M@Ej7qwWM>s^nUrP<_ z4OP%7`M0foKA_?*Y_N~AJV=e%AC`3)J3630Cr(b<9=6;`Xx|S&){+hqHe!_X-IpBXGF8#C0lb`Er*=eLX0;{$8-qK=;qqq>{zAx;=!lQ5nuM>Yq2lHeNSFf3F zU1_G^R^RcdU$tg8KfcGR_{oeH`&^n)59@$uq2p1vaFw)Q=KUzr;X@aEpCnw!cbZyT zt@1P~e^G~SXB+62G;CSdkZfxM z7}Z9iXcU?0VmnokNB#KOGq=4pZa6&ET=3|g54=7iTc2~t5FXlt(^QK< zVfN;yJ(%fEk@Fl~qQ}BaF#2M)g2z9U{O7#f>SzgD&!I?0!!-`5^5RXPRHRMPPE>VB zZ&#X+rJ~;@OzSJ*h{YEL#_!DiIG(T=pb|8$A-6^+8dRDb)}l`~`PZX`OV^wbra6_6 zK%aGyF>>u-HbBGwOOH}hFoaDonvLIraosIc93{|uW%`x&=W4Y%UZByRFwyH0&+DL3 z{}>fsKg7>X!VCJfpvv;z(K1j1`-wN4JRcH$#daouU-wHTe8MS1;K>LG^6h z+-$Pqr%`+LpiyqSZyO&fXe>YxL^EPd;DqC^oV8g@b$o80LXh-hg)sjMBjVY(vNgZ- zsjSKzIn1~+ot@#;L|BsItXustNn7k%$&D8FdX4tcUX;?62pFtyrl^BUjXSQ7=5O*j zE;Jrl3`*fQS}@TQy}Q&8or2OXd{A0m*`+BF^A8F7{J+iJ#=^+G-CEZMpd&WhRc;{` z0AV>$&#ga+;$k9!eKcQA81-^@0%rt*18%Q@I$^|r`gA~``0sw70;)GPXIER@?2=Q9 zuo|!;gq<6GQ-Cn1$($C>#~m4aQ5^ z3c1AQ^(_tI-l8ZNrC#qz8y^$BInVLpnko-CoRqDT!jy=YBkK)Ji7(cgmSz`zvd56^ zfYTKgrIn=)-$jo3ltc>25y9@pEnp$A;Gb%ju9SQBkCGTOLps55E@s9TaM2k zXuAp8o5H8agO`hEnI>d;g-aK&c8(c86^-B_uZ79H=J-J=jT<*2vh z!m=Q&SQs)%XE@Brf|SmPlMl%O!5unf3~G_7M&Ruz{C5Y2!$K7cpECC!idN;NtP6(> zSk>6poA2S;RLr#u>Q&xPxqiEA)M-(hndhyG>%PzRd2UBPDbykQih1`Ca97CmDj<2~ zHPz!dveBYqUdnNz@cS*kGPtpH%<-vbx_v#5bo!RU6#vcxV%=&o$Nca1pXyoFkn3Vu zX5ArN-gW1hMDmkG+IvyJ$BWGy3DqN?Wl5*BOWX0*i-FflIecbZB(b>8bG}#u@07q- zPnh=^EgPRk!L!VThPS(#N4tYSzVMj~n5FUO!;#V0@auwD-KCRBQgKVU-m!(xOv^h{>xP)Y8Nd*zUY+)laE)DaM_l5SEsWc zO7Y3D;&UB?a)ZApVt#jtm+XwCbd3VtamC34q56#sL9K2-Eh9uNqfi_grfA;j%t_nVRMMdx<7^~ilhPRLMbJa!zKfM+K zfvTTuo?kyNHT(1RyD5uj4&INDc@He98{upXyASifEDDKr85`2<;FMi|g>z z&HHCgjYo6s{%YUe(w;rq0D)X7AT}&2G0IsYl&QdDisl6Q$y`#iNzq0n6eOX4|IHbJ zDMzgccc;cr#CxfrYu1Qws?@@3N@|3iU zfrDd?P0XN(rfuG*4P1}DkHrDAx13h<5sY~In1-^(hVaT1p&=n(0u8?-MywiSp=9F_ zp&RJep&~E^E7}U3lpwq z_fn&^AWqAhhVOtGH3Je0_BaSU&|*bDjreF$rTYRr(g9FybmxCQqDA*W$?xHO%?k*h z{C{}>_yz!mYg;Rr%i%V@ym8>~r5$aB#3=TVTo09Rq)0I^dE37q?%kXy9LR za)>1h6zD5z;=;Zy_3vK!0uEY!SG>D!>Y&7O-laCOwMJztf&u2DB`>~OgmO&&ExjMdOO-zD9Bm@QDleOQEC9%a% zDvdUmlLUQF{r%xuo~?gC9~!?~T+AHj6K()cfYGZ7ZK8P~9)=$awI2v|iT5!R(}k>b z5}wfzKP8@woLnY^o{A(#+od>aMdD~1RT1u2eEbRfo`6U(;WouwMN$bS)&W9!XVs|E zvK2>%X8TXe^m=ocwv{m;5?DR+%y%btaI0sDzksdtWl z?>yp<3dX!48$LAJok37~FE25P6D_=*1lY=FfWo!7q`l^m=WiR#0XHKoL-^>U8c zfRyU9BBwpaIDvJR6y#c2{0@O%zzaD3@RYcop?s;|7x)wSt58(WZ2Dn@==*$JBvI#R z^rNZk;8*Iok8HlzAL1V)@7oJ162G}q?(h1<)SOd#EUKn0K_x^jx{@=dd5-Rd^cG1L z^aDz%!|7|9Cg#I{NYzqtOq=e-FAz}igv@8q+npj)*({6x5`V)UF{#NRaZ%2e*a>WI zr~HbLM6q;cj(0j5;t*YNurNPBvPAoZX_=p|pT8#G%R8jjihqNXY&?RxVx6qw+$Ue%?NpWT)=Q^i{rLVoL6fYT+8rs zYt1(vC*4ZD`H^(e9g>9e)PK7fm`5a-2Sf>^=jrImJls{@fO*8#X8B_=$H+!X&o$Gn z+-NF4ep}9S?5WIc-s`AY?>_Tt?Qd}>i2H3I2CI{V`^?>Ossv;wF;n6Y*(PPgw}MHJ z$MsNQzNND<&Bira3to20s*hJ+!q@ZR$p3D@_$0@=zy5P%BSF1d&PTbKuEX(Hsbst$ z;Iic@-}j#8x^Z|I{i9QJPZd9AS5sA%av=ZjnuCdd#IU`{ 
zz)ew9LCK>{mu?1E#C?FXOr#ZXLft4;aXHJ06f2!&jV5lOFdiyIT^j~DrI?|*qnj9xuABW8#Z@dBm52c82x zjXU%O&|ZoCp2gr=u4?b!icfCB1v)Y#TDVFo(`ATQ9lXV-&VRN=$M}O}+Di@jwXo=& zdikXc6B)zG=n-FDoo{rS0To!4SEU{V=(9!g0N^Y+PO*o{(Uf^ zA1>{+rP9rse;nPQn2ie;^v2)#B$~75;lJk);y(M650!)8^2<6J`b8@w&i;x~(v;3~ zp-cx0wG6ow+f=A3=R0K0_yhUJPeyXRtNsgUO+89=lBQ9$u?DQRyJAej;)qd4lRO`zx3Ig~PG<_ykbQ2AC$oNBqRLNf_5M*1WVF`?Mqr^R5baV3 zLId%YGt$##$^wiD(*`c-yu$tdLS(|z6*+fHm1y06oCPS8r;*`>ut`@S!2~Lx`4N;{ z9i0d zSu<@A9r=|tou(@`u~%3IJ~>oUUkE_p8HDHUVHs9(hN)Z=eGoOAQf8^IJseG4B63nJ z48h)JViG}uvNM=IV2=ro9@NPWlVX@c%*;7r&Ot)V;u}}`9-NGv5lZgx7sUP@S^u8= zc=aCRmuIN1G-2dUwxM|_?GWD6n}}cpo|FH${;4&;ZeMPXX%7CeaSJ)$OZa;(~OR9^OgRqCu@Ts^MDFo%v)$*(0U?wZu7u!~YD8p}m)uzp6pf?HU=EKZI*C)7#H z=yp0!nA!n}*!x+4-g0R4D!$v8a3} zmwh98##=zLs$Fm6wPNJDC}wX5dqu6XRp0{evL^k#F6GX`Dl_`qQ=57D8cKt%t$FKg z-+?v%K>l;o3X09w<(1#JVsBT|x1HW?*Bi{JuO_yKe{8HDv=bRp8{}%0_796D4#U+WV|^EEg)MS~`d@9&Phxar9_TPgDkXwdh?aMh6Ax_sH*q z=;KzAP*d|zcr;^53+bP(G$hwx%F8&1MBPI*9T%*Y(_!a7#|g-*?*#FDS5RUnYfw?k zeE;Jf-i&MU^!u}%fKOp`q2@^yiH=nDkNRsOI8fFZ_@04D)7~o) z6`(iO>lz@pm9gV(VnP+Hr=%o|XS&hxi#s$Guwr|hN7ZSjP+)U%6q*RRLpux*Oo~3s z8d7ngRwV;Ctc0heDfVHdNL;f`vuvjIkb$PnJ9HuO%xVZ&2ulkYwTc%UR;%0Yd07w; z5U{nNUq~^7DZqO7t!hUfqkqrd`UpGsKQR$CY%O|KYwjKp{hQ5$4b|~MbW$ZV*g_^$m1`t;1eM5Y-|N^=7;l+5J!x(Bvpe}B9^WcoZG z0i#`{z_1JOg94^0cBBT9!fpR<^pK*(P=)n^O%W?+O}Bdg))?4QvrCfcjZ;FjxKome zfS2>3LXva!Yb7b{NYA7phqK|(Nl|zrXiu%gdjMJv#4Hj@^Liah2nwugVItV3fjtP! zmJnl~yk3qUaB1dZIN7CP{Tvlh645p^a-3I3EnQc`S(R6g>u3UZ5HvJOg0rm z*ce(2e-YJybUebZ@l~cNF&o)P-VHu)^SAgD9Yu18I}$xvzfef9@E5LN;a<|e?nqG* z&SdM;0_5akA>Ik^cFl@E2l1ZmskS}~8dgo~5i>y<!qrP5D>594O^Wuh=>*V%(@&q%<&xj110&q9tGdSZ&QjDzb>osHmbw zV@8U6sucAXe4CS9&`>bHR-W}mK2^Dbfef3Bs4-9c2kBbMu~pBkTTaxfHxO7($BklY z@yLWYj@$6(TXDIJ;I!^}^N%%6QRxmi-Vv&(bSI--8&za z`tBqwe?i%sHc>FFfXD(9cxgkte5waR%}T^MiDv+ zmEVvN?=kc@);33rX?=QdiKm~Ro;}OUS56zSQ9%!fxaZZO=N6Ia{NBHE91229)&7=R z`O1Z7`O0Qjklej3i*vu!JIYDBjv>0Zy1h`%AswP0k)-aYwo=Y3VQ=A2gTF3wkV801 zYZ+Q2txRdm#;yE<__aOCwXw8#3xjxGav+DkWJ{4IVVN-~!wfl;wtv_9TA3?e%~x{n z989>mXB)B#WV)+4A*8p1K6AWC6MD$W?|Cn)X9zfUjq#eaBZk8huA_p<+F@1<+3qjJDSOuFOpWJV_Y|xbcPH${2vUG=dg$Tc5@nGph^DfvoqKD_`f*0I+^5a~ z-}Pa}Nv@GWmqg2@#O7)z!sEef;@9hP+J7QO9|TIICjl5Ws4eT#xTXGFVi(vUoc zx3=u1R_S3z0r*UGeAayWD~H4$uA;*nY-g&@y{MV%A9)CzueL+ye|;xR=lq0V#{c@{ zp1Tj1@L5u6F#7Bd$>D961Jk@;^@eWHMinvH8D@1BOv23FlouOIGVE00k@J0%=v7h_ z>w|ZT1x8iu_tae&YJZxeHCPJ*bKYlC;$L zPF26n#8cu(jZ0P~oCm}Ke9}{ggb|TS63L_*lrV{7F=DiUt6uSX#b2q+WD)vou4ohrng;VO$L5VHxYcbG}kaRD6q=7t|2VU79 zL&n+Q|HlGOrL{vMp_p<&mV-k>2L49-4S*npm)2v4yGfRZWu~l!fjeZg{;3VWf?%ef zVnpaqC9LhAkR>X{!@`LuMi(qLrfXXauCRwq#Np&fP6vxmX)Tt5<)?%}bLL6^p+-3b z_vV>?G5rq$q7QGRY& z3_5A4K7I0lRiM}_i+2=1VS;KUOpI4K>u5oy$&NLa+)t2hu!xcG>wu8aglh+g%u(__ z0@#pGwUATq7(pHB0deF)p(J}AScm`^(V__?AQpj7CU(iAj%=!rL6#(tgLX`L9!M7F z(|cKDV7Ta5{o2IsqfVqtp6DKER=Vh^qrJi8iWf?*ELs+<2o{FO6Jd4zutO1D9WrDs zKwa1m(^jt0JKdn;FKEw)eq4dn#HA4~4@ zGblEdEibxA)#=XWcxm%eoGIrhy&5*t1N6L3ewu7}#gV2e#FAU+`MBI_`Huez6rsvg z4^*D{w4yZs;w!FcC-(+%E3`k6?l8&XE5E3HLCCeWpTKr zo&X8fDe`*$pTU<{UqkClEDY(i23aSb2< z6Y{#AcDY8Qz_f3R=rlV9wNV^H?Ua&*#`hTR&SlZSz`#WKnWQP?d+MVO*>$sYv>M)Z zrM$nr#K8&G1+L=UGJX5D?8^x5g_Z!50hfJf(xN{9ltOamXa|x^%xC?5VrpA48kQU5 zK~N2tJ0KHI*3d_tr@?Vr|MFj-ny-TaCio9PY7p?aru4~7UL1%f{Xe}I_{2u{A}tCP z0!!qfj>%TL)#^kM5yixtfj2J%5V1mcL;}T=ICzL3BL~&!el8u$qSUoX7(k z`Vp5D&o3_F7_ck?te*|wn_|WllIs08*nH|lV1LeiAu^Ev5rK=~ISWyYa7D2m(BA}& zq6>dDnbRx$!&&v^7DWo0p*WnNyeJN58QqShiRCxefN{twL09-ET>LG;;76vZ=oT!i z+xbJBZYro*%_e&?GCCpVGQeYDWTX{Axaxj6GA8?)FY7qCyh%-#kM+ zI_6p?8P`y_@pSUxLp`b;*?&f7+d{#h$WQRqx)d3kA1ZCWBIGN=L1ark1_NTHy@Qljs0x@z;8S#vO;UGmo5i;CfRi 
zeDXVf`(vr^FOpva7e|U8t>~D69FFN-s*u#TQuGq$VVM3xs$l{AB6JXlodp%2 zP~MctwJYl#WF%b7#F~RuL=tT*_lE2=F^-6)2j_T3*D4dNyv@vP2^Y$u#!p=_wuedPzx?*jonAOeDufxgCFEur;^%;*tU(eMhEjO>^BhgA z_fng}0)|ZBG^L8;ewB}`%S`<0lo}t&a1uC8X=f{DxJ83($XQBXlrUTAo}DW!`LJh;z)52_a*EeQD`o{#MgbQKJ#b--^kg z3Q@I{p@06ycJkHA?7hxpUT&oD-Elr5wEn&HapcKn$}P=)glp{=aw+w%c+OHbcewzcI2KWp(3>0+wS}L!cM^lv)8uV zOU>`)n5T(28vgO*m#527K^5sIr=`BHa1%`lslOYQ&ONCvW0w}$z*X+jyB zV_7;+WqPpLr^((n$T}*Kuw2Q&iCRg$rtawbWVu588KfY>s#6bn~GRJR(qSbiQjMi zD9H%faRinee#E@B;#iFfg*5ye<~hY=h^zyMqI%g`v~37;-mSr!m&eLA2z{^qbF*Zg z8neue%8VU7*eqbXh0S76GU!sdxCBx8KjjY1H~587CG&eB2<&Q<2j`H;O(9jB_`V%~ zmLdkRDqBPCII?_azGjFw#4fw*%bwO1Vkuf8PTR|m0N8HBK(aCEF=VhACF-!z$sd`| zGyc;Iu=r4F3R}j2k24#=VQ3F9ucF!dp!MNctB4DTk*s0aS}35|Xmi_Pon3Jk~x zuv9-brmr&D0l_JX6;`&_g)QbsvO&WaY8pv&1khJcObWxvPoreV6WA#)koTnRJ=AKWdi8c4>)%ps2{hNR zp+A653(FY$4eeH_pOYs7RyOjt;9>@Jm?q0&^%kkqZ_H78cnfwRgxu~*ZJ{Kx*jV_w zbv@!`nA`Tg@3iYzYrD^aqlnM0o|9_1pY>es-Wb=e=pheP2(W0XJXXJ(CI5XJrZpN7 z$Oam;(Ok~j5tqL>DX&ZMr#4Ku-c@`S?dIK{3_OM#*#bI2pvxs+U$u7c%*ML)GP73V ztn^t*D${oM>bjX8PlZO3S(Dob^00L|t0iD-*Gg*Eifz+M zWZk|o=-BCJ(kii0ezFj7ODw=T@0B2-XBGRfRz=WBX!>HQ-vt+wxZ;<{=6d2;f2#u? z39`+fKBAW|TiXgV4uQGMQR&>NDXaa7(aAOU)#bjaD$^=o=8J2SbL+~v zsL^0!+V0sObCb&qO<0x1$2C@Z$;xcG+sqYFOC2hU+VAgCmdj6xk{4=?oU}4u9<;})E{Z3jX$6ohWV{}%>+2SM-^mzpc|ChpBl-oE0 zyxLx%*S)>Hf1Eb{Tz*hZnlBF*W}tnb<6aHMWbR@Z*RSvU408eCog$^NKf^&MbV(Gx zo(3V|NlHmoTW*a4w%#PPtnE?_%w9F6p>MH>=rUQR%36TeiVB4*QI@Fgo-#y`WTY+Km!|vTOG5|(!Q&;LgP{rr zE3>%iRm2Zjjl9U!e}IU>UmI*zCM8!IY^pb=R)pXt87iLo4p^XXNna?&^8CqPd5boLuIsn`^hJuklzK9n+MAY^Jua=r2lA+T|qQV z^vxOGDT)|q8daVGeA*%`EPtrkU>@U3j5eQBMt3QeMi?YqFdP?Jat{1XPe-ubC%c=Q zn=85!$U_kR7aOPrHN}dC7ut3r544^F(&H^7d;l{6rH8jCj`;Yfu z(!~QiWCUU`AVta4QaIq>hj6>Lp}2dvDv+4YxT7nRS=*$o`y>JD^9Sg4H#rVI`ttHJBm_RzauA99=FcA8ilK3o z>G|nLR3m5yZSXmx9fh?e*djpf?v6j$}3HG;+qy?0UVkuOX?+N+!XaSZ&qq}y8--4kdKP`ax*&mt2tYZ{He=M{7hh^k=e7|u> zV>a5Qb<7%cTm#U|*2<(^SQy4Rf8H@Lobr70 zRC8DV?jhO@#XZ>9HwNjN#*{@!L&#F{$PKA!DbYj2Ays`A8>f3C{ zrbr-N3tJIjsIn=;l*yU+FVdm5_@;^7GEMis8h`9+7`lAQL(dvUJcoC=ts640t8J5V zo|o}k&rFfdN~Ah%=eCg7!>a@%=k)GfY-+j3pS92)E&dQLjW;+5T5q|1{oYBU-?3}u zH8oz%wRm8gvy-#pIBdOPSKVwSX7QU1JqD{sPu zQT2yI=!M<+iej11&s41i1x@>P&N3m6C~}T+R-6B}@9q9=5(#ow@%3}CF3wR5^N_Fc zdl?7BDxgTwu*d$_a~PZe7?P^>mx-dPbCgN^M~OE%1FPY5K~FLa1wqQ5xs*4Qr4FLH zB9QKRldoQ5@2;111MM}9H1TWFW1Y*@oAQoN@AG?9F?$pQxD&{C@!}p_{c1oVsinXq zpKDnQU!xja<7}h-wBnGFLWaq-lp^oSQNL+0;eqyS@{#R6+_@*$l$O{?Q*7;2It&Cr zp@#C+rX3#|=`O`2=+<&=BuSjdJSJTPEVX8}YqW`S4AlFvc=Y)?ycKQS2zpbaFrYBC+vW8=_&suR(iXGk{8 zG}M;wH3lRq_WeuCQN@zugsI20IxN;2cx9ZfZ7&#d-!b+(R^l8^H)YZcKyU}rUE&`6pT9#3Ql}76qrDM=(Scp~>mL=YZ~uQ1 zqnSNU84|XB&lgy@IUITeN{kp3kTQ2P>L#VNf@S$8UrBy;h@}1->nGYqwic+-w(o`r zHp;aMTW}qW}i?*PgfU7LRQi2Z0)}m}sP;X8%W3`#I zF&(Df3=m7PCA_B(I850O{Q`PLVprgjW7l9een-C$%55@^_XIm5CCPUNU5PM>N;!%~ zc=yc?+E8h-=>Jiqp|r~jQ+9uA7x~%-VF;yC4uJ#>k>l^tUG_+e?Y6MvM&hppG_Z>F zo3jus7(0DUj4>Qy5#17o^86xbmgUkoN&{il*ezr|!d-9) z@ih#M!+$^4`Tmr=5$dd%%-iDYsN% zh&56R4a@cEs5a#)!MoN0pNwW)Q>{5tea5YM8Mn;6{3QGM#PVfM3uj+|4x?YRzYih&rNo{DUPx*F3l%Ci7PGZSiY`b zVR9iV4#g6kxOp3hlc>$7Y-7MLun{J|Mn&S-+;QRtG@TZ}RzRQ!*A6pkgalb?WR&!s zu$t&&xWW<6FG|+lH!OKh<{YFtJG&RJIg-i0HhJC5%$OA-&NxF5qm(~gUrBD}2k~*5 z(9S!zNK5EjR@iP}n#dIP#l{pJu#(NzqpYgk-Z1^koz8jUfxcY%R| zs_pKVuArho4q#i$80eD&resmt01(UkzfJsAX@Ejb_YBnUbX{!VUgK#AnJRi z+rX!+t&=US@2!ur4h9{bS!h6xw2S2~jwNof@Fxm6NGQVp=VutxtB{Zo{Txz9p`uEE zI!Or$GPwW)-mBwo&p|sfNDwq-!F%@}qu4mI25A#C1b%tb*W#DF;1P2bA{ctK7kE%- z@_Fu5so;}v*YJdXYK6DA|L zC5jVv(7!f=FF`NKaRx(G{;VY{MVyhcM!efRkOcj`4EIw2jSCcG7-xVSSSXc2Uq%tB zjKV)q-^G*vAmK5u0?jr-oLA?o(tf-D#3&T-966CQ5ESrU7eU4%+Ix1B$R#5L_~8-i 
zGy(3yRY6HNi}34YTm{UIib)=Z{}ID#*3^vWGG_;2(KY&Gm4&8g91DZEr*0WDi_!F7z7Q!@*;Y^dyHisbWecO~^It6NcBjx5VkOEt zI_eXQ);-^7n`#NYtwQ3dJmKz9Vx!rpW=Y$4&4eLVwQg$jtGpr4ReC2XwfPeA(}%e& zIGLDR9GiMbO*f?-uAJHxDv~nF-Y{|&r@vaG;RKUv1dvWm=^+nvP9m^2)W#g>^J+F6(E0(wPCBbgmN#lG8W)fpYt|Kd}X!8!OK@3u#_K zXMdyj-e^}?w%jX&h#;;*{Zptf8g?9Yg67u0(96>ujt_{IW%L{t8F@RW^3JXrE9}+X z<{AYr2;7-gAL1+b8iaXw%?JyqZDI{~ZsQz?_F;rF%i6|Kdyw_yx+xOzkMCtjO^)+Q zzh_s3olI*OFxnVXCRt=v8J83vGEU(*g>ECEC}-%q8L${DtVn4IAuD>I{j#1yZP&=M z`vv*21)fHY!BGP-8bcIfoK zrsuw9u{&3s{uVKq)|ULH0u*G%NH-ZtrwIauPV%p-t;Gq2j5U_VZ`THQl5ntzx2`zY z9CvN_JLwpjEF5{i~ijv(G zdDmR2UNu61g~|ssqBie1)GQ2Ut_h2Sn)m?A);S;ypDg2uECGwKou$TSZ&obhw|=z7 zY&kz4RDueANrud9dz%Q3%B;pw3vv=RUa4eLF5q#NUjIH*Yzv1%MB%199SD3Y8M^PX zZL#CvcMdPyQmop`81q&kgT)Kl37&O`rHZBn8>rc+%WE*fvMQP9T8bLs(lQ!W_h^xD zv97sk-P5HR1jO20(Rw6u;i$4HbeAj5>Iu~1wxm=hVvF)(l;W%_Zjq`NjUN=bXO=8W z${z^ew zK&(MqVj;%-64?bVaZ|d$zw%}lJxqD}eyztxmb>ELshPCkjXEmQ08`vq-SN6+{rb5Q zXhDGEe-xcnSd?uTgcXonx|USBVJT^l?(Sx3q#Klw?vU;-rMtVkTafPV_W%9^FAm-7 zJDPW%nS1tWOg)m_bV6+f*h3q;)B-p^tRmS%%e$kE!6*ifPT}7mGNE&4z?h866RrmN z>`fONF~0}I009Jk$A_wcI*slR+b2C_ll#Ur3~q7&f}mAMbg>bd93j?Fj5uh|pQ%XO zLC@gp4JI@rydF?6R2uaQ3|n0`031Z>jiGOapiJwgOpS$#p&`-zS(-3XjFS>IM$+ki zPyL=Gpj>>Nlev$;1yuht-(QhcR?G=3)IYWehUpE0u$LfYhA@R|Sne3Lni6A^;SW%; zCSxB#M`-3^V45>DQwT8fO?aF+jW`U{IHI31F}>Lyh5T4-ORv2P@zved(;AyFsHT0rWQIy24*Y)`;8&{3CtITP zKzjQxfDTmB5FyJzJ{b&mi6_wAFOs$-W4liCC=WUCYdm?UZislkL3vDv@xi1ItQN61 z#W7)Wasa;8l+xX)|U=2kv}^Sp#G3*HN5iyWgghBayK< z@6Jmtt3`2#rW%sI=(f@x7LPShw7C8fSCyuxQf|nf*+0)I*ojU%k(6^Qi~la)&~EOX zt?i3p(}4>N71WW|9+i%LOO#%$i&^2zl32|S)KPdCC30{-wwgEB{`W1O$#A+r za`aEalX>lFJMAi-kbu4)nHJ@Jx=rP;bq(5J=WxiqYOGY+4Co{$%X^M`Ua}CD_NTx< z0@w4Rr5+Qtw?V1YA78xc%tjH4Cl!(g;+l&`nwOQNYaogrFXJ0}^b|MgeUr0hlyEJ} zJ|$D8zG@fk*)WY2qmu+5p68daoF*8L{d>xf`er0Bh=m~Z3NTCoTP(Qoc7pHxWVP`e z*ku9+v$}dBIFT#0aEJYk5-|R7G-YV{0&vz85k$_epMP83S?GBjx8U$aJ(^_I?GECB z9jP{tpDHatrCq1dj$|Z5(3^0SP|!a;hGZWY=@=sk2zDR~(yb2IpkPeVVp4t=vh!#dXB*dP3JslM}J|>Tp2- z5s8DZCCi7uLH5D;%;+_e@eNGzEl2?7jqaY>1!O<%S8Fk;qMG~-{}1-GA0XlKH4Bm< zGB`vCjr?OY4Y&z?=6)SODa-fY+max-^8F+VA9b6JUg)Fne)Wf(e+Tden>V(q=p5l4 zn=1fv)_8ln%(!7whW$O)gb~BZR4(iv&mvrjzf;cZewP}saS@Mqy08VZVh zpa=$k4Of}a+s{x-6cQ4`Wi=D%8R|gV$yz6312-r}Dn)Iz+Q4M}X=g@Q00@5*eDQ@; ztEo|Nh>@fI3~xy$x}vrrr=t2W8(IydB930~|NcA3k8wL$^2>hjqk~S$|F98&zW)7O zpeS$vXA=Pg3#;x(A6lK`Bgzq+#xzDQ=7h3$1ZY$s{?O{UJwTJ~b#=RF=zF(pAq4Eg z+WS7Q8onNB`aaEOzikV>ZI7~adOcq4etf$K0Y(a;Tr~VWVQiHCtQa!L&J|O%y)`A5 zBGIVxHYHMHo)on~US!llY{m|u@4);?X0BMM*U$@t5@>`FH}!kpm;c2qFhAQb*8Y6f zdH_rUPqLakIJ0VU!$LzT!Hyh#(x8fn_x)gg42r}UzYOpqVdbbgUO{3#PZ%PBGUH;? 
zS9M#g{T5cB!W8FM%~&}uY6|%pX+*mv0{?0*UXBC0#Df*rR+j7tj>X)cLhN}W@KfPwwxx;!^0;>va2C6zS_f)Wrm6t(F;5CFVrk|f01aPri&l2Zzv82E8g z)8|cFE!iiN3N(IsG-yk6@6qWm80Wfca`oXe(BT#xU3Ur1*d}Utvw2~}cMKMhAUP8^ z@FkVD4iMq+s}yq6mj>E~QaT-A&##@DT@{!FP2s1MMT{@7i7!s7$HmdIDB_ie#hVU#beV=>RL)>1K}2;u$S z{r1a8Zcdw5%~k*(HeDh4;I%a2Lo?^ro_iY**#$hxJrLt3&yb{=DS`k$>A ziFhYMo-*c`h+L1&#+_Cri4k4(=5Npm!zcukI2|A)E2zG&CD{#PT5iavEu(i(i@|DY zwwecN_xn~>!TnQ}n7l7$m@+EvT4~bSxCF#EE#cb-Ickmrzlc~xO*QPI2yzUPZOFq) zZ3+Z54sRT)BC<+lMCa6tzwr@4MRGj~1 zUgs%l{vHUl_5Ugd<1|zEw+!|*Nq*pKh2!JnazzbwLyvJmMRLEj-XjF>5)y8W0feZp z#~F!78R=UYvwRSH?MMr2ulXN2%fBkhtc2Hr+$cOk(k6Fb<>1LhTtIpItG~GzPe{G9 zS%X8A-T9cDi+q@KjP-sR?kRC#8egjuZ;LtTYgJB}w)!M|?P`m;OjUno_SP8bd=+JB zb+Mw6PlmIXzxB*T@Z)h@xPse+P`Zi-pTPYp+3ZB&4ol=6bM&Qoh$l6I`UL29N8IDD z<=JwU+KZdsavQZut(m-@nR?xR)ZRLfP(3f$6*$j&EpT7BeqSp{#IY@*V_>E>-&cW;-Rp)Xxnx;-LG-fqUWSbMKg{KuD+{ZP4* z2SF!!9OYWhMlFlTpt(BHqFs(&-{(`asePPWU>%3V$da9O33#=tt+UkSYgKV&Ym;bK zo8RpY}T{?knyXy5~RQcHUkD|x3wzn!bR zB5>LmYkQ+B4}E2%i1cJu0oz>s^3ru$tA~n1zrVxz*ajE%LS+W-$s@vDN=2DuFi!sn zkyjBdZgy6I$PzEZYWmCV!^rc`PM?zAXgiW zlmUqk)@|ygDy=PO>ndf&r1;3Ezj}0$cduwYS?Vs>ty@{OyTzW>=G3dh^D{Ie2FBFP zsON$$G4G(@X;P#t?g_*}R`K%XA`ixtd4`^9)lH#v9Q6IVs_gC+;~K;2r8B-d$=m*C zGGoa}PzrSEQ;H(O!j=CAl%c5~bmIQVWppFBY^QAP9(XyS%oDwMW&rYC2tsD$$rgfE zVMd7w>!0!~gdEa47m%8e%AQ78JNBOU`<=~6*ph|=nZ0>;f!`tG_qb&q=t}q44<_W* zcTssJ2O#FbVqkv;dVC_JDxR<4MwCCQjDCb?ESX;;<8}%=C>PS*MVzqnPMJMg8TQ&< zWf0zV#>jlYVj;j}olt_RO0HyFc34>JH)Uh&yMWD$XtHlXY;&W#7mitB4VQH4##4$7 zQuV}g_$$Uc_`;+&ftb1pP0$XS6oZFeHw==Ee)f6{XM*1RbC##x!I0;V^dhO|p_3Sx zSj;`?VdiSlwq!a}m5LZh++Fo!%p^>Cmzd19W~;RuJ}W|x=ieeXzG@`Ne*qh9Gy=4U9QUVOy>zu&qqg13-zuA!aKF;)wF!h4!l4I!HgD0r!MHA>LA z@5E2@d(owr(de_phIg0$KJ}jq2ez-2*+UNZNfx$S8hXu{5?%0zk@UVU#AwwKhPc)( z^NKBJF9jI2v>BjaoBb;E$PwY`nMd^(tJl3%N+pD+$F^*1Ip)%}6i8I&m5$;S4<(vU z|I6oX{u|g#xMu_@{S{eMYnYk;UG(yLn}FcATT zBo$Z7m5!<2Y!oly|3ImiXCARuMfLUuzf4}R`?_byqvHes`UE;bfb{E44_KTu`smR? z3(NHpg)T{t@yCTPItg!JAr=)KXaZm@0dGIk?Hqb;www0oeOj)p)C&}1Ab^oZK>dJlyDXz2|-V-^S-7c?R3g79?LCA*MgG^U7K5G29Z zKL`#MD-D6jE5L$^)`xw@Z_})?Dfm7)2%UT1}U_&!o3lgpm8h!9o} z(920KHrSvlodVL5k0iYnQJmSoayAm6g461eF!2G%j_}SRuE{_XXalAHBv(kzDKINr z(EACnY`H~Ip_uR`Zg*{p5KA!P_wf|YI1|(#~Xx7hsW8O z8Cnk!P~cGww>gYi8&!+{6@+L?@x;1h@SQXS&&v!Dyq}j?Xp!<^;+-$I`Yk52*4n?{ z1DpfEHSVq39$ct4kqn-{5N<#cwV9%&H&yt0^WHlk(VuK%1sHP8C-pji;8dBh_`fk$ z^6OVOtB+<1^2<7yRg4X(`y)RDqpl~GmSP)=J<+h)Y4>t_5ZJbAuOv0}eZB2`UG03` zW$}Hy&i?me`0v*6f4%zuy?=j#M`ok1tOjy+lnM?`)et-5eFyJo}^B~Jds>{iz zv)1#AzrgQaVQ^}<&>}`W zs|Smi+JiWR=ffk@OTtH1yOOrHj3(qGvJ3ylf%lh5xXAX0OnqonY`%JHYWnsw_Eyv< zaeTH<@*yA6PfY2|iav7427lEqkCw@8!S+M-CUw&d`7F`cmCR#aWACEnPOE9045I~# z`mh8mcm4C1UR)!Nh|@abS`w4Pu4!mR1tVM(PZ8a%lJ@8wi@HD4F1_XkR==74Zx?e7 zI>uJw#kL|huXyNF$>i&uI z@28CR&Xwm?Is;$unRC70TVU~}G()d8dM{GElZ5i*GbcANaB;k!c7snyAY@zl2CdqK zw(Wo5GRvQ6TeIOe=g9g@*NF9pMf{rq2x5rh+8Sr-=ToyP+Dv5e$4W(^naD^Bwb)&b z0IuAHGHXUb%nzlWCYo4JQd)=q*NaR3b_P<~_zd&yMEiCqZ)G!xamH-Mhq>R)Y&!i$ z?RQ)4RQ?xgG6;51w_>Ok84RJzrAx&9mDI(Z)XkmLg_HfM{UNdW@&5KQS>H@+yO|EQ zu7<3&0g<(js;))}lfVcs_d4(AZk4GK-rPdY#sF8D91Ew=azokUrq2su$_-Fk+wIXF zz51W^-fa=vHWUL0ypj(q&pHgcI?nt8D>t_+OUolXoK3v_wR!1Z!=LbHdN^I(s8{r6Qh{R1@(qC?_6>+>`1v>ol8a2LH(y{Ejz zo!U%u+pkceW#~c->~+UMyf204z#q%uceVTz+KUx0*9P%14Ge}@+b-fQcmq;)y1;`? 
z%d^n=vbK({m@jEjqQyLjhpXP@#9B9VS95($g;#JW^-%rii8lY|DkobM3+*Onn;$0M zLTmYZ)MxwDG0BFt!h7^ajWe=~_x}l+k2Nf)vx``d_L#kLM@))*HM3v+y=MFSZ-g7q zQKd}v7$J$$wH^DSKR7jei1g@%<|&1qeN3jH5cMz?=>}R*h#;WzN@w7M~uo5t5yK#&Q+0Q9MTtAVGOwhf<;_ z=LAz87cf%)B5X1cnJPt;PhK`8OxA~$k(k)OA&qCNRGW*R+hw!@tf>a@GzF#_q?{(z zLh?by99uG4w>`p1a4RHr8Xe)aQtaYPcRc5cPEKrdGzuf2wV3AjWU!s4Vgz>w%ye|n zO`gvPgxuF}n?%3yH6&aNN1um<1++d+Bm3luTgE8SSgj2o%a0 zMxmmCDdMs9mIl=6>xXDe)?9LqFK`ctAF12SEO5ODBL<-4 zkpcz|((cpknU;(4E&z=xG8JNtEQ*@;T~2j6!TWQKn1&y13U$$LDDj^C=G<_`ct|dM zg5`S1LLA)D%l2@HbRhIY%pdK@trm`viMK(9Yg5%-t3ST@ucj9jIQ#y-0j)^=(9 z0%Tarz{kz9~>NDrGx)O_O6UsUwdi&=j8p5u0TzwZJ_%<#WDLb{R!V|idT}% zF-KKs8ohlKCpDmdW7^oIMe3Z9*BKE^xNES%$QwZo5sNh5yqf<8#W#*piV5QKTA6<} z&)F>beWo_giJEwidK3&lGKd;gtiIb8a`_Fh4reyXd@lxs@$Vb?FzNDK1lPBYFY(G zg$=1Um6oDgeQe&F&T07rZuQT@-<=?Ip_2IcB|@HF*|;P|Xo*JTvnb)8bIV8a-j-Q& z1E#rC-dT%bSFYN|SYMP5 zRx=y3XS_~aBTy0&kr*S$$2!1 z31qn;a$WWKer~|g$AalF{C`IZ<3tjt7ZdzjOBm8itdL;ZE?N2&f64=v6YbA`ARB-@mdO0SxD^+e=K0#A zPMajA3t*Y1Fe8O1=rfALmcLbh%QFOHxDW({NjT#5JLoFoV%r&galr8JJ`g=FiDKup zz7&i_mc^3%d2#%1f+1=*OVy{*{x2yCY7$XdjGSY3TH45-P%lhcJbdw$6_9}kViKPq zV>9=3(ipwQVCK0Z)+ES`e~;}--RAxg05t`mPpwY7$Y&Gwov(L~``IQ4=pZ|)5%!g? za))=AAEBL?a{&|?K^RWPIm~)Z7eFWhhzDnLJHm+e@e6^;L1+_s#h^taT5zl7OaT&h z0ff2lj5(u^pdW&BfXN&Z+hpz0b9wT*iSGL_DD-v-n6Lc6qdw%Lj|af^0v;Q{|GHY) z`S$d`OMWt7j=JHTv7f}fKZ}EyNzOu`R^V~M6LjDcAK7m z$@!RTyj|=X7izR)8yOT3!*L=m%Ck=0F#iQtLh2%ct*^VE!yOZ613}ALDuQVy2h22t zxMg(^z6b8T2;KXe$J51FLUTL+4)0WVG{2x9+sYDaf5;q!?Tl6=M(i?O^f>HL$*C0d zVOzILlH)6W!l2_=7k@+TF_)WN_s5WXvoxe0U2{b5TxF+?Nviwrtn1+I5MXTli$GU!z^aiko9nDj}~EAnHK~T zc@Zjg_}&3{-Votb%UDvz^P|Kd_+GpK$4+5wYYy)db8ac&zVrBZC`2WY5j?DkfXVMY z36cd(R3cCb!?N@ELbCjmg>=L_Ry(Cc6N#{{u^_0WQsa?avj3VJ?H&sCt&(UjYy74} z=%!B{G@{k%&$tWq$m{Gn={+HXd+VctQi8Wg`^NG`Mu^k6EWhQcTlb&`yO<}I1^3jH zM@8;qQplM|?4w)7KbG`(-;&cOa=xwN=JATbv`KJeGGp){iGXj$>Sj7pcXl0@FXzg= zhQ(;1xm`9@Np>HR(BaBO%DmTr{8tK$ns!R7pTl9i(vx^5EM)O`X0i(Wzi4jaqibeP zg1)l)`D3n3wUU-ySqCD03B~kPWYgM!b_v#;`UtoJ5Oa65!J=qS3lCv7sO@4>sv#VT#qIz^T`!sLGFHVMDjZB+# zO#4x!hJq*UI2|oX<_Om|qTL}Bm%5tBs48=pTD;r3qyjCORc)H@tuzmwE+?kx3%%AW zpE|Ww{rCE`YrXmTC|8RLm&36cKQJ+R<`=+*F$fLS)BfHwy9@NC8+~-NS>I*ucRi&S zvN2#^DsG{itEL^`p_@8HtZbff9@w>G|5zyhnjAMR6hrVGl51>&@*KUuiGcMRP_|hl zyE>%ShJQuA1#HyP4pdj0p}J+r>(}NJ?_x&VmY0&=q`XNV&6Ssb)fm_;d$k#y5i*s3 zaaNvKOX!(r`aRNPQ5oi*air8xSm^B->8y|Pe0jI~eMN0^KetbYSZF!d#kn+XMM~4G z$=cQVP?>$@Z`P(d=S*i&WjX&mn{{2MX{$cz{?b&QxqRCi!C?(oB2>%-miRGBk55O_ zCy{5+J(3nrethhHjqZ}-QJSP|(zSjKO z{i8lkTxxvsPmhWnWs^aS`%$C+XVF?RX$mn60y3hs(UA_hcjCXZsGTCG$gX(#ot&DC zPsyA+!PR~j;k`YlWe~%;RF=az>%c~0T)X0+Rq3n0OB8>O0v{PSnVxB@EJ;&A9}?hc zWBv#sBf)R}8UK#tTT;HCo+D3JRt){1!w}OLk6r-54Y}TDkf=Y%Xy=b~G``y*uv$ z0zMwke}CJ87IL$L&{N%nbtV*M7InHj~a z3?G+68buo{8mNT6KuCM~;ugsRpK6wD$nBIyx)*5%2{h~!vD!&aSCU+&AR&>gU-7IiwWhdoPM~akK>qqwl$# zQnz#)DdL1x-SgNCYCb$62?gt=z-R{ODN!5rISW$UMQtikNPs;;wgZgCK(%v4p$PzC z6ga_4?%d#d6C_)*tTf^xXbQF;Dp5WD#616Y^P;%B0haFpr#*#H}QY8fIJug zqq2*^Klv*}VVVGfS7_QEDD}%krjeOvU=YF+dwMbl(ddF>&Ie2MqFfv6~1!soV zZ!NaHFrL6b##m+s`n|gPaHlEx$ENCUTnjE-Qw|HCdi^1Z<7$!5FLSTq8TX-Dil?+U z&>3-!JSu&}Ot3t=X>*2iWB?K~Sb>0s6+xu3%SI@HSpRBhmO_?INgBN!%Av|iAtOW( zM?sdRHyluWhFKJEZkCSboNt9ZeSeDyKN4w__F%3vP*+5(OvUV?7Bh@m8qlIy$vpc@ zbO%J2oJb|ozGcB{rtEekSM0LK+kRLDfx_@FcRffm?3xMP+YdZ>$L-uf^xS#Wn!#lm zXzch0paYR+1@igxculA`_~%ZCEV<<0K(02oV86cCI6z7t*5$;;=T=LF9+xf}l<{qe zmzmMpFAnddpU=|77$@YIq2`|57aBC;JW1t>?ot%8#NPaq%eAD=2v0m=uIXRE3Hk1D zfA%2(J&kMo-x2Bc-x{cSF9!?-i3aZ243lmH&=Cr47BuQKQ81|@bjH8)WE-RmHZXDn zdhAq{_<-Ltm`4fHTKZHK;Yr%d7{Zo<&Sk$2;!FN%y0;w^1^XSY;V6~e!YH`+tT$+> zyDTEi5!Ln~44Kt6pVH<_%B1Co{{OGuDA2WyeL$8$X|{B?N6ivo*}h0gMgPJIW_r%9 
z1GL@%_nrfeMC(H^I2QG_ht35qaT{>~XGs!LGEJd%!;Mx-d<+A>6M!!GgEcWBAt5O# zxE@x-_-^2b5u6cPjnBn{o|Vw7Oruh zQr0zBuDMTb7bDflR*!TBw87bn8GFNI>Jw83bP@O#8J^p$)LQ3NaaYI@^;nId{6RAD z6JjhPq6kNmU|oKA@Ow8#KguS~rI4j25@OK;*1Q~7MvD@HIKwuN%j-#@K>`1rzdp~W zhHqENfcXma*ucCX4GF}s5uUFAjPN*x_|^fSGjx zcam zxCev@ME8mnvjK}8Pw~?k-?#sN$@XL5@7Y{Ygo^hu$XU`7;ZrCa2#*9~oAwiMQ7|X4 z0olaMeHSP(x*<42l6|;T|B@K<} zweTijJ`4K{Ab6OVZrlFw7m%x?oM@XIBoNwU6i;J_fN!?^F-$(qFKi+&Pq0^+}ox8SeA0*fJ9=!Rgq#`tDjB7mpI56q-@el8) z(S~)BMhtc^5HlBe;#E6;qWBCduq@zB>%?dkWtSP?rPS>AK;dI_JzlU?(OP;(mV~k` ziMq4pDrLW3K;KV&t06fGQ+ntUdl{bnmY^JTyYs2>-!IdT>d0p7g)cv>;EA9i-5h_% ztF)eW(#;7_BWN*Jaq6V`f81)iGql-<&IR@XfxO5oVMJW3(+oDlCFyCq|# zMY>?mxPvcixJD}_?3Hm)?yK8-o%}k>fl)@O5tV?N1sk#B4{*1{Dp6zCN^(kL3doKt zG&~Th^ARfdU7wOmx3b+K5qIH`x;vaZWWg+PXfx{g~hM89z(cPl-H;J4>iO@s0 z#tYfHXfv-`X?z{1X-UFeww`fSghhFqXx z%1DJWf~rTB(?!^$pz;YgVt0?wr(VL;3*Q}O30TB_+7}} z72l^P3tpLNFv!fWsfJF7)Zyku?H5o^66iBgIWGK+Dii3nC3p~9fk)8!#!+j2U!Bxg ztq@ve4yWD1c_n9MJc?X|kA(c1)^1wZR_zungF!rCYxzU9%pgCAxjb zMxR+NUKa4?cQ-43bx$A(Pqcj*97s|}S3$AAWqP5n+_)W)_64(T$|UD$QVhJsX1cTf za1vym-R~dRq&c*pAT^QS!>kmAh zukw@zo4zEtci``?{8V~dEoj^KrkX$9(@@upw^%>=yluQ67f)iLFGt^;%y07j_~1E}dJ)D-xpG*U4Ds@d+-WlQsrEI4-dtWU zcMIy*dHU8m+<@?2{@Xw(%EdL}Nm=b?pFUE4IsdOvW1u;ln~_8j(_)^n5Z$psY7y;@ zDOHm72Oq8Mv{~k%jrYzdbvV6_o?><=&!&<}$rv($bS}KxpBB3Ng81;3U$@rj~_LPqODY_QD=4!Xs3a^y! z#&D16W-3zKU!F{r{uo}S&q>*D|kAnUYY) zByaEBI9-g#j!+Z_K872&9n+A)MYN)VXbSdYQnF)z%wvy`nV$oSkosLW+KJ1=`vR)J zn~^+IUQM9*s0(YBD5k(e)hb6cl08>wTTpcn02O!g`FJo)3yT|6i!ZEtwBF9Q&7>ut z(=5CSWy8iH{X*otBXS&|5V}z9gIW*;o2m2;)v*ax^}#sc3cft%(2WNhBf+iASP2nI zZG^kc?H$du6);xW;|ZxoALfxk#Am~0f(zc^j-b%#!qE{E7)u^aXpSXrH`>K6rF5~6 zq}PC+-F(FQO*SZ@F0d~zBr_KV#JFm4zr4^fVe{XjEeLWK94^Jq^MF!Q?eGL ze(j}mAwE^WRlN@tVDFbp5YDs=y4GOd>2Xk{m=2AXV9iGvcyA2(l0f045sC|t2%@|E zZ5|wotQ5de-NN`6*imfLj_)sbi>4#uJUQraV{89v$qa>h+E**^K;zVOOptK96WBsN zZHaSuN0EJugNo6li~VF%g2y|Uivb-`B6nxy)0fJrtR0d69UZEjJv^O9=uwka8c@j3 z0EJ*A>S6sN_m=7&N>E;(p{YtMdBl@>m06=gdQ8OWA^oFx>rG51>O-`2hlZuGvr zCp;9CW%K6Z*|3vstQ8zMK6_gGHcOUo)oSVuv?;m!-@!xkgS z;dtc>6Q%-!n%cjeUbp1u2UgSrcIr9uNdBmGpjC|j2dP6zhY`8lJ2K6rcj35Sg{zmw zhXk5v3X=weuR^9*0~HF*T<&!}A`Ez=N7QdS-A&*;#?v?f{FDhkzSIzp-396~DK-V8 z-I#vlWe?r}7337^V&t#iU_($uF*cuK;(qoADFO+P*yf{LWD_1W4YCno9~zny8%eOF zQY1SbYv}uDcU;>PSm-^}F7Um->_a*038CCGaOHXe&xc9hca%f_)pINM7vj zw9MT*F@FBSQD2oou2*qzmno8UH8ogUP_+HVeK7A0KCpp>M$I!@o&)c8_ocJy2&yf% ztA#oB^h>o^@a3X+doadCZVJ-{o0@0AOkq4yw@aBc0|$K+8Kv#p?bknj0O8ey)`3h$ zO45e~eH63D z$ZUUZn0OBxA3uZ_Xg@`+v-(Z|(1VnaM?ji4J%i=;-Y=BsV_{+CfHw^CK{D$=TkYxg zc!>ufOI=R^)Y4)=MvB;4683)hTibtED14hpAn*aNIbdY$7zl%&pG&?cV}x62)L8)?*3iI z_`Wh*^^XZ5Ir%UMn04fkjWw-m^R_BC-w||2)WQ?c&G66gKgMmG)9X9Ts zXl6OOvB!L#21M_^`q+$I2+o3(Hm5HrSR4G_s2G17No=$szycl?CKW)gcF}$ufSV*M zI9+SydSjdt08%CqwUHcOp<*Cm%UG0cCLI+rruZ5Oq0;7LOP3mZ3i^}5o|o0MbL8_j z5|sjB4js=i5s!(>rR_=otgVkBs)UK*14TU(?0UWaPkQ1fWqJKv?Wf5ATG#Sn=H*FE zrUU_2U6SZc`vKPYt_c!@VK&fOMznUo8K zUI%JXCM5Pm-j>J}&v8QRJ`c^~p}eXb!rRvzM`{%>k$(?a7sM_s@XEf*Rl}v+BnmHA z1`!1viU!n3D8~e*;H6R0z{*7uUbPhW`I;v~&Zd9ukJ(wf45%k;REB1#6N{|nab>bg z>5QU7|8jCL&Zp9j);cCAC6x>;(=TYa{!;KZthCW6@Y4|aywa-m7jnPks5* z7?GN*cZq5GGEL^4{CB3JXXU++@^O}DPkuY|mfcF#R6@5ZVj+M>hEHx#pmf2Hq=u z_fa@Ln99^wKKK5LsQ{(apaWs?Q1bXjKd3{9j{kIcoHGU|5^ZOVH_IUHJ{QE6Idt1t zICcN?hH=0R2OKA<9i?UMFQFzsHgIB)PN$3c8Yp#@u|86pQ|PLf!qgtdSqu1~Ra8tv zD{F)8`i6j(@8Fa#gEg_u&npY)WqQ@o9Yl~qPQHOmt^@b_^0IB1ash|mt%Z~;yi1Gh zzXDuU0&R_cPu5L$R7}53kT&!4Zcc9}VA zz-8DbZfu=sTPXC9_ZojOpGeJ}n0||{_M1@lxqEMrn8|M*c~hA+)6|eW899+e0BfV3 z$;dv_CW}Hie`cxgTJAjDW-0m6T3pDQYD80_iN43$y`gMnwdlwxIqzewQ|AcyU%`Ie 
z+|lv@!X{9pra_QBOqDt#CPaRca4(g+QsD*g06X_*71+S?%V{Mn@)h^cJj>#(xzJ|tzkp_1{gDT|8 zzsia|hl@8db8jfEy;w{R#t-4HC_l;!N{%chaMSLo3LdDSSCWq)2&6X^uakKcX9%8S zkf1udFDfbR5cAd~^+X&E!CC+`0vs5twidTn$w$>-D0KEdM^h+){m;krRHBiQIH!etC||*UJAkHWjAC< z&@DS(+ME+_!uxxo3$q&!`?Cq_?zM|6{>*6y69MmXmwp93+T3na-U-T2} z(9}rPmNqc1+HLG$INg|hFfZ-`g$Eb0LQ{g+1qZ{m5)`qmbdV|CsQql3h(+Kd zjE1o3Mfk!L4#_jayp%lv%)YdZ6O!h~X@skgPz++=Sik-Im+=udt6dJ5`wOTG!FWHc zZP@sVnAPH^7KgIjoDoK7x4y~e4YWMbq9H~s@$LSHYU8p!wch4*G9uodSVRlenHn|r zFlHINP?!*hr6BLprKg_{mT4&io~1llh!m0NfYl2{_zQfMl6jFjfGaEVgUVRCnpVPa zDIjRpl)a&sApeu1&~%I+Bc@xyVHXH@_tyD{;8rp6gkkbcEUwL-42eh z0-Nj+E7MD8J{l%9H~c2H(g$S5p$*j`74adl#2=YA4#?Q8p)w2y%`aDKE?})kFWW>2 zFB_}Ha3#^yG+O!yzGt>&VQy>JtF9TmEZn5Wn`8cgP|}!UMPpm15Kcz zT)YP)YfKB)MO=J*lcyfye!uuC&(wtg+oVJWO=Forq?m7z@w|X4$%P7i6xcul9SG|d z)(KGVx?TceoYBO~_15_Njssk2U!dm8LGp`$?bMH3dQtOo~(RV6$q`^ zd!UdDKADW^Ny&hcLsjHuWqW6SkjSNTkCJ{mp$Wiqgkz`K^BhYhJ`N1Shi8mtAL%oo z!T_OQ(EF(ollR3U>WHbwBE&=R&`t`9i??xnjEPgK@QDUO7TNSr9?8&?Ksdo;CCI3o zGz&^g=NTf1SjJfvtyr?~9c1u<&^T6*pal3Y81*J|7L*Gfz-xp@U@>S54WNy&h}3p* zh-Ko7WKW`G#C{q1as2=Zey5uiPhx_Cjvk^sY4pJj=+_TMHf1y;UFFly#UQ{+Fdywx zsy;{{OpgPbQDWla-FN$p;!PN(^FUzO{%=Rr{?X2GJm7w|0L>_%QtE(Yhusp{Eyj9x z_JyIz8>mTn1J`;0!w;y-YZ}EYud892hsTTt**f8OKJ-RDY!evR*s!UoV|Z&&Y67YC zU#c{7Va6Ut4#{MYtgj$U_@jr*Z4aZRp8@%)DIb!BL2fa?S0W1Xx%^>C*#e+vgQd53b@_2AoYl9T_ z3owtIJnKUD31TpU(I$o7Y9G!`NWZ-2C>J(Z8Bk!f=c~IwY|G3>EH}!UZqloZieV29 z*r!Rp!nVa4!5f+5*JN4NDT;l)g?hh5H}@sL=;o)$<+_S#clo{3buB{u_V`t+qV`*_ z_GVJ0=H1F1!+KG9c%W*MGU5eeuFWTQ+xXvV$8!wha-}=FaX#ZzI9GY@t8$|^pTp^v z4#<+bBx|GY6)mH`%Nagzy>}mS#e)XA(V%DecxdTL`The8lvu;JYtfa->a@nXC&D)i&=CgleFz$(ZTc!=8RP3J)&Y$SB z-RwOk6nV@cokMqTqleO!ctze^0$ad^Cn@IgEB1Zmz@w0&v+vxWm;C(Uk6aD^wxbOy z=>tBx)}DZDo7PJBPq*GYbprmPqrI8QAF)_JzrGmUTxXqqeC~nwGRb7UWBvTjNzgFgTce;djRJz3vDpN(M?WyQ3l76+?OjiwFXOn4{Mi9VJm&Hb4P znqU@d!Jtdp;+ULKv-V7>H;m;htfuN;rRbilRgI-8tY_j{V5|GdSs%nn)6L5>@wo`- z+7!6Pc1p&%2lP3}e#>B@nPKCej{Dn_h7TS8YCIlZXZq+tY4(7R7c!?{sg`&mP}u6; z4fY*;c(;%7_Xza&3atTYyi7Z&yW%kS&f~ZQQCFkR^+L&T@ZpQ*$I2P4;~|3vwp8|& z8LpYM%soEu9QXbk3{CZ>QOwP@S^KB+pzSvm_P5*Nv`+hsM1k8@Q=-PT&)%zlgb&_WWs#P_So;@rT*Z^T7J zA%cUzXzsKYYA&Z8E)idAC0$_?gy^v_r->*hd2~lSc)rY^6;##CZg$$3`&z`ueaqp9 zi{Gc?7|a!Le%Hj@RSIL9z?jP6wYYIt14CEyNgABiI#OTYaXYYgbx6)jWK-HHI`+Pl zj`&u@abl21^6P$^DwT^iJr{jbBfuH`{DWgY_g0W{7Lmv+87AUxahr+Zc-gE`dIa+t zm8&-ZV=Z5+cN^UMYP8mFImrJnlBQ@jpqVd@QESi^-Wjn$y@$yq}7canbGKMik;BHurHzJs#v5*EB- zZ%mR_9?OXbc@X{2wC-O0`Ic+><}G{%Tj2)ne!G(U(lmoFn6 zvE6(SOTlf}Q#KdE4)4Db9v-ely7J)(4Vu%kL4$xTpUIo(imo=w6Oa@2MKNFrEIknv zNRg2NGfHmVnQ??@3r#dN9{&9lRS!dKk@A5_0~js;**|nKsx&v2(Q?3veNt5uWxec5 zOtjB$JQY^`$xRM$Tj14GMBv~BL63|-KSXpCS`x5q@tisV-Xz`ER5r~33T}icL{{Up zl+Vg?#Gl|DRLm!?A_1PhCUX4qy)ejyuDH)V;pQOt|%|;CWqv)K%<659F+BUXrqp{W4wr$&XV>h;K zv(dyh8as{6-g95_kf+Qy^G){Id;RODYz|6D2<6rRMKFsMQfnMMVtptfC4=hQ;L}cN zyjk+#$*;b@mPWFnr;}6CV@+&hTur01az-GO4bkY1MmCWUievB3&}0K;#w9+I^M|^`!a3Va>Z9{JQr(Z#mNr4bD8!VOe zk`}T_x(qBdrbw_8*54nPPw5Wiuz|ShMO2bv3_~h^qVO@rP;c8cCfZ@lWB$iKaZD_b z&T_R@2!_%BJm`{9?mm?)_9W}0k}* z`S8{|38_eyye>zRKxo^rEFT60t_Zn+D<1Nq!)1VEi%;o%91hgBaR)+w|7L5k`YZjz z5xI%Y!hH}oNF`lh4m@*EO%uYfbB}HxJ_f1UppQ^M>8?_($`E&-icy*Aw-Ssx_2dDQ zPeY+oFkwWfztcONwYZn2_`CMk#sbvIv37hoWK6pB#~mEUlG0-nAhocOiW66I&Gt>Y%gu>AAxQ}Kxe2|E%yt%?Mta#7Qm&G zVN0doo#3kDApQ|90vOx@?13#lr@bLSEQ`%!;u!gZnX3<}HdqzN>3Bb1?+!;}0ST&c z5{?s;OZH`cc&yi~l$6C@KcCXxj2}K6w7x)y3&_;?KkxnoX1%R$uREa86c7VI;)V2fe~(Ax5`>)EGQns`Cl_WK0xZP*)70TR|+CuOf{1 zI60HuM(g4I!m1*M#b5C*ULK8O@-HqqiWF)4QHrz->?S04Jxvh9DBLm_j@`sIVw!IBfDWv_ZHC@EPE{pgq|O1 z0M|Ac#kPDV3o5z{=gBduBi$}AZ0Rv%%4d~1K~XkLA0;ruuy?~2#`-N zh6@%sQ%_l0j8G?+!W8cT1XpBm-g{lS-shdH;kN^pS>+B8Rkbh1Fkg{ 
zuj6Z%bsCkQzO@HcRyqFLi~Hxn*J#29)hg6bQG2seXNHfKX8W8u&B3iKAx-y|b`RlD z3b|*}{*2i+#L1|2i^{dBTKA9L0?daXWCDrSgV4nBLD_k!S7|nJaySx$S)K1qc4}mP z7F1ePZ#qdPHDthkl}85-088JzD80OzA08zmnh~tc@lcKMuy@wjr+Ij$IXaWw#PQCD z(CctQe678;rP1hBIf34xH#84isYu^IF`ctTPTd0qJ{M!V)ayTyUZn) zc`}zDn=vqduBVMG#lh2EoM^3Xx=0qY7sc09ceS*KxLFI4S;!l@@yZxs$;?R_yip5` z3|PM9a;Qmcr2b11_b42)E`Gyc zB}E0~@$SY`B&(08AA+~ve#>DMv7v&Ly4?r{O{>bR30_GDtLUNyXN&le3-4K;>8=cL zQ?yeKH&xRPJov}hxTn}SRlC^1Nhp=OSl7F#M4xrKACi4kzP+VPkyB4`rt#oRVwLCTox#aOGMtpFS=6_4s)dzBu_t*=3CEtT9`wSR?2l6>^OGqy1HtQH*P zRhbf|^feNg!NgPkJJa-@AK2QJBWV0Y8~4k7x4!soz&y1U@AL9XQ(NWgL zOTVhF+!@Z0+8+2to=eAerf((3QIZAUU_LsG`J?@~r#w|)_iDvLW?>Z~B{wG7r577Y z(-C%?t~g#hx-Oxu%tPa3U1ZJTX^#mm<4EfIBO9D4A!C3$*wJ#%d4yG*d>nv;TVfil zi~V9sl(ie$5L5f_pqE5Bp>#wUxAx%xt2vf9XQMZ~tR@zz$aUop{0PnC8FE9*j>}s9 zrZusB=jr9E@kxxr1}(64bsKinZ`VsC=SFqVN=z9V1xr<`E(5-cLm?sP5mR|nL>_Nu zZN1_)gT|rsQd~ZmvEXL~xE_^|Y)+qH z+)&0@X+;qG)^jfk+E?|C4T$<_dp>wR4l zr$F8j;6^WFa(=JLp$`~NiAhnmNf=eHBmY9HDRgc%o;3@1ZC(_cMa}3FbLs1c^ncEkgXtbU?I?xH|W%9yAI9;na`Ll!mG zC6yvaKn+o3wK&6}0!^W~z>1L|3S+b7Fw8r_@UM~b%9!qkkd@P_m^yFeh3xG}IiVsj zyD;*^=@(@IZrSP4$?EPClv)CYa2LvaWhHG=>Fj~A2FdblExQh}MG(bzF|!7022`=8 zp=w$sh_%P49_kq-Rc2C1Jwsxk({qZPx`)*L%7*C?N7COXY}h`EumzODSmH4{1uP8W@m>|$ zN^!5WSbpSEwhY|ZGCwg%N$LtKs_KS$a>4 zsdky0ZnbO{DoT&EbErzR6mUE=~l2itOi;aVXko&JY|1 zzx9}^_tUkBmeo3|9AzM6#f)=I7D7IxXWf{iEgW0r<~6hbx#8Thv+cHG)pci;#0MVN zOxd=O)bcz$-c~mp*B2ZYWCGTWxvW`wiqbR}jB{SOw(hNtAkX5J8Y$0$XoU(Dp4%CbT9{nl5can8mR>TtO|C*8xh2iFs30EJGqgV(Mg_EAAH%TM-x8HFUTS7-3%9Q zheMIB8(jPsFqiVrdr&u&8+lFfLQIg%HAcc)jb?BP9e_^sf0lAAnaT*fzj}~gwpFc4z2Ch5J+cC$ z#s!CC(-C$XeZ9Qs9_HJ##Bk6{b1Axy)*0b_C9!wNAJ1 z)wJnnHp|uhz7y5B<>0AwN2aZ2j&_=w*y>*wwNzBVieP?(g+*7Ur(zmktu}y-F#i)F zlusUkxx&`ASdp8YvCVOU?Kn6vb?+1XLEW9j5fPd}Sj;P1nMLT)KS2sZi<8{z(mJ`ukW%gpC|0Bz59Ngywzif-~0Z{ECnRQL`U z2Wl4{f!=|IGKH0;rGC$2O~Tb*?N04x*?72vVV6ap!1i^Y_A#o%^>iM{OGP{F$>^jmz>NLYkPqtL`6X3e{ z$Dw6HL0nl%`Ci9D!60$ikH-sqLR&f6aPe~FQczBT5}}X?ceea)hOydLs3+T_!achQ z^otV@t>1jr!GgC+<6aatJ7WClh*R}yio@sYU7}M_GlQ`H?pqxr7XVKO!iN5Q*RlH! 
z`>eh&EA{#O7WW&5aiF!Xof`02?dFtLr?iY5AK=ZH?1Lg ztXXkb7`9B9_=ke*!;2qzlbT^-;#`yW2N+N?7aBXJ!nJJ6DMIdXkvOnuU0)wpy>gR%qNJk1+@ zsmSH{n>Y=j=pPG~JZPp1)Nl;pALXnLE@`l`IZRi`_CB(ph0E8wIfOdza-xmJD*f=R zpx2)SvKl&>pG>39RG?5>$>fFm!1C$r#Sw zxwv|ryX45&v8zGmclC^?&pptahjHuW5Rc_Y|MWgoXY=~6m+V7{>Z!^6L!xpwTJ|cS zN9B_!pzAHI>dK#$`&!&!8*OVGW~p03+^8JxPs#=5K5R$~doB4wBj;R=Ytn3#e>qdL zS2Dl&?cGz7(K@P0hV)vFw#q8Xh?xjc7ULq!P~1kIr=c^=4r$!#+X$NXg3^gMTq3z> zc#CoQRPH`!SKUgrqD-HpjCv?VPgV&qTt?MNKpDh8eiBN>)>H^BUw%zB1tME=)mqH0 zi8b^c^fg`?Tffl0<$!h5Ceg8a>r8~dtmgHtQgdh-B%q2-TVSEY-su zaQJ(=fgwBDN6P}?w)V`J-qtLGYkZrZWK@Y@R~zH1I?53fvP+%A!`9l25zeEa!)xw# z=b6|-KCp;*)Tv%?%z@kF@*%Ua#>~vfa>`M_CqU<6{kihEr&RZn$Hix2TtI%}O=jxZ zW{lONt>V4baBtiEkkHCN75N00=m5Y5euYiYiJ62{uKUQJ@2hE}?9#%x$Rjz$0q6g_ z7OXIyww6{6(ba_>+O2qZ|Jh9|hqKDqlwhHlFy_bjF3dlF`CWt+-2|>0-^J&fC`PuK zSr@r^D_hAY*;^Vy4Wfjz?}`ZSGPy|ttlEoTYHXiGR{1<;ao(F`wXi?!KWb4js-_f+B z@t#OYR|T}Ia|)0BWX$oV#c&0*l%H~L@y4!Cc+p+Goo@v!3ipGOIOr~ars~tMJ#I++ z*Ewl&kC)1drsXqO|?fqmF7DRJC*)5ICfEPGN5cVMqOi8!)5QcH#R1rd@;A*?dFzcEj+|B zM>72@n+sUELt$o_xE)wT&}3H!5VwFF?b!6R`E)XnWiEn$>@Y8H1kxKycnlWXGVa&m zxiC12sB%oK@VuV+@F}F!sWy!s zoHGqE3&)J2a8neTmS}1b!#10aN0K)DvK-!l+=rA^odEBKU(6rqH@+5SM)_47eQ#u} z**Z!{4vBycHm3{p5(tr7dU-X9V&cl7SpjOTU1P3skxKYPW+7Qks%4BwQUqHgT5R;+ zTE5m|7j{lv+N!J$Orm?Hv)N{mF4#hZ@>84o%_C4hG8vCnA~FG%R$_7t>%|h~;R(f0 zJW{EH6zkRyN>1b%Xk-nC@XMnyPmNM`R}m`8MvDp}0L7)Vr-8LQhKlse_6sXT$O~1b z7E;s>j22zJCy&$v++Ja0ai){)xjF4n*`+NxR&M1TUO_w4 zez@b$^@70z)8^xHz_0zckI9`8dReR~5U&xaEb^#I=**tLm1E%nc_Di1GLO7GUZ6?BA|WpJgl6HDCIz8V`de_jI+Lw}98rbZj|$fA2b(%p~Fnu0nduI&ct) z@s<5A&;*O+tc)SsolOIiV@3oc`%r)HgmGqp$hP2()6QFVbf%i0z%V~YL;1U(qDsBSnD3`naGgQl=gkO_ ziRv6?oFFRQccugG-tc{U!(5jLX-`kjWJ)=%`iO2}=S9!@y)z8nEB01L1R$l@IwD7K`e zpqoebXHK}10zCmk6tc9=z_J(6WMP0lfjX;C9`d1hMgDC-EyTqB>o?dYlTHS`fDE>-_Z4=3@?{ti zfL07wfJHR0kA~U={(Ip5gK;INFP0>8@ap5|Wd(y3%z4mfvi<39jOWdR8rYEXRKSAocn%W?9**Uof!37@m0|xQ%;mY z%(S8brYTK0(#Q|K(0hvvy(Dw<@5-6%_j{%QS*VyZm?hUS5pF}pmEWWbcr#k?zYY$N zLLgc(j%A#Yp_}v**Tmg%7Q=Yl;+L8SRr>VUFI8fRl(|r?31ttrmogANX^wFvSu%>E zI#*We$BJ{2R2aQ+8EHXK4M$xhAYP~N^NQuP)6|ihBSt9C-jgdQs?L53J-p4Em2IGX z?>p>^oyfUOf&C2Rb1s5w!0fo|mTBjCZ7!14^&xa!8re4k#D_PwC1+mA9xOX={>?Xa z@2rK`9tLf(97PZ4L5c$4TkBqcu7M!PjrULYBwtTe41Yaqy`WXa~75ZDVeY z?iSpVM%0BQzn@BrveXk3;kuUaY~Rv>=DLYIOw(s%O9~U4P!H-NfbCRkk%(PTyCYSbzU(Ii_$&E8UXXS|2k3Q@Ogl zej{&c*Qy|9ugDH7AeZ9}WSVZ=F_5|xe7FqPGyI2n*RdEQYjiO+l<>5&7c9*SJ@g~Y z3{}nnU-&!qc|EaARGx)0()lDxFT0d}vd$U^v0kmE{;1^%*8oN7PRIqvsVOE2;a6A} z@m)6cF(@^zL#ftFX*#RN{QalP-}oX*t}N9u%?V|@5>}Dy;ezRX0_lG&PK$YN$lrR~zqM2=dHfk_4NpAO%9)tUEn|G~6SZ=-D5s{XPd4wl ztby!bi6hYDcDX$v@xf&L{x8-T^zJ`(B4@zGEy-CzFkR0bn44#G??6uhRmnXR-IeSM zYF#Yz+!B}IFPs5tT(HUmmYa5)axWFo)O-m?RLGT7oEQ~15zvuHhvXDK{9#cyh55;= zsB0Zr6HBToi8{C+RC~;ZP6l@Qiq5}c#w{5AOWjQgD$M57p;X46y8V09wJKytcEO3T@@;IFituVzt9!H=_9zsX{|yY zgY#w&^S@ZWL>F8!6TrUliEx`%B2Q}$g?9ecMW8@~9gJw>zz-@N{|(pc1QS9(S^e!W z)+9Kc8d92(ieC6UTRT$0QtVZ-<X3 zo60TA4X;aLgAdy<%#B4 z`1<@WXwXb^IIjaQIBQqv5b0sSQ5D95WiOD7ZR$o&HX8^l*Ae22>R_wMJBN~;uy{k+ zp%GZ*5y^vMn-nL(cj>jsQFK0(NMVnB9p2F=)EICLKMGjGhK8=+E z7|eM9-d7=6Y+^_z7;$62L!r(2QZnZPy_F8bzQQ;l9>n z2I#P~WT#QM2ZtSXM|0_cG1fLQHWPDdr!*jen7r{OZ4E1Y0-lC23}0iC|E0qka86c_5d6Nm_2vQaXe92rS& z2mNl1B`$P%PVT2;N+gS`Z!tDZ1rr_amw~-qt}=_g>k?=b6v_dn&*x=l5fdVo;t2HS zC7FBUeqxfYp&hzQ^tgQZ=yBtEPq9MF_|+y=x`)EGRk~2MUW7T;tn)6&67e!u;-?OT zTu_zzpq9fvBBNQZwQf=J+Fq(+FX5!Tn)hK+UuS!Ps%JPVa*))mV#141(%VaEDG!oa zu2Su1NW=_%&el&SC&uj45_uX(dG%2wUT7kJKLUY?Y9IK z161xc^P{ zSm$w?h@D1H3hRLDyhumZ&ZnFH?|tw#0v@+S`5X&%ex|W6*B+r(1xg+0roO3v!)Jt$ z%sQCxVMh?vt6&q|PxN}o`KvvqoowEraP 
zwc4{*BjXS*4(ymtb6nD#cH{9x=Oft3WVUWdHe@-d>X4e2ELj4<57HF440sJBG@Ot# zWK~racs=AOa@Zz*K{&LKx*`3dk;RJ_L+XYo2zEmW6d-@%oDwAyA}(ZOx=$&UDI$pq z(>EzdBalTw4;ddI#FAq+xFPu90+Bz6_&ep|A^Sgs5C}-90`V9^4nYeNE((FuLdYL5 zU;xA(kcV?TLU%YpF|xEkh$X*nlqZKAG)n-ssD(};r=bf{E6@>!PX-Wd9_V1<;j znLroG0S3kBlNLF&kShz}-;&HvATt!NM0O|0wA9F8v7rn^DFVr%uqm>-pbRP!q7Grd zsE{}q0)~bYj0k^WCgO>pszc3(c8?wEd^gr@02`3Ya&a?_rBV8uM3xxO;xkwV)OjfT zcmy3^g&!eb5%TCDINUfEM=6z4S!y{epUS49aWs;^63Bo+#n4V~)_WuyIgAY}Wu^UC zzY%Oi1*;gsh7kOSjoCmj@8X%Ge@)0v)HT}R)dog zR)WH49JT6MJ!Fic1y}G)@>e=$EoPVaLqloNO&n+c?Y4>A!CQIKiJzHdmM}mI2j#yIo-dlN8o|Z zVOvr|SNI35wFYA?t+xg?S_3Cp14df|Ci(=lSOc-ou?D_r4XU*UE(;I(VsPZm;)pXz z!RI@N+^tXkVSd`}^4Qj1;ivP%&ZY-#wgx_L4O|})cr-O+g=pRLH@8C0?gO3CTZV3f#K~t@P;03F!K^v{Xi+zGH zig$edHikQ%?cjJe!MNR(XZCqVxCmDB$Wy}+McVEvRcSclA6C@yB1mV4Nq?=g{kL@i zrxMI3BJIDe_5JCU@Sm2%e*b*z{W+1J7b%xh?M{Xm&xhzQ#Yy{Iq(gQbI;^V*vYsQB z<=z5uWr|k%#7^2PNn4$yckDU*%W*&B?LOvXk#;M!hHCL`5&cdO^3pr5#_uM1A53ss zB1GRzrp2DeLE!bPv^@Fs2%dOI|GW^1-xL^we-ZKZGerb9WUx^JZMdfp*lhgM#J_c; ze9%xZp}}E^gXE7@7fTHmWUv6rC`6zKSY)s?kx_-x>tHR4j*f;(h?`hx$pVAA6a;~_ z8oD9Djba%jly9tlXl?D0-?V8H$iP|$ z6$y9Nty@QU=4b&;VYTeuy*nC#P6~aWawnlMXoK30`{)<8R&2F=`wb{15_v`8A-1O8%@0m0IncsvP?$E#zKFg`_Sh3-tql-!>7q}*1 z;&-5!82=(8VJtU18Mz_~34w0qFhO%H zjrg)B%@Gn+X!chkMEm=3+B%^aa=5-m;k^;!00b~bMO#l*OT=Uemoc^%8`H#?04go#cHY3_ox0Uuhl?Eh&; zKtwYd{PqPRCyYIBXd`~F55lM95s0xE`@#?>=BB2J5eN!QLXDsKz|&_t(j)qBFMzbZ z!VeU>0SxN4mX;jB0HP6K%JF{(`>)^QCNNO>KLvyrhs7TjvMTtv#8D4{AP4O`d=IWmiePn^}OehY73as4M$63aG%`#mL z(+A}ue>K{O%(iyb9?WVwRRRg<67o*mz-$D7VsP%4(t+`d78 z?*|TlJ5i%A^4@+MD=iq_PJ}J&hAQI6i#?eWrTT7nLb#YL`Nv)U5ti%;n8zHh(rUK+ z$>UHN7J*QmjImvrO1?AeJew{_JM0yhY-AkW4oOYZ63V5kp8duogcIC{V+`GGaYwcq;57xa3p`&=J!++4TgoVM9E zJ)|33Z*$+Q^DU#5w)7rvmontKyv@}wf0=Q_;%`p6Bk87Vuii`L#(9|?;-dlmQ)mBf zor8Noqs>fB!xpi19GR}vvH{7@gGk-DZpbfsv4P*?(67b9M0$HsUGzuq2tgOPKP~ek z0vARP!a62rvaE(R#}~8DHydgPhugTWjTr~PaB$b zk>NA{dPBYtl9Zy%{vq!m$*DJ*)!ug6%uPx^|8~Fu`9}+7jgPmUX2Q%CqzhN+oL%O{ zWtK^Z=B!6WA14e?Z&KYvmhxfm9KM)+O7+;B>GGDU|B?14W#$-bCKhVu7+B&_%|a$$ zOR%lZ*>9rI>7oTvIk7n2w^e}Gc)4$!th}y$Me%HG28%yhU9v?9vTmLPgL{;)R?$+Z3-Om-nZkGqidnDY zuH-UB zuPonyx2K(&X-+Y!-xJMMVbXi$>O95Fu-MqTc#0i1Q!kDD+V&1!vb}QWDE^E-VLjtz z-tS>Bn>GI9*rK{UI490+Yme&^pN!!h`scPD%qkOlGH0u(-Tw4VI;6&tX-=s#sk}I)YDNt8y zhD7ZP4^=#K&A9z=H z%SV6$Z7q-?lRugaR=|3SfJyYIstI?oZKbi)68s~j>4vCU5>Iv@hXhNE_t`CR`s-AM za5G$o3O-wW9IYW#KHMLb50W!WYZ83T32OtRx^1r9I7!^`HPMbr<%ZZ))WSNvS<#GA zSd1`RJVMO~6?Hh7Q3kP(??>1ngagCd0#by%UjvyYMSZ#uwMiIBZuV%*hmLa4n3Vm= z5@l#Yg&?deM(G76BhKhSJbd4c#r?Dl7AOX=^@YJ@h1}S}^cQd?;Bm0wOoq#ae+f8m zg5iP)TtlUYZHxZW$g8$v-L5+r%lkY{Q~B1s?)Mc@AwNaS$R5=xCMf3C{_YXjbo%71 zE({2Go0d%Y@O(N-Fd+K8P8bPzd$(}fdv#DGdMEq-v3n@V6+nh{9?-0l7~nISQ2wE) zZA&ch@`LW6{cV@IXYeI<2w(tTzviDOQ2jDsyMd>l0Dk+c*^8xL#`n6t-8|Tu1kv>Oy4FN(6 z@1GGcxQ*fjoWh5~Up|CJ-Yq&^u=&5q8pj|=`MwZVh=SbhP z&gq4Pg&~)~QW0(F7WlzQpqhIBA^)!TfA^*v)%4o(x#Yj;9eIg@0(RJ!Xcr0{rRbDG z&tZp>{|V>g8`BKDz~=mXug|bg6rC!3xST zGuruUAqr|Euo$K?Df?LXTX+l7u0dHK>FhwXdX88dAf^t|Oq6eh zYK~10BN`hO(gHD;cp8AfMIhC{ipKAkmr)MA@Qy-E znZLv;B6-CB6?S%Jn;WeG+8!MaPTGSNU12fpCxfP988s9+GJylTEmVCwA>l~u1M9i{ z|Ga??Zap@6v!8CD-|h(Z4@)0W&GwCJmJ+Ye0iqiPac@v0jo`+uFMN#D0{^adO=nc# zDqsz4^d`{b@n(WCnLEA`&cEod`An2QS7YMqf3=KMqf+rFo=8b%KbF9d z11{AW{i0ltw&!urAOfwJc_B6w*9x9_wMqC_@y{q|4Q6!~lai})-SX7<_9Gc_SEdp- ze?61z{GZ|0cf`!9H1h$~J3LZ3Gcvs)OV+Uwah;fx_WZ2b3r&jTRxKg!9YU*{HS_jM zAbqq4+y%n1yGzzx>zeE4p54Q+gBI=i39{=uNJ>96^CzJqn-*4k1kTze8xHOx#t|zA z44yn>cHd9ir;s+4o-E?qbw-PT?)!&2&h(G>tb_p1ZBfAAWmax(?Tgsf>9?G!zli-)Jj%X5MWh!_c{VUgboy~`+;7Pld@y?_Y3N`BPoH-s 
zr>gYLbJD5huJycYqjWR`g9I1e>U06~7h2eGR&{rZu^oQm5x2?azvcWV6hS!aRGg3b8ZAK(}SL%!;k2YEZ>)&@{U?pd^6AN4dXLnX0ds56Ovf>tx~hk-Kd+HF1yU$WeA; zS%wQgfM2+Ws+Ud0pjiECJPN7{kJU=oRmErXQ18vIWk4s}4{ErX$(39;6VJ#m4`TFxLGolc3f?Cdc5;=@F|Yqq=vk zw^u1=W}M&A`~6UYI^^FTDsCF=5YdlhKb>6Iob>@akzy9EDVDj-?45rpNz@7B8rEH! zlqEXjvsT21TG-LFA6z|0ZFbWin#$Z80fp(=mv&R<|8z6T7KX<{%p%&6WXwW01jT4% zG``2^T95N(@s1~lq!`ElEku}P(r{-}{G(o)-pL+&WlM0rs7B0Y5$R*n$nss}_>k&x zT!qpt-C_?^Rrb2uW)ZmG%NQeUy9%{HO+T~dDvubGx?Eu6L$ zZwtrXsG$G&SS&zRkNdC^Uo}Wa`ux}@{mUx22Qz&h`I+SrA0N-GfLGO*bwW3EL{OKX|DtL+`~ zlMVb5Ks{rk9z}5q1x9k)xPlGO|F5V_tE>&KdV5!auV?O@wAO>t+mE^byvjMDZrI14 zdE@*Pu8tQ|f7zLUarkHo%K0xQ%7*p7QcMCb2`YHxN}lt+TY_sB4J679tXJZPv>y~1 zi;IgXeb4d@!Z46UeXs=wBDOZRwu3y1J0gjsq{TY~;FypOk^dU$M9IP|!MIH?x4IJ% zNivhpAf2$~z|g=uT7>3Es&PhfP>4nwLa)sG)Q>3pdW6bF#bt2xs5nv}U4p7%WJp>I z7~0p1rIVGg!>%#9g&@WCq*zRmNeQ%_|5C(2M@DSNosfL>n1PYuh>Giv>>VA=4(1pe zBhW*)g)heI{~F3CIz@%@5hZT|O3c54AtNy5V0q0~1?MR4UxAT8>qk=ySzFW2F5)YB z5W2JsEewU(2#;mO@CucUI2pL5-C*m?{a0x+Z!UL=#?j89y9_}av!3J(?OmBKekA8-6Uz!Ac{miGa%WWh$sfn zbvemXw9l3negYWK5=&16KDF7Y*!AEua?0HK1ema{P+_n)*v-l;`8Zgjl&EJwd(&gI z>#N>wcDCFMD%534qFx=_G+=VB`DRV{a=coIoC9vRp>mWi!%*Q1(BH;@D zwVbIxAo4yf_!0Ff_0gkw@v>&SYu63zkQrdn9{TF+u=?UM_U0_x`KezIHBGBD)a_My z=drgIBDNOND?ZkAKYtiq7CzdFPuPl|+KFe`iaV^oOzxh#^l9oioQc|X%i6UHbL7$& zwP_l@uG+p2b#%@a34A^u1Xv8TYadL#pU?i?f`_k0iHhDjuwp^uQ;d?fwx=r5&x4(1J+A&-?r^|_K^O2m>aH)_Gj67cE z#&4Y<=xx<9(X8A(#$f@uSB@?)DU$1nG%)K5u}mH0fqOL`jFI_m@knL4hwItDO#328 z>Xq2gxYHu=1zKVxbF3We=(SAWHfPPzDVJn9s?!+1EZ@(ObuWZDNp%x|C8<&{>0>bs zOnypH^%3?^vCtUw?#g|%?e0^?Aim%=OI7OuTX+%RBFEvd-B@^>%^KC>?TVHSgDmxD zDBhS9z4X-7fgErpU(SV)!;m8PBl}b8l~ncl-Yg4j3(bzQG&vdy?N{o-XdTNGaux=E z196j*%PFPFbULkIvvZ*J7H$oZ8UQpTtJG^mRFyFMKr>Ka;X<%O#xWEa?-JtTJ5u@W zcNBi_FZW!eK1h)x6ygOcjhO$u_hMkIXJLZ!Ocup4m2ReZF9Z37#i96*Ffa-UZyp8{ z8;nDs5(#tb1%xmu42IPPhlY%tX9Q@}?~K4VAVW$FshbsL`K9_{t;V-0XC7%58O_GP zytchV*MiKO8XLhOAZV+b%#H_&u-35J(1J7`+-FL*O z|I739!w=)j>lZ2!n*}=muhP=eG&D4OG@;U^r2qy*c&gTuFtk>*7M$xZFtli2h$?bc z5J;l5*ydp1IO%dY^?o<62#`H?f#%{=HuwD68j!v4|AqIaFsdRv%frmfj4>is5nu0y zg1=u}Dke@Xft$@t1!@LdXA>AZJ3C-FX4v})NR|NP4s>!xss8l>({aOU;3#&3SkX?K z(w9Nt&*aX6oOBMQrtA*THz9D3@C56Dsrj<9GF8fN+333T3Tq*qGpYo$?5i%+KT%5Z zhAI3J?h3CV1_Iz%r30wn7_Sb=NX%}=j;T0q2)>aC8O1{e`68;szo|VBIK30sC~Zcaa4+@Rn%Wu&OlIEv#a=!{NT@mF)NC>xF=9) zA?vdqd4eu7#c}?Jrt)55C=?LWa8b6{rm$8HV%Ef6V!L`I&YY^+(2aC0)A!ZdK59D2 znW?e=>MbkR6(g-{tuEi@{hcG3SkxGN;SLMkg{3+wp;7fJFN5E7hGE~*G`Ow#6=Cnf zm54JT@5W;YaJQ~iyU&U-G>7;D@X>SnLrgr_Y#bSA{Ey$UmM4>C*K8h7Y9 zr+6AJJ(64T;-;;+QyA7(4HqsK@sN&O8J&+}Fv(B@+Z={=7O|^^61`*aR;4iJ5 z8hCJ={VYL0K5!`rKgZf-h_m7VwanIUh`tP}w&}7EI4{-CpCXB`>B+J9otX3eGye=) zdP-}0$IQBMJghdT?fzUh;QaiMJryMKeb`&eyfm16I-Sckh`ar@wfJG#{;uC-UX$hO z6L|aV`8xfYMQjVYny6S6ygYl{{!FK zm$8p4Cnn3}`RCrudX)32tQCjz`2_`XfAfz0K&5qo7GN&OW|_XEU$@Ca*7aS7$fl&o zPbgCSFa9P?gN6iVjnlUoNOZyJps*r5u}2n!Wb+U@ShR3=Nj+|ox}K;vSjqcUtexcU z*gU2@DlRG}!-Fp!8p5b42veo?QjZDu$DD4Y;(u1%-KF39?b(yc^i16}>zW;4{5^O! 
zaybpj#ogUhIn(}2u<+z|3^JfZwxZ2oTC8i0@sju{dw$l-ZSo8M+kWG$f3kvGR_phI zQjU73e4q0yr=}i!-H*28Zwf^oN-2PD-5>XK;fZA2l`8 zZDyIYKE|y=kWCx$&S-+yEuDt*G?x&Xkg6MI>KN2G#w@>ccaih(7(XkNn^L2E`tF5Q zh7qIs;`mtUFcR;h#(VJITv_4I`qD93dsjnqTZ@0T1u9c3qb_<+c_TN=Zt^Rv618qN z^E6n_G$M-I%qhd}T9Ui5$=#k$vdh3KOTp||KybF9cC95m($}!Bk@X-#ue?Ln;1Y7m zR0#rq=dWC{m=Z|ib(1&Vq|##j&}V)!-WsH@Xqni}jOnK3|Faq4sTe@n+`!!4G55cN zZYCk*63}Lz#rK*6wH}7>Inob{sT^Cvyp+*EM3zz=+7J|*%B(*0d0>ldau~Au$M;wv z?AGHx*Q2bP{@Rm|3s!Sl1q&DBmcQ+-el^F2Iu_br1`mz^ z@zLA9R9oG!x8r1*bHaKEP|e);=br|)*&d3uTqYIPt;@J~21O%0jVa-6S`M*5P5x)T zkt-qWhXZT`MqJVXq{;y-88vicp$K1AV{d3wMTF`Rn7p$^(Rf{tP=>JRfoWkf9g9g2 zn0e@TjK6S;L?V~_6X~Q7qW;wM^kI%r$JhxZI6}3eMlcA};oym+(1>_qG(Mw^!YC9N zo1%`i+xLn5FxcsZ&eXVU7m29mOVwJYK?Qf?`H&Q(hOvaa58OY!zo;+>vxt-iJBEEv z+#>yj3I~@PY@`gyuP`bHX6y)=H*_@m0PckJk7C@A?kJeVS~-xw#1Y-YQF=6*OH%wU zax9z42)tu8u@YR1Fg>wlK@DmKoLU>MW#$ripK}V9Z4@hDKM8hpRfgBwMulXJ#ZIu? zK0;y(^q0d1v2VeL%U!D*{>5RW%ofNFLZH0Tq0UiakqvJp8W*AI51XV+sg{fj?)!#8 zvJT!?c4y}IS}&mVOym0%vm1mRExcNmJxW7Y5{MaVWD>3AgfvkQ4YAtWXUnv&b@9jh zuz^qJLKLNI4^wn3%2~^vM;6+yU9=T%W76W;GFr$uqG?BUCFR-nLze?PB|nEP4GYMB z^L891zC)TC#$s$I_o4}X^*T_BrbrRHL|+|*wG{#A*&=C5sNI3BQ`M~DUozpf3gTA1 z>w#-PsIoL%lE^DHvN7O-V?c|Q;@k)pRL`5j^g~vz=1^n7x-plls7GswOaSdmixpP0 zYJ?o)K+C*e#?;>MHAX}Lz+wRArwuRwj z7Bh_ber$K|N>=P2lj^7j+*o-N+g2_t*crVy-Dq6=p_Q0I9QEa#SVGe?R!RTH;OkT` zSd;rgO*Tc=kw0J$u~r{t&F=vX_eZO@VJ2E-k?zxKu-F?lJSqZ=Tzz20VlQZN-SLO1TWC_buXmi_|J$y=Y|JIJ;@(|OGnhFQjeGO@6~zN} z&hG-MAL0(>^Nv*$q!V=~NYXg7h>MRzT z1apqhXAC5$+og%Z+Y=UX@W75!O@jmRva+&bFcGTI%_zasg=>JMwGyQzT~8K=T|b$` zYDr)C4C4~d#^67R`cBQyJ_8}l3#CkRl)#JnuM!dxBB$ijSk)_BeVU^NP%-2ytK+G( z!hBfz(GlWh?GS&U=#XQ+zx2zGZzANv?8i8E&d}^v4ul7VASA-Ej7Ea30kmj^QBuvT z-Q8Ui6O-;=zk0=Hz_mE2VdHyx1Sugid7FKLHK8d$&vg5- zkYQjVfdoWkq>!p50RcTLD>?#pH8i|{Cx!jX^K+1hvw_xc-)8{G*H^(T=ox~87bbv; zfOL?}AndE7vksL1p8sFv))qoA{}Q)*ZeeO#Tv}QP?_Au+_kZ)Ty|Ewm)Vd>jRDvP!r?U$bk><~{NL3W9M#lh3kAfd{ak9HoPngXie$OOQe zxnA@;sV`CVd!IxA97{psJUnI4ul^Yl|4z>=$Tx5@C$ZPie=*-aWXct$xFO9%t&(84 z($O@F;3uQ39h3*GbM2tT3k80h)33nX#M`%ej49bsTB}1XZi#$Ne4uBvOmEd>uPIm0 ze89qAL}9L!yhE#V1#ab557HVEPDN&anloBJ3e z{7E~Y)H2?U?c;h86TW}8JSUfqH1eHVc+cmwApx&O^NiS78x_Z{+!;BF&4T#m))PPC zy;ky^XSeFP^<>=mu!rGppf9hSh}ktBCTm1;8l=CQOqmJW`S!zT{Kv1mm^V?bse`O|S?DiPqNMiJO3$b7 zq~-93*hD*|io$NzlbJ1EO`G(pyE=;U=4dxqdRLiObuR3GI2WamUXNSaXV%{#HQbF9 zL6&T&UZ5G%92-?!P-;kihv-gWxNugNENSeKH6fPOv+(jxVWAtH!^YfA0V2R)W6t8l zhihqx-09}U_BG$Z_RWcjaxd|Xv-g@ox{uP{@~V!q*}4bT3h=FWF&Ywxx0awIUQSiq zIxY%B4|b1HHA*CBoA1tFnST)OS2T=|dQMm>vO;G|t5S+hh%!SuQ1GsdEzPU&PIXaC zv#`qG;@RWkSRT&{XC$7n$*?@%yJl$abGFS{D72b&wq@_0rQm$WmedZH& z_${NTQN|Xdy5{cz3VtJlpsWCzMKIO*2VKqYN`m zuYHJy?&!gFe4tpn$pUkSnJOaFmnE2LZa;rOF~OM`)z(~%ZTb+8Q1(o*$%^xW8w}aw zH}RS;IH5<=^5&|M?o1&DNA0owhsU-I$DZ4mMVwii>P?11 zz<364vo6fCZseSP?2=yWlD@*iU#V0Eaz~$u%)ZqWr=(x;4Zl>&1%T%~yfE58(f@Hh zpf^v(26dJ^&6F!*h{Nr?2UKj7pb9Z|c2qeOrJNE`NOXVtAV50wGi7i&ky-yd9CK_? 
zc9$K+_k`pIyP;wr{3Y9cJw9fe3CKxg5&K)0kXh9BRIeYJD}_7716 zNh0zY$Qkb9!H=MB&<2T8#OPmAxNVl`9NEBgq58NARbwOTePN@q%#^;MA&HT^P{QxT z^#_k;rDv;9gModOnjsvGP_-qtR+v>HU`paY1yV|J zyH(@OUBxo;nR_j<;`B!sL$l=^qx7{kWoc-|y@I=kem3$It?H)9#xsBr&p=oygAIV$ z>l9>2z?=vm`9>-4wAq`Z-zh5*S@r@Co8$NPo*;h2TR_W5YfU@u)Fox3*i zb``ptB?fK@g$(ikkkj=z$yG(NtAD%sQxQ`m0W$c=vxUTaFfd6GV)uUA?qgl*rQqW% zQ5Mp3`b>K!+amxsjZ^2@GileNHi%7IWr+u*fBM~7$3HHMgeiHbwyK{f$z{C92^vCDJ!R*J5`24k8xkBGw12a{FAVE75I$HgEmL>IXZ;z8cFx)7B_ob(r2@mI*!zjX$J&NhAF7fKXl zWMtpJ!$HKx$19-Y@MgZ&ch77#Npi88``QHcpY?<;2uXTe+E1J>GTrf`CqB*r2^^R|`# znZ>$YrLd^mr823_0>gsIE72HJ;Y;@$%|c|hW_hf&L%{=0zT&-kfvwmHWD@+J{xVJG z6I(!Cvq^l-z8F={dRmS^=0li>oIsb=gz z_~^A@=_+eyJesAM()abK2X;R7%a}JOV&ns z`D4DR5a*Qp#3alGYiSfA+@)t?`6p?a2WL6!PS!Pi;P>z?*xEd7=>+cZlBQSk)XIrn zd#0HO&%#QY-PWnuXZ)Bei%v(boyT{Dwt={u8RyBdCR~AZUYx0j`sCWo5Tx*AQlvu} zeviz%!FA5g zJ`wr|Cd)6KgmG+_2s1>DmhL>biX*GUs;-CNZij_uirx(_%$+V#;j+gHubU;UQ}Ja@ z1dm~B0QofG<)p1MQ{OCefM2(54Of5mM6vU9jRk?zJ5wiw0GS)aP@GXqxo&WA_UO2q zPmCv;V8MOHex#q|33B8tjWO50dPFla3}Q+gZSL66NzKwCiE=X|SnQDtMJQuAM=3Z& zLU~3k3agv2MEb>8!nzdJKt9Yz!0Q9z^`X(@H??jwR(qSzAosJuPVVs9<+hn zl9cjusFoE@uhh*WcW#xXrparVEW4Sdx-Smtq{Ip~w9i&A;lY3VYH^H7k9<-rN9R`8 zz^(C0+e#v<9BQVN$x;x)LOQ|X4zk#knjY3qJCTy5k=Q#+`FFlRct|7T6|RCyjUB2Z z^KPmG^4f-1ZX!k?Y+yDq=PdayO(e)!yrbK8JRQznGP(<%0zR3Wmqcuai$V8zzQ<=D zs_{;4Ga>iM%?f_7r(?d~A=kt`E!7emuO1F~hvO?4UcMS;d zM~V;U`4P`^0z2!6CWn=Q$c5g0tCam9Cde{gu0YwpW-z zo(|*FM7tk=FYqXR@@1a9_hu}EN9|0GKB%8+mn|+AC}z%05x+xGoGu3Yv$2BK_vk{J z?X2tkhGpdNT-6k(tcEZ zVog>v7Z^lDs4hu27*AUD>OgWirDjWle)4O|Q3UEMprIiGM}Z&;gSO|Z5Dvf5_iZ%8 z(5qcBii;E!N5qduhE8q`K1%V4HNGlo4tMiAeD5nWtOyK)GR_-(KSwA5i;PgNuuWn! z&HYzR98>%|sOPk|;aS4)FXOYkmC5k@lweS!ZjewUZ%WZ*%|c2L-v+~1Nt81rhg}hDb zS4q-x1}GRmXOzh_A>lk(9U9k_)(HCrk9&EPu*jW}`e3#54GG3=vWMr;w8(<)R3;#| zbjD?9>dOK4MrXIyB_MkhM)5+*SXW|OZ0V&+Quy~;MEvP8=$fEUnIjQJFkK~AnvCdT zC$S6YQiPPRU~1w_c1J<;scu6^!NU0a=KX3YcnI#nRZw7%EXjWwuVHwlD9DWh4ieJF zT6ag@fIvh8N-rax+Btnn2F%`%CTnp2F(YnE^r4wqa*m4VANOW{6;1@q5L^!y@7VQF zIE)l-c)edyS`pSr>oaL=;qZhdWi!(fyUh%xqgV}=nyuvEKXS>Qp16pxO*~~yG|Tpn zs$a2Q zHiF7mQqwh%!StT0HbA-B4#His?&#kC7Uxuyb5}d7)a#mPp(5TGxa>BZtp8%BeGxE6 z=Ue}G0jDf_5G*Iml})x7_GoyX^*Zo$r~lPEQ>bNVEXz_oA?h7j;QlwXDbs5SVS)%PHtMakLn|1qK59?AR6{dYej`sfs~!7KMk?p9jf-QOpL6Wm^8sGSDF@`?%t0` zg+32`be*P^8M=~DFvi{+*{5b|X%sA_P{4O>;6rIaf7Mq%JF?iU;Ne zXJ==lqN0?p3hY&zF(M(sfFy&`I^ZN144Iq(f9OCY6G65~F6`tUAZ`thxCA{e2;c|+ zz*`8iKri9d1+aVqup^#;4RRESEze!Vdi>_<1J_|sz-9(t?HcWYy3Fy_z6$Ak2FMeR z3Quh$ya3s5M&n9I)=RnfP-DPPBC0t_k|t>&N#vx1oBw)p%AvMd?k2}F5`?QBLW zg~igED2M=8)UstfKq!U1_aZVSa&)2ikoJJeTsa>J2+c_RH+X6^E%8-~bY%8qq*Vqr z8whcH0#ea)?hB%CB?>=8Lh;2-rtfQ&;|tS&OuOwZ60z z`x(nNeQptjz#@i2N-VIB^c$zAxpOp7K94rv0i z%mMsT0-Jx#vbrAv>vpP0YpF!#EsP1`$+MPETFO`v1iLe@P8-H0d53+Ku^eeu>oXb@ zGNr~IZL810tN59pBMe9^RjXz<-y_GJ?j*s1RRdFbe% zKJ!?&|2IHj|J3OsFAQ($$%=srZ@uxnZt#>L+$O>6WU6c}2bPVXZN9nlbgG!w*L?8| ziRJA{<*i2KSvUW}7W$Q)XTTY_4$%_3~9GdLxI0Z&GW^*}dH?zb*;8 z9Al8SXn?-t11qb?cp#@XjRtp|i;-eoa%{&*RK~w}PVfirylm}QT3JzHvS?^}hmFRe zRY%&1Qb%6I#+c5SayA;BLX`5soS2tsGatD&F84~2y}QsvY0BE;ztbCCa|Gu)Khnni zjQX@|0X{5qQR_cZwet*z3Vhi=*!kP7D+A_B7c%*Ao3O;$54t8N56kP5t7YocyyOJa zU%1R1+gX^FRIto&*Tgdf7K)szJydIN)dYp>0gGA-cTqJrOxX}w_3IOL2WgtI#t55F zm!Uz~24GQ(PTi#NhS5L_q>@tDvm^FRr}(tEHmgZciz>}oUtBm`WE}F?6-&BQ*qDT? 
zpqk^N*yARi@)h;3IhGz4`%o0653|Kz*`Co%GdR`dVsUl#x-_-*aQZKh;?J##il&N?Z}?Cp8Y zJ>9&zg}u?mjDrfrb9$Yzt(%XOiqzOYm`?i+a3is8gJaG=?GD?uJh+uT2F$}RJ{NHk z>c%%6QZ;Mc4lCyBT$#3xhR&HDb-;<~;=G%Aj;}ixB#FsC&U224^B#fK~HwKWM6j+*Yh5*Kzd1LSYs8)yzSed z+Vkb=!YJ!n8*^#^V>&JGU1Ktco#J4>k4{p0|_ijQJJ|j%fwjy zQa!$3#uoW}(4Q<~hZS^?3uq_%$in>%I>F~Cqdne$K`>STwd5VU)leEy!tIaD=blxk z=3MWar`@7%Tm_?_+0AM4%1POzWl|T|xXa??IKsRX#7bp5pq=!pb8ir{GMrwSBLIUzR z`?rA*NGv2V4z>yPPL%I!4i4V4c@g|5aQV-6B>7TAo>}q+zLyazD?<;-4GoS-9aJA$ zhC`?wVyULC{?#Z7?MF&XTz{Al8cgXZiBvdAcr(t8L*F+wWqSHO&yYBgJnB{S{Y}bp z+)TSU%A+4sKAB-C&V=X>SkxagFEsMVJ8fbO)H2A3W<8vzi1);gu-a+clKom-JKf}s zVwF<)v^iQ98>G`%(MNzr#H=<(!B6R9nSp=KM)8H~11YKRlc$OC7izM|N`zE)mw+&V zFvHE?dS?81KTYCBPmG2_fK>ruqdAF%Y$mFljv%nt63igvg}h|KhOaj@K12_8k0Jqf<|=I@NrEt#kBY~;Z%dYq)vUO= z6DdsscTJrn{!bxbiTE}IOyxT@L6aty0`)x_by(N{hHHU|u z;siNSC(nh-m-dX}F{w!j&qJZfGS4$&QZs-O#qkETbG0MOoI%rEC$cU20UU`D=?ot1 zYBud)&{Z!YJAZFl9m6a3m+%FuSbOW6!xagh9!Gx438o$QwH`3mtr z;q6M|&3|steYA0uJ8;c8L))R+ECaNL0?FinZ^iGOot+7SA2`3ANlQ z6`K=X2xMXKe-xc#Q09Lhh5y@T+qP|PcHP;w?Y1_%Hf)=1+qP|Nw(WX;&%2#o>@-a? zo%?*x=eh<{dpv8&55r?N{;T0m+=8P(C3S&fcZFT?{diawCTzhDm&DrsHKSY8rreYd z+_wdaM23Mq3BM&Bge|$0)N1aS7#sK0v$C>MTd-sLIw${t!-jLh`HfJf4se~5p*m?8 z7!dSS;86xOQAD7%b6gcH*=T4YoYhQbhjU9q2A1oko zo}%RnEm3`B^qI(Hmb}SyuB+90C^9fLYRCPN`2S1w0P!{8+jFq98_WwplLI3}5gOzt z0ut>^uBc}#fD@)+hty;&n%5rV@x`@*%k2PVU@7n|)^n@UN@hvY!> z3dB`p!x3a*C&m;tcvu8w88d$IacYU*Osa$?aR93t0by`r8&#At4#n0oxkiN=Ivf-T zgAOFS`|_Npz^4$|2^c?&eQ<#3pp+vy$teFxWf_dDM2&<(4W!X~DVK6c*d06NWK#6} zCE_^{**;!S(e2&ctROCl7#2ST8GOkgPa0_=^tWY@95RB~Bz5IsoPj@Qd?Yvwma+>lmmnzhR=8+rhHgYc)Sf{^(TLGg4qN({m5)e-y6GF=A1=rH zTtDAq_3If&6$-q<=v*z)FG*cUkF0VAr#a;rD^cRc%r{!yp!i^_+^o;2QCd?EHzfTG zECBTp*TN$7M(&HT0HV?;M#*C#DhBJC-hrjxo#+E)^&XL}lR;3T8wx{$rAhnV=OOJ0 z!u4I(zK7*bA7=7aLn{4CDf)N*`*`8*KY+ftf!@QV3bWb77j&DwGHK($h5qlg6Vgg2!>coBQ_$xm zXV!6~B9q+H8$;4&r#0&hI))?cL^*|1_IyV!acoif(n`0FEV_YgKr+={4(i1^g}G+i z?S7{6(4dj^}Ay_Beh*DG^esMEhmH{r?h z#M4-}vbF@i8|&!^29g+d|RByrIl=D&q(+EE)@uz=0^ql=1cE>#I>>fV|bZb)T39KPr4D7 zQHtcZPy5P1O0Xu1#ddEyDyS@Nyz5Jo^y09f{-f?<6wXR4f`bgCUNaoj;%MmoqaLXolihxzVKnJ;WlI#x z6^f~R1Yp$;|q&76M(9*QGW5n&ET(S4%eO|F=4 zFTb}zH+&}RH%EKwCMAkugYb&x5*+nh| zDm#|RL-Q-Gfrh7_kkIq&=|J`})kg=*#m(@^(-G7bHM2qVLgw9amg^#c%R(csl_Ixw z+(~1lJHOB@kC4QyV(@`8<1fAF`G99xm<(EbhX^yO&egC%p5OoV?h&Vn?56R!P!7Tx zQG|hD0}xQLM$3QK5vSR&)`>SF{lE;dgffD3#+3v{Gg1?=I}t%UAXCn= zl50YXBa;&s{+bqX1l#*zW0{Ql0eWW@uT}}QY>?QwOX5WnKvibMEjGpc%|0`}(;r6i zdz&U0c?PC6T${|xJa5I3NO6|zGBHC-O^w4Km|j>U{}+J_GdO2h*AEQbY<4Tuk)jPz zY;jpxxHgIU-~qURFmMMDtMk6pg1BUh=xX+;shB3zJxy|8=!iVT0%{S$l$HG~4Ppr%`0p^iibF3A~4@M0SDj^zRVMTFX>2c zkVrtD7CT9=L~RK+X3%XC1&*Y1gtYl6wL~nVDgb}@Q-5o$p3$caogVU^;bI0~(zs@0 z|8}@G?JHet{O>a<7&?-7%nX&fL%>238dGYlhEb7+br$jn3?L4))DduV%CZ(sBnR=6 zgJhB8lG(H<#g9RrC_KNBA(>VKfioEGd|!$Evp|8^!CY8NiEr@19hM-hm>}jceEI7R z%WO!gm#iOn@{|YmmjGmWAqNV$Z!>grDwq&sC$U@_vc*1ex{VV;d?51e%erXhGVFc%(eE6Y=hX6@9Jc9`cQCdl=19>awrfyi&`#}n!k`BA zej(C@r~y$~_95l0WTkAnyz;ccqwXp>t_bO%f#MSF8-#MId3p0W^UmNsWqQ(O&|-(V z{e=y^A|AgspBVrNo~(jG=voK_rZ(PE?f->#UIv}s4|QhaJt#m@lr9Z)M2qX81W>`D z#WTG~_}stJHEFF544Ul;UYZRB243M22o?c;*e^4p#V;(kt8aRP@s)7PFq=oznV0z) z^&pXN6zD&`AxleveItd6AVv{G76C(#B~P^7V=+u*={2?O5leJoCp|jn=8nCkTBc`k_3@KB)XEw-Uz3c*C+S;pJ6ke} z=#9mDQr4w-tY0w$sgjItnDmNJ08;@8iWCxASy?3{bG>s#8ZWd&=E>wF%U0_}+9Z-e z99!84hOK63o0S$9>7tEejO#B=mEB~ zgQyXg+yAbw9UB^fFz*UWxud^k-8)iK!+hQ@Z5WGcVnj%>hTIVQlYNLtW3AaiajxGJ z7U+RfCT=YA^72GPMWx>$8{?NyQCdw1s1h)DI|f^JS`C#vpEf<5KK>t**#WA|4(lW6BAPN%wB#dwE>J-xEQX9NUM}s30!(a5Z2bq%S#E` zAT-a(toL^A6DHJah(n+^bRVg_JM7*l8tE)`wRlYfgnq*e979E+-F+0M(H!UZ5<97A 
zNHb7)NNAfSm0fT0i=CySzFq(E9AXFzor{P;~cb--Ne;0(!63J-8IC&*1`)%UD`jK_76=wj>@)`He3OeCHAJ(gc zIjzau`a1!*5RKVTvpC|cOfKU>qEo6+lCMsJIv)VB-bww5_6mx16MuE(kk~S^9^6~vEj?Dp9oYsi{UWAU*&ocy z`hyVXI22E0ez#Igoah(5S)lo5J*n6~r`sPq>)x^pP1N~o=e$4bUuq!43)zmm8`oV; zLf^R|odia?PE0%UZ+LUf`Hrb|;W-ErkQ2eXdZrkAZ<+e0lyAUu*|Ohdo=|?@8oS^~ zY0`nOb|ebwmcBtJXGkS|F($Uya&X>X*vYy)V^g*$sEi*Hrg0ls6j4JmhQE;#NnN@_%vwrJ2nr=9l`p`Dk8}Oo>KK+ zg%&xYPttxIP|BVQl2mTWe2v@-yudbj_7odwyq-a=e-n@Lg){EDB>m7K9cO`iaBFF~ zl6I1pWVy92(dA{s-7S6B?tuj1;*XNP43@<(c$8>CCJ*(Glf~)6roVR;^kx!)yi1#N zqmmc1%Ouqzgtk*5V$2Bp1ZA!^BHOTZtPJ z@G23=q(th9!$+*&i5dZCxQQU%+07t1)BVAnw7$V2 zly5up{W$JdTo|_terfw&CUrjoQC5x1{9k`<^v8Gt4X`%r9cwc#{#MvK4h?rEVeb^9 zHniMsQwM%xQcY+Lo{4XmRcZ}7hV+tyidQh_2N?=oq$gE8_37Wv6$(D}p-!wEikV)p zL%$jXZh#9v|5fy-qX(8PCi|xjsZNZ7cH6mZ^P+sPh8=5}-M74WsI{AGJg(U09)5kD zhEKnQd+Pa1X$`2V`~`h~fnSZ>wRQqtZ3tF!+u-^{N_%~O^{Gc*EtwQy_#A5;2^dK` za9hBi?|>r|9+yM-n~gbH#mHpx6AM-7;b^h&p=cb@S3a==8LNkDoX9~I-)BcpxPmfx zf7HS4sJ?c}MuL_=^Z^B7V)XHEN6DlD)Sb|RkhA-JNAxr_B&|f=EWzUVRb*|yz#QEG z@{8qQ!$14+p^I*ypy3Rv@z)s=FeP559}bFV+!1x-8`G?SjSl0 zaC-=0XewnaSXUBW+`Q|-mRxMuM2%k|*i_GyJPxq- zx&M87*?~i|cMOhI5KaUw`1Wb0Al=0>{Cx?nLGDvi)}r-`F>xx$jZQ>Qo{MlJ z{Y1{#cFzl=DAM(%F3IORi;eMDTuR(V_XS1o%26)_2gii>V|FJrAfFCja9{Pc<%9x0 zzb+Ro#xmT#P!O&XP|a4xS?t+p%EQo|OnRfKifHkdaAt(`XUQeErZDLDFHM-TQPoA5 ztY9xBPR-qrI1%~obs(JuDFRn=zC`z#)PYv)V8Fr)A@^zk{(Sfy^b0qSM8-5iOftwp zGw>?5!h}9*(tK6C0$Pul78@Q*CLv`Oh{@P^kZ?4AVV7YFNt`U;!2)<%euCpEjavhP zz!o?OWiYJTLBBHfQV|D3uf?Nit=Ans4icHDt4D8N`{vZ!i%FI+3YadC!RG$;`H`aPBw{_gg8utAGUs_qU@3+RYmsEd|7mFJfQ8b1c8r&8&bDknlg@80}*H}<3<|csWHbj;euH86sN=01$2_8=qWI> zq)U`?Su3F?^1blUAAF-ThQ-yh1t5O-XyIb$q2d+7^8uK`;b?jCT1#k%nxY;12KqY7Ht%_O%ofRKKgvCuaKr9?13ZKCE&W@ms@tX513Y<{XSbi10v+^fPQH za&q#XLjYAq&p0|ds<|)&hc3b5N<>FVOU{W+y0{AP7_#69@!O*5GzFQbJd2p&lYIfV z$uruXqL&09nk$^pN_kq+ap?Wr18QPkS7=2SfC_g^&~$+v3}Dv!NTL1v>tuV&k2e>~ zH54;>-L`waMZ=sHlS8M3L5bzk)YjG(c#2ol11L6ZU(}JQi3za~a-shdiXHSpsGW## zaBzr-luA^){JFmPyHq5pgcTztk=zbSkol5hQ^(v{&>?l(66nNWT%>eG6T}&y8N{`a zZ%G1Ty=*l&TdHir>`BBDrt~c+&B#oQIMD_t>0(Kafs%@O;=$RVVH!c$qk<&)Q5kgj zAn$9mD&(BtH7S9Ny5H1V5v(=T1QFCfxBEdquq1*&xbJYi&^d41%eN8ib-MB)l5jR! 
zE3_Z^r6k=;5?n7H%z;g3+3ViF>xFL`w7ugY&4BcK*r>#+JtOlj((>)I@wdi2=K8E5 zlrv4^N?en)H10nUN-Os%hkkOl?muf9vYX1n&4~9O0wW)ddqy#P0q70SY)WH zKu+CWxb@%vEPENW<=|H3JY{vrN|g}m)0SnT^Nm>VbRsPE?GBoIyA!QMklG?tVs0>s z<44Cc3fw5sYUvgt2lBjooq2S6z+B+xq&+@#^KWX%JA37-PYx(EqkXW_Vq7S?Zn9}n z4PV?9cflkUdH32S_7u#U*T{+7k~mrB8y)We47VSXS7nxqQq{h2R*`vThYF{y%ghy5 zO5Dh;6;wYIWxr2sDBaFuGi)Z2yZcHt*X3V$50{KsK3U1Yiml1Gck8yT1N3&EOoA$K z9($uXsK+6l6NNCfb}Y2SubYq5ujiK`1BVLl)tM)!Sl54ApCiIg#L4qYUD$>{lT3IV z%!fPi1)bxySr@`eADh=wFDZF{s%86@2kpfW@8NjN$A}Hn%|5=bRfAO192f-`Tws{{CL z^Hvsb!vvEn+4_^Uy0JB1b%HPJZ8LwSieM3yZ+y#ZMFt!Je>d_heSJtah**DVJCA6- zS1qOIy9PY=7kd2QMif-f^=$~46%8`gtegg^Z;)9*3pFUH!z%Yvb)r9>@cLe2Iw8c8=vuozFAO4M4u%6{SjIq*@qS0|{4Eh9rO687- zqx&GD+Ns&lJl}_Eel*6V7v_fn_**mMTAS~WpWidihP^oyo*c4vw=@rR3{Lg9hQ`%* zSGjweowVMDBK#gcbPuB+>}?TkY)@E!dJLc19?{?=kLeats+ArWDD-xgF4g@oGfZ7Ph4#E0}v{`?e1q26zQ=|NyL)z(b2TPNYU4>oE+qtTla*~^A zcq6qOjkBAKRWagug~C;d!g1k*f(BKa_8eG-cL$1=KWFFRkFoP7G@HL-9CG)SOs-X; z-&}e03DPT7i>MRX79y-fw5c^2MpS8CGN^!cF@K73J)hN-<<%?8zozBB^yLPJjZD=y zZLvM_>>?iF7`4wPneNb`-$lyPuhN1DZoM%S|J)Maahu$0>>`g2ClRZn8k7EbE;7Vg zmm%5v3gkJ}yk(#lM}Dhv<2Jxw5QrtlOfwRubhCV>+5n1O;*0 zYU^hk7<6^~#hl+A=}qKA>0p|MO^OR^ckU>eds+`65vq!OozJV89`fNQ%GssLf>}}O zLaXXN8NL*AN2Hckbn#yD2pN%S*x+>qsfDh91Oa6;Udb<6N7L$Y6J z&kUAWRZRGP+P$MD18xayJwPiO&H=I5Xa0OtfEMXPBZe=Cw8L+S%asSu(jSe-g~-U# z*-K6i1*L0{A`{$v?L5ne+B6gpi`*7M3GGKxi_{RCfX$AoL}37d1fiCTCDy#c8FSpd z%YbR_WO11^A#(G7&aSgQbyA2iL(OpUIm!7=y_UPdk_TYROMD<-0K+2FB8M0P(ISMn z}k`tZ}p$( z(yYG_a&$@LY10NuBdkLM6~{DJjhkAs7LiQ)+iDnrO@*%^hG0kqa&r&!LbSI;tx}6r zL2qy~a-N7T-GUSBlrR@fWGH*OWzIF)Hzq531akmeJXGs6ztVuKK0kt6uH^;@Kqe)S zEc&hRXZ}LieiLk^$Ko(Srk16P9Yo1w8UlaZqr%=uf1t#%Pgw(Uyr4enZRiK}FJ7m> zTuPapQd?Rqx$+)#>YMxu&oB?<%C`vT*aOY?%Zm?z?0D z?f-rYn2dGLosOLwlo+~Ijzmvb`FXuFj51E|rXHS)_d5+skLsMrbTI86|K0SmYzb1t{>hdLG`UdKNgQp2`F}yB-@jJnj0?FcU z>=5fk*zRn5@&B@S6@J~lf|Aeuyyra33fTpV^C9KQQX|ks4x~v5tRkBIzdrwu+_h1x zlUymwCp^Caf~)=@e(VE;kEm+YC>oZ>ATv;vsr$hF`waQRx&C>67nR>&O7aW&$rXw&3lj=<7|)Gz znk&z{DYAT%a$Qq1VCXx;7wHKcyFzS+_8GV;X0MiJc!-OJ%2bZyw78P?AX<#6~m+K|Jxw!#C?!C=l zMm;IjaPWFZ2SDo~m)C*!3@>IVvO5K2)h+BF{9+>f*Hp9rl<8rA<~ti7Q^b?8S4JSC z@CWjq&}@^+k@5#W`;?N;jv!-;M2gp7Vi;GvKAcIi9+v>W7ZGSF+QJkCA7byEE#YXm zbf=UgY~10Fp_<0f-g%x>_u-}y)Dw^gH=yueSYNm>aj$xUC?xQA>)~&fqx5OKitFV0 zP0yjB>+gsTsZ7|pjdc6c%B1jSNz*p{EN|^Oe?uze>d^;tA%0uNGq|EW+IU7uT;#bO zHL6=3HmNe)eX40#8sW|T@v0mS4jmnbM1)ydv1YK;cnIhkGkGgF|qTfvFK%@ zjhaSox16Jw&IOwgT>UJWn?8{JocXRj^bWJ>N=K0KwCzc$m}mREo%aEz8>D!OGPmZd zu8jXa74P_bzubqfG~E3bbXrZP?O#q9y;Fo5qzsb0zRWaw^FJ@CE(*R+g))@g1WWVU zI|$NCGd!NZT_k?)y#;-}A6@wSHl4IL`v~G;x{rr7de-=)W-*n6Wj!@Ff%E!~6x=aB zD*9-3WfA$1ZR{vD%~O#H*f+U&CYg9C*$Nz8I1Xra9b0o>?c86S-NgB5>chJa6`dQk z`8IdnxI>+d({>YSNd6R@+Otv|4`Ljnk^BgQ?4O)sJkqNzPp}6crAmdD+$HGTQO~$3 z$LaOgPWEA1yxHs3XQK11!t~5^<;X~KBa#_vKIit)q3}TBeiC;;DP@%@Fg%KQqGT~9 z7!J&0c&zCR=45}5f$z;wtefST_+>DBD`w895gg@4`f5hc)?h&&R|^^zF=(&ylkvck zb^bNiSz{&FfJ|2N9K{ANcONhL1n-REMJex48y$|l!ioBDFtnQ#RbyT~D~AG3 zVxg+Lnz=c(O8%c*l+$hG#u3?3Sdh0F?R=CT-dG1-SPMt=ku$$MLhfYKZw*}0^x|i( zKi(Y4o@@GXwq=O6v`_YF$LpGS8~6Ig7acWiVs0X_jdgK%6|QF1if0q3(?j5Hay4#0 zithHkj3%ptIg#9|<7#7Ll_Wh>DTm7ng=LKVo71#}tLYY1xlqFxZoxd*##-BR+ zH(Io4XXGSPlpf>))Fox^K9|Sxi!`7MRe$D)eX=?};Wx?f_+bAkYR*fuC6Vu}s0YN@ zM%G_IayM<9ki5XHL>!b(;^o&Vl>^D@Ekqj@XVK-Q#THJj#eWylA z^g^}c^W?^qi;8JfAX}F(T9@2a$5k>VAKU;o8HA+jToCnIC|E>gsYfE+s@#kOV+)!H%rg!sMInOrDp0w#YQ}UJrWPnU9NNDTmY_StOH{8tD;-fFtHWc0$R82i!Az9E zCs2v|O!86~V{37|Ui|%E4i;Ai0P>sK>?XY8m$7fZL|S+t;)cJEATSDdTYT-y8eyCD%M5Ei5ndNfKhRcX6Y@uD7v=btFJp6;X9&3gTVe$g9jWSBHJpknR?H zn0OK{(SKnDWn78jRJGRKNyIkeqZu(sf?r)a#V5JOrZP$ol_QGCkH&_zK<8RdCHV+x 
zc_*1Ykq&<<`Fr<9#2=x>7zW}eaHIoKF0~OMel$11sC*g@Qp#NMp!yGsa zZGc#XMUh})S6E`+;?Ld7<*P)mL!J};osj!rDv|NO6IQfQ7(^fvqe(?V>w9Zy;qV*D zbdsdPL@r4n+^1noqBvb<@VLx?U-l)4X^XcghzL0vz});=E%}`XiX{|~5kbAqoT1^q z8yp-wMv)V#emY+bO0a~8BIw%rtFErTE0GfLdP7w^jC?_2FLq|itg!vK&AXv3;vh2+ zP-S!v3MJKHhUS(;K+hJ%`GH;pHk`#;FavSQA+wwQuY8xPvU~qOLtC2BG<7d;Q86V( z^q*y=_)UX4zNt*&X1^_2A>0hdvqw;C{}!tPbHr=mQT>W#Q%)QrbmCtju)%d8W3ycF z48SjUPf7l|0R%!+?HI|B%f*KDc`e?Xd*xWvknn9%VaAdG_8jSk@Pb5HtrJ5|>_=o$ zrGZEgVdY*>Q>507DksK)^gUGs1xlLmWNx2`I(L9PcK?u^apJ93S2A_*r3x#`&plC7 z4zUT5Rf&5#jGV3<8*0dUB^SU^H>u8K9(7dz;pMm z3myvXuiU2u$mw>CGtgGVeftJ%gN)95y$zA6P2jeUQOtPEYUy|Vgad#TFGG2y5l5J9 zm}M0qzhO_em&vEU>k8Z9XEl5qB#K3egFrJYCjIfud`hkyI8Qak!mcb&-}ECE`p4^D zT-5jPZ!DWXay?+U99W8>l02{rUFvS!TVA4Q{(0<8j=SPBpuB%&_!gz87B-SQ+MTW|V&xBvCFaK`yc?e}hD6NL)O zmRJIT%T1uIq}<`!@< zST2|_pbmd*G&=1`R2a9wxf%2H9R8KEWT`g39hicsXB;IpNfS4y2^cKm05t9F?C68c zTfk`+ra91bhT9I+g`?#qh4mhjsq;odx-WrC8AS|ZA)<{(e zd&$^V31%!`1Uu;4@M|C#Giv#N$w>EbhY|j7$Vg}w^u;c!Kg#wi?m0w{Y z&d&kV?}-3BFetmmQA|v18;|4p^%W8v9H?v!zhXlHd;@a__;StzfJWE=#sUC?7hpy3 zyBtmcRjU&U z9mopKk;6p&-_bXWJlZ}M>YXSserR}MqlPFM=)j)lWRNI{z;mJk_1t%VW90Dt)P-at zl7ojHTi7_^%!MjSaxo=3u?yMg7EAP@#7m^XRM0Qjo6c?bcWD^;=OH0%8L>Q5(j+Q) zD9C06r`%^WUGjg}ftHT(*A#&Th6$J=q7N#r2xjm~yr?G>@v*4V3W?DiurrpwHBrq- z8Vl`6s(*+QvX6+!dXhN)nt_D9dnRZRZnkNG-GKp@naM_XEH@k5=B;9n2nzhcJ|il7 zJv(Q*HZ)EDtih7ARd{4c49UY|O1`Lw*@U1q8|bwb+SM=^!rl?mgN$%At1sLnFQ2w7 z@h^N#|H%1-^(t=$G|S%w9?D6x`9x;t#gR8LYOhFw7WeIf%p?A8Xn%(^xO_* zj5T_`#?gA?ffUN9Yo&1!CW18n%=+H|1xUUcx{I|UZu~rYT}^>C^q?X&^)|(*skfz{ z*aX6Ni{9>cp~vK$M6Ozck}11kf1OBhSI%KRSO8QN7<%wRw5loga#g6 z>PxV=Uxv}ss^;Hp#JJD5dW30Md;`vME8rSi-pFC{apN3m#h;3*JbLvrk z<>fmyovve+U6zYm3Xi^=oUdjomrv_spL_MqeqK#SXBcQ*dTj5CQDik-9Ry`Qzs)^dj)AnJl!}VHa*~pI|&Akgyzjo~tK3$(SBwM~`Vjqtt zY)I9mqxm`0v6eFUzrX{+_7nRF?ogb!NKLF;<+%Zd5AmKKlx*S58Xw$QrM+|5<2BLK z4XTvoduqj=iiIxGytGOTiW)@}WqYC(;-A*j#5P6=yo{oy&?>L)AX(>%d7wbVYF%)v zR}Y~RZPO{g&9tB@N9$W4U8I>Bq`_%^*71SVflFAp)im1&#jVxq^Dh@|6qk5=E}<(#pLtj0k7lg5S#~E z7Dr`^;*5>rRW5CS!?-CHNf{Pw@m zGwEYzcTRuBo)-Mq=z(tEj3})ti?&eb=_XgFtbHtKCK=ZDgnG(3z)=w?oR2^zfP2_z z<4smp%F^$;R@hqU8)_O<+4+U-BJ`Y~?8O+QCn>^;g?(wKeHx+^(LzUk=` z1mW8L`HVps17XQhEz+#^C*Tx3-qpF{9P6tPa5Ihb&WewYs25Me+pZ;pn(I34lnO4| zMphZ@KP^vTrf*oCn|pRy_30)D<}2; zB;NabXY#qkdgq`)`Q=OgHOz6*opJDW1NuuPkV)vY-G{jD-zzaL#OL4*ub(t-#??Bv#J)p|)}Ye^PY_anI_9cCs;!U7*ctW15GD2+a-ZH9h>v%WN_w!<<^b%sjw>YG8f3=x9Py2J1y%Dpt6~Vp{!zw7nu?d|Jn_gg2 z=Q=v-NC%R%ZQ%0djt#ga;;F&>Q(v?Z!OAbiv89-FNj2VbCY*NS7w9_qCv$OB-^qjY<8^Em%05l(m2^?o4`)7 z9RYJec5<80$%!W$=qWz)5}wTkhb+@od@g&@Ni9Ed!dHtk2a9O+teV$(y{i4jVa0JX zQLD?w&wP36{)QAqyyt(ec!JX={d& zOpMTxM9JHd#)Dk?bU=cC3Q;GacNmhmMb8`ADFUcnS?%haK3@?Tc} zp<&L97(uztSTduLta+^#Gu{$s-H?|pq4+uSn>>5eP*`wc2}a)Cpkbc2CZdfJVUTpK zgxFnRHZ^3ZraMbC_BZTbB)pb0F0jOh}Oo1HsWaD~Tr!jtFRyhMsw7g1K_b zAvmE?FZfkC;ZJ+3Kcc`NWE_TYVP8KFhKZnF!S|W<8uh(Zkm6<==BWMA0~-5xx}Z@% z%hs-P@QUAQ&4vypbQ!R%mazB9%ufQ_U1eB0$K`;JhbgW@;%T{m6Ck{^DMUz3QtU|+ z|Ct<8qHMA1dWsFd%R_DYM`hzPwWzal$1x(Qr}h*i#y@h+XZ%B7flJO>Mz|F>m{*dt zs*aZ>=RhZr*b^OL96yF> z>MhoK=isNGV#h3P%-9DXSDFrf4*U~#&>WeMuL1&)HVleS2od>Se}fq@PV92p8v>50 z&^#E`V*qxv3N}0}n${SwC)cRbjj%c$i6>i`>b{%eOVVi&#eYVdH*s}kFM0*my2!+F z-i~CnfcAA3fEzgRL6{n+N3^2QK@@S*(}@K{C6pwK-5VkWh^GW7Vbejph7iDhrsw!& zRY9vKWIi;9DZjvxeC9K(5g$-_qOnp8nF~T+)>Z9hC3^F2S&kj9YU}HKRFa zXSc!Et>O6yCA1RaV;E=4fC@iLb0Qd{&P2&cg?hYpb9iq`OH3+J+=P>jBuQYtPAuOy+FK?{StV|dp zL3lEFe%K8Oi52WS@sM_&-(UcgZ~$Ek1qn$&4(e|7AP*tMP3m^KA$VRY#WyqKx0l>OuQ7}~OPpQcuc0xl%n3PS;IDA%(Az9W6pd?e8kC5I zDBj2*W54h;NP)<>I7u%sQD_m8U*V3ZGLm_=WMInF%HpCX(0;wu)Dpl<08(^^G0D1s zMk-x*B{R5g}$`U&& 
zloJ{E$JaRfQ7|aH$ukNQQ=~>>-7&4ni1}_Enxc6UX^yw>o0wShIwz)?Fy5&igKjX0 z7{M9%-^Q%wZoNkY<_)vGDl>Y()87%9XDb%1=RbWL{}gG$8Iz<-+sXG^H{_C$t>f}H z|B(xgZSs^2>CvHN-3zMXG_{xdRo_+M{Se`D`D}>&GHEl{W#v-sF#(VzTs;eA} z{eBOeKSk~IR)=_#kIF6%a=Ke+t6Ul9_&8AcP&T@{w-M6%w|1Q_OFC>uHn#5@w(OgD zaEW}45)ZG(x4nfh8brIcW}6CH*9l#jzR#@JjITQO&U$XldJYh0y3b!(oQoA4OqVD4 zzt%iwGiki<{mZESnz<`)mCjCr%yK@|wVNIaBXD97IH%r>VcU{rZWg04Sbwi=?D%`) zDbROP@0?w2{;Gd=?#U=7L5CGY??EajnE_Aqa3{D=X`sA+CO9-b#!s|QI}NM*rFr?8 zH}a5Y&A&~(g}`;5u&;u6XcD=jAMLv~?>lF_(HpHOE8-A}T?KJm6^0|oE_>|$nqtmB-*}IBLKWpdm8k;i&$W=@&VmTBt0_4Hl0NO% z>zGRYfu8H|zl0)*TuS6<4@UGFmAJcWpnYwuGQ?};x{iemWSeg8Msn2)qB6`7t+z03 zDAN+OSM9U6_JzInzr5{Hy!qKKdWtsk7H%@7#SF_{kyPC*BdQSaI=Rb0vTBtu^TOM8n!mqoA z4Q42p-Kn7t{-WmPye7d4|I)g;VW!Htvcciz!XmE#h~Zwnt&J{I7zlNx6_7kJfiqn!PTu! zg;`R{wcbK*ys|_*SveK)Y@1oM$1p%5P{GjLy9^dSPCMc}=#QOHSFa*#e5o%4zWzS!j8aHI^4GmSNbr6eQ7@=U!J&#;jv9`h%XELG+9b$Esu83E z5qbco1CPin0o3nm}L%s{wBRPo)47a{zf{is^B_s^zEDN$7hDMPYqq=Yh zX|qreqsFU_ZxlVL4!^jjEcv_{EVNTbN{9@O4@dR((O-khDCxD)`!H^OBM)T@-XgZl zCdX$Vd+OQ z*fK_nS|HaPFHe-CLy`c0`pt@ zO(H5Knwb1Ovb|@P)AP@W?GfDWMg$s_vblgbU%&JZgngIY9JxFQ5kFkN?IaqN?bkjl zawk(|%D%|&ir9kW+5y?p$}$DxK^V!(iH?~m`Zb*LnY<}dF_CHQV1`h*?|}s0`0W-%(HWCS z8gtZKes|EzXNS+A%Ix&|355=V*BfSlrWAl^*T0lh@4Tx-V;a(;kjIncyFb|>kOD=U zi&Q%jT7NBtDZNnrTu|F!Ux61g<7!F}ftS3El~*-s1gy<%8Mvj5rIqL-`&6a6a6)Ez z&xOwbwL`qEDGeRL^bnGHPDw zA>1%)!0_rX%zQLzFgmpyl-2XVNqwa>Tz^IE5ybg!dox~dE1z;Vl~hS$&~nz z8~}vjPLmKa;Mf(I86O=JpGyiHqezP=8luIn$9@?EG_OQ>twfzbLpEd0hQ^a>vIL#n zK?K(Rx^aw02+VO1+;blFBE`fCdBZ|~zu${<`T2cW_VunRR6ycHAjp-g_x z#Uz+`BWQX_z5`bZP?9kK5o?H%kd4qiDrHH^>Te(;b7ELj0t zE~c;|T!~;!k*f90Nf6#8#)llrDV9y9|Nm#lN{Ez zC^W1{q1N)0t|`q?OH1W?7 zuF1(^@_9V(4YIMaYW04gm6q4*@Vr?lRRNys`{swMu?6n2;)1|L{zuU{ht>JOQG6}i zwrkn0W#71L+pg8J?Pa&DWm}8O+Re84dw>7mKYE_q^WO8G?>Xl~XB}GX5#ba14>5== zDLMIyA#JF8ySGRb0wF@|BBB+>mfCJWpK4nHo&FJ5aujklD`Av98%hkDt{93Ip#4Qw zLw{h+s$oF($4ab;;Y(3HC~!6pCd$hyq^t&?NVEr$s6=5pFBe*i$;=Ij{=k@hO0vfa z5JN)_?NE-x&Q(!RK&qjop;0ZuN(q7>uea!TqN9&ocC)6MMVG6lU<^$vRwi^P#-bXh zrEr4bF_D4)T-ZI+HE35B^)XDTt{kn5c(aP0pkgUTA{z${-R0!!_x+S7A@ME++gXvjTe83dR;r zcCE9l*&Sn3jHQz^FX~kn8Qa=HXccD7$_w$0vj-VBHf$_`jlhWg!Vvlwe0Tv8y^Z9A z%3K{W0a`Vpj$1;SQZc@)*M$4F6SEy3P_`|>bj+IjKGzowA=BTZ zs5({Y(=W#w{P;Xy*qc8b<@_|i4aFe34KKhPxzp4xWda@w_N3mxyu0EB`wdRCbyt_) zOs}{8rco&>mUX-tKkboHM%(ZfL6>tGqw(G<1Jx4_+dPKl4YlP_dpuaD%4bacVc>q& zuqXGbYQ}{@rW-ufZ8!CN=f8vNPOQ^j!84bA?%~!t59;2w@k={#s!Dd@k+pH2CH^)! 
z;#k{$b!fqOCkjK0^k>VsI@km%jYpVa(`d3;MmV05|oY3+r z%h$8BvU{XvoRacAV;d3D--PK54ZB11Mf0gqaW>vD{1nTpJl`_cC%iQhT{qw!hOJJ> zSg2unq;#dFbi$>z!lAx+zo+FI$Tjz@G8ix^Dod69+N)19c1UabYAYJ${#prc=Bh_d z{!TdjvaB(*9OqWWx9+?cb&tG}UFO*UszJ5(;VQJwR{GvtQjztWU5YrZIK`ZS1Eyo) zp<(J)rQMf-kACqQ(59&ninWqXYIT| z)dSTZZ>CA^@mhBI>Phu40=i|(Vm~#ccur-?4Hq^Sr;EOPrH6AnCMiLRXh|FVW$KrD zE(+>W``e&b@)Pqj!_G^qI!lz54$zpwT<)axzMt>EE;?FRDEM|I=N+e-TB%XsM0+wg zn~N;Iege}zHKu8cnF?PoM-PMfyG>^fBRf3ZhnpvqE7g0AJ!Q;U?ugYBIXN9b?S zeQd-qAzv!(oTcJ}qg->ESH+6ITs&IIjM5mgT9k_(ZT|B0;q!p?!b69Wa5|goHwFFX ziCxZvtEXJG#Ubu&nXCo8MYIy6>t;9PZ!ddo-xTjFYcG9E1}op+Bu7TWMFfAlpqMsF z*gT<~ySkuPLVR}A9-k~bi+Kd~6aRBHMmm zutw`;Y7w#ftU}RlVRjP!uBd^~a*ut!<#IMv^J-3Q$$E+As7{|es{fDamd3dOjw>K~ z-t9zl7yI>YFI~VCV~JzK0iG0=&n6CymWFl3$gtHwaTYu^8zx(q=iIzq%9)>lg|eT(?~9AZw<{ZE<)=VP z0_@OXGDA+n+AlsRNl*;};OCArWW2GF4I)F0=XY!8j-Ko?2VtJnygZZAcp|r@b>2LrX(|`XCH)m_fj3zTeu{=zX8C7&Xl`WnZ1};01LZ3hcuVxug zwRi^9rv*&}J}h71V5&;6W0TyJbMxVeu(7e780l?Ez8S|3U?boL+R6>*i*dpR(`Uf$ z0D7=BXq=6mH?Z$g1}WidGH{sx$)@0|>IXb7M!~x!-3Q`9nhgd64e!j0pk|vd~cF z`&iN&L{sB4ZQsvM%9aT7qV_45LHt+bsg|#@6cXNysHA$6W|@(UiD)oq9>Ab-9S~5h z6FAw7zayL&jswVLTIdj>t%!Mmcgs9+Hu0=c7bUeQ(=ghaJKzGuU}u*xz>!jG4cSav zBln7y^Ap;9?-Cc6401_7Rizivw~ApE@zAR%IzvBYlHr8pCQYHkpp~Ln1k<;gf63yH ziVb3`BT7>+RwK|7r>>N+H0B`auce~OnZ7nglEVID9Wp?090V%?!9DoQo{c;b8`5Hw z?-aCp86(w@2)Xw3bLuk4q1Y{;}Hxn^-~GLu}DPRb~^TZHYnC>>mO&a3mS-KHh0@ zl`tuh=G*0cUVpSd0+Zo2s9odq23&GFsFTb66#Kk2p1Zt60D32Wg_k=x(dWj=wUAPq3bh7>@*=rTCi z6bpKjsO15WY#_t(P|wqTmX7on$dS^N#)#hUtOv6;ef7i3U5vy|F$ z!a_QI2NWP((_pTG)0_se+)XP_E8%KkS-3M_fq0$#eW(df;+ zSTeGrKU#_R>iP4r-7sGawYn{*bAfAz5t@uL>Th&4{)+xDKb*hrX`QmRjEs!z@H39I zCc?1sN95H7DR)AAAulIYF~aa4st_G4E7|^9M7IM52YQq6Bq^hl zASg(t(_>(Bp-~ErZK6lQG8d{*O2zax2KOBbvw?SvXZh~{*}hYd&Fj296pd^6{cZus z=mM=Z7eJx|EMh~zo9z!p^s=!+r*S`9O)VFr(+tImgo#Qc^O8Cw z&=xxm8RQRgL>TyyBk@TdOn1pcHyQi`+C)xNJjGC4kO|K+$c^>y<1ijZf|zq7)gSQ! zMhQvz!67PI#Yx(7MKxjy{CwbHqh^BsO{|_8OJ6*M3926r5xF8NGO~|{S^@lj!z>?< zoP$a-Bnc`B4J3ICPGOedwo2qq36)sq0xcw4S(Al{+cux%b>TucXFH#3O{9t$rEpb}uu$RK$-kx^FV#U`|5C=_+3w+c zUr_nnio;e2ZNX@zC^2^l#dL2W%HVpsu90dPq`WE>In#zSBR16ZiG&F`+zFKlDs;7w zNA0#bj*9p<@Dy?^kiigBha;JFB1ERN(fuk;BoRu*DH0L!S_6T%8K)JFXh0giPL1b4Dh)Mlxzge<<%|K z&Ry}QEoE4jiEdh?psq|ft`UNCkzN$%97Mql({}v-Ch_&B1hO2OIX11!e3QzIqHkIM zxXsSjEs%+v5{ipaO?S>oXq_LLcvtT? zzgqQI{B;v|Il#Hy@V=E9u{udu>C;m<@VUQT>IYOSCo`O*A8kz^&Z^$2%8vOFM#Vkp zsR7Syi*=LbfK_d~1M~cAs&E_g{EfZ}xT!uj_Pd^?M+1n-r6%tbh`7#iGUd#!t!^w_aQI}0{Dul|hGI^m8hCYgB_FVy-4C@ypUbOw}q2N3am)!uh~o8+n; z$f~ED99j@+RnJeMGlP~W&4Cjf$>q(c=5Mf^(+D>HLWSNcW<^ZJ&iYlz+_VfOfpU3P z?h^SWWCr-8MKN1-m#j~hFJ+*-v5e70u(k!3F1ucNY^~Y8>wOrk>@_eFkeBYg{bC&U z1mrv0c{-N*9t)SB-Cbu4H)Ef?m97n2MgktBmPr}D3zHmQSLHod)joITJ$Lq`mZ97^ z?Iy?*r0WcakGPwyiA=foN{6%_PRk7|Qmj^Mpb<{t%uTyW48KubWm41mZS4kKufvke z$d@sh_0>vtdG8!4jbtU+xT`ZOpG#{7J4ZCrZTVswi5Ht4N!LdAWAS2$% zm&~V5kk}KoVBoA0DC1-(3o)!W#XDpM!%A?2>7gK>Rtwx?PjccMz1eXRRXpN zB6g{T?+K>D`?fxB)TNHbcBThg+q#GL#3(un3=9nPjW8e@KlRBc@j(d7u!O+XRUHhN zphiUuz9yO^UUWYlMr2?iGxVSt@N*tBoj(2hnzZ#zPfua$iyxmD)xU;*B$o;m@{;L) z{Cq9NKVq7#=*zTFuqf{+R7-~g$Bps$#lDR3it6$PvNiet0ivn^Q0S1%&-br_tWYar ztO;eEBL9jk6_V59lyvArP`$-%Ohx$W8;m7flQJC z4sAK|(iH;){bTPw;LQ5}9JBaeOge;Q=w8MqS%v(Xh{op?wGb; zOwB2m;noErkiS?ASz%TGQ}Vmk7Pcw!Bb6(lP>wKbVEX|T$!LZMq*UjZIgxOE!fOgD z`0P-A_PXl@{;?t+?xk9ep-~RQD2hl;$4HOx$k1|#5XGg0Uo(OsLAb-xRL16C8zlfX zxztwvw%MJtd7MJt_hGN4elT3(P>lXIos6%SK;Kar^XeVr1nZ+^Z7EyCSo&x}OuV9) zA13F0Oy_;PnwzhW6s-&Apk1ELZ3YMlyrgqb8fy6bpKWnZ$lKL6*kzb*=hZ__@?0C! 
zy%Xekb3bSMskAoXd%9;+B)CLnKc8t4;gxEWco~OXph}A_L@YQh$^VoO6_6_J=rA|N z6nH&v2ei}#am>kfj_20gt_k@`__7CYB`51Lt{!pLTlLLEs^`aiZ`sY87ZG|qDdO8X zYn)R^X)>5YN3nax4xER|)k_Ua(@*YRb2QoKM{XhrYKHLFo^4q`>18}`Yk626P9a{& zctRcq8Q+tYIk0NWP52%X#3+J)O-L zED3SvxLgM@1Z;1$0X?RgdeM)A-*AQ%oJ5z%2Ux)j>fsoDu z^ee{hKlZF}F}ZLE?9%G73MT-|7s#)cs$it{suQ67mF=WEq_i853o)h|)>*ABAb*9D zr^AE48=&%0aEycuMv|v14~{sgCo(nN?C^9X_yFEQ{eb1G4`4Q5UygA{(C=OK0k#1IKWCdl4-$nIkRid^ zV#<2iKCxIuh-pMZYieqhwb+)!`kB*NQwh}~z7u4ZL>5xl=`p=1p z!a0s%z^YU!gOmY`nRbbLW-mTQ$z+{_ z$~Z0XY!E-N=*QspX3@s;i)j)R^AVhv5>U`5$s|I_$ZUep$oq{ObmE_aCOt$619PG6 z&D@&wvYKV3uqebfq2CE@j1UXxpoEpnk>2UI4SC0HBe_F#W&)rP@P>OrG*{`!Y=@ex zFk|yV#+#_+pEe?4aI8mOHAE>s^Kgn%GmVDKV2imoiHv1ZdE)BlPdX%d4z`75H&IvE z%g+^t7Fp#dEV3YEk@tr1bG3Z)^6{_K;wA=?^@_G^V#O@7ifX5^X?Bw5qzhFWAg_t0 zgkB~66)~asL@nlR-8wGck{OyiZySpf%z81N6ZVOgEV^CH6fk!>JD$-po6Z9F{TJrKhM0QVjC9rQ%V>kyEXU zo99(6ol2JcHtb!M=}K9}mGyB-J~|Fw^}f#6ZxRE4Jf2;1?>)C~x0qxGy37N*m9ID+`8`nZr29?+rl@{su;E z+i_~3^~n&S7$Hr$vi-%mbT2v^<5 z>R8wpSOVQRO5uXamKb9jVjmm^%eafJLwxmV-Q}A28?tml{msi$iBqj@abAb}S}l)n z5r$=IjnOzz&_1$hA~k&jp?J{y%F4&rLp92jUK9(SjX# z!bkL>H!=P=$C&bCzE9NRCil`^h-rvjLX~m*=Qc> z!=ttINUhCV-k$dPFrGvBam}yxdH#969KSv*K^A3WeQZ+HvxTG$xd*$+Ev4{0vo=NdWu0Tmzb|D1>)gT*fxk%pCD>c zr^q``fQs_$1hm*jWOm4Ac>joi*X2LJfB?&y5NER*e@5@!Hh4U6%Bml)(ryehLTi$_ zc%-vSGT@q9Lr_(c!cQ?|MA7M_}2WoqX$sQcq5&SUAT( z>2V+p|NZ+XL^u=C<^8e-^=%cP*pIcEIN(Bu2@6=8h(jv*KWL(>=s|JxAm5-Ls;AnCPdVNKmh9hfP}3G-2>d#yMN-NFrnk>4dYp-`y;1245#CA z%?(lC6wTFSE3G~e{xGrn%+oKX&XpURH&v=+k$hy(6okXZ2qa>IB!Lt_M&i_*F_?6j zG_Bq|w#u0%OM7_Si4vqy|4m?JO8gW}N7Pf94~9FE8Z5rsTZc)St?Y%WvX8;VY%I_k zLBsmKSLH`W$eV#olV2K=s$Pe{8OYWLSz-Pgarv_zpNYWrR$Pm6cYNay>GvK|7<99U zRX?w56xzr$2Hw{Z_+st0skey~(`!#^A^#o$czP3zC|W$1mvryB_>;1Mn;6AfkBkliOX!~gsBy6!)d z+0Kj>2{^6L#*HXef&8Wg`Dqo9P;E0L%7|c<2FO~-nt+Uzu4itIgNDZZe~vVsh)-yN6(O!BwHV)`UTC;&qwu# z>uX11Kx`<%?*d4wuQBGNgi!l|7Li)2^|2;7(&3S6#CvSCTE}3$f%xI&`mv-0w5eJ9 zTjVu{NH(x$uSc*XiDJ>GXUKRGp#mgE_0T_v?rG`iN?5S`Kbq^%jr0z&$Ny2(2rC#y}Od_6XJDqkIoW)flR1SPP znS=Kvt3OKr2!PMsTwJ`r1P`1n)$UOGJO9nh%BqCG#o7ysmnawOWXKR7=c8Z^Gzw90 z&L=Fs6L%50?Xa?z#&Qgf&XA=5k&g!P`qTSHP*_GhhRz^$7Euj_D^tPCVJOC?O^9c} zgkGVI2mZWr#2_YCq?hz6i@?Gl>6a!TRzx=^FZlH3FIAe8idy2LOM_F82-9b?2$u9i zS9-~aLJc^?e)#Uf5c>Z7OLRkHn1~I67NAOn!+$-_P-~?laux{9v=E`{a+o%>P1YG& zhwCTFEOM+p0~(OA4Xwg5(JJIMYm)c=qx0p@tH&2fos@{QpT(k>u4_jEnC$bp?jHUE z)fC8mGgQ@FRzas<``Mak^>}3t5-)N1of3sH@eCF z$)(|bB-VyKD+$?=4i^5dS@>F-Q~F8r3}Xxl%!DcFhsdqgO6z(j7`N?B!;^`85%u+Z z#v3ftFin>;`i>I5fpIApO`9xBELPp*jhMYeLgdxK#Gmco>UG_{1FO~Jjv2+C(C)9k zNzSomtEYC}Ta48+SNi-fUgzghCg^LHFmh_7G9Ah? 
zTaK6Cjc%S~@XvSS5yeQAu6+a>2j2PuX(5sF=&plJ)_%iH97x`;Z79;7rgeDV*A7GX zc68<%@nYKgj2!PKD7@$+k9>P`W|^;^O3T{Nn*wTAZohMoU9BfPgnO66LBk6^JLn5e z?8|Pzk-dC8C%1TOsy;{amM3Y?lzAUDw$wWYw$Ef@*w-C84_zyZvhx(npK?U^8t=yK*rBo5Tc^WEOCbDLXrhgE9X~6m zx_?6RXs7c2s@dbg?QDBLX&=1Gr%{chhOnWhc4;qd?iR$)DD&q|^98zmrQ62w%SC}( zoVq-Q-2mmnHk@u`&(Fy5=aTUt7T7yre+W2~w^b5a1jLmo9Ui6AxY266{hU9TU0_i9IFfVQpk`+10Mroaf zp7+!=+j3X^aD?Y~FjK z>*}f!g8Z9J_nT@>xQMOF+geZFRck)xpG7ACRH#0md9)C_89V3Trs77rvB@M}ox>ZZ z^n+QoylYWnak3p%E;U;tc&6vzW_c7aJa3j&lVasgl1&Y|FOv(Y6Ip9BY>R`|iUq!Bi?Aryeb&3>B$_9TKZyy4 z%D;;H%6-p)xWAhuKnrRkDd`}o?8)gf&!~B9oY;KbmgMo6k_;HO;-wSQ^WeB)3%-)e zC;8~HC*C``n)@S>iAe6wvo+!VU3nPuckblv$4Oe#mI(g+%WOlBox;0C;dBmTdKR%c zN?Sg6^m!6tSqh)!zB5V|nTg%hd`e=tcU(?ro?ClYtT| z`*nHa-DDcYpl^}=b-u>3{hQ+UA3ddL!UDaoD zY1UV0EIteSeo)@eIy860EvRUqL$@U0q0y5T^y{U`ce0tTEIZo~9cPc17KSmVg>GV~ zAxrM$%g?Xe3fWDAnqAp+*ZXz$>-YeOg0LB>gR$;@y*K7Elvo-NX>jg0@^_*XW%g3C z7|Sr2%cZfmlDp`~o+Om2kE+(`)5x#HBlB4fzau{fhWRw^*GFLEJ2N^ii-z2 zq7b%3N=AA4Ipsr8pADLKhWu6g$++~&&wlEG-O%!NrWaFRez3c3^@|~V+R{<%K*qZ4 zA!f<7rLJIbN9A%E>%j`eu9o21HvJ}g(BoUr6M%gVb&Q}!VZg?rcy!Qe{WmxST=b!A z5oJXjH`=kIk#H=lVx1oTp_q2{kDv}hgntL3u@ZrC*zgQYL~Pj!R_KeIRUoai|K&6w zqyaz}B2T|zDG_zH8X1(}13-c;{FEll7$4$aguTtzM-YvT7pn5F>tR#;O$$_1SUBTq z$Nw0C{P}K&f?!BEsG@FHRUO=M(cNJ>g7Q~NflNXLg%n3f(CrY7?P2>X10L_Xjn^|> z*k@>H=*toMO-Ni!Ow9U!rVB$2FC1g-H8MA|iPt{P`T>VMql~r0WrE zNh8Q3_~HOr0p}JUQ$B`g&@Wn&BnKcFOp3^vG)G3@ts%f`C0~+WRTyIx^T{ZEtnSBZMN%Qouc!2BKDw&$8P?UeeQzjg9oh zR16HZhcg9_XETHKrXw{5T|UQue{on&3Hm)=R`E{diSz&$omZ{$Ko>tcmdJca1QT+` z&`$_{#nBQfVrGoRkqU|AP+@47Vn=)_3KEZ~Hu;~h6@)`0(6YrMlBJ}J3MdE!9NKx4 zl9K3T!!N0$2xnE3=|*4!U>;42cyVmdH^`7}F?>q zfD=9}EAJD5^`ii|6itb5ZC_-*A7Azai}=1SsC1)#m(-AZIZSDr4vF0tF&(Gw2-Trg z2M<|R2oV_8;4BupyKi!sAEdATlCgzgGP6pwmXKZ1>E?5Oo((+FT8UWFj1DPKD1SPG z%I9L07?3jXqp6V?#-oOM^YxQL+?gFm6D*?^K zV%BN4)MLgPHHb675M*mkl1{gc&d3raBmXEY9?HOzIwlRJc5au*_~T>(imXvaa0o}N z=^9kv(IiPV?7)3QC^YHmvxe%0x8=-b@5)wfQp&?P=TEyMwbCTYO>)oam$vUW=&%BI zT(L}Gd6Ij8?f7m`wRvWub7H}dZTINle8e(RDrk-LVlCMp+;{E?yWEqN^~l0=@YckC zHlbqh0Q05AQvC*8K$o9p5Iop~>eDil$ z=4ssS$-{g%%2?8zelbK|vUg6}a_2a=NJH#?Ruc{za7g3mX3m2eBh9$O#rEh(zmpXo z^hA8|TVy!2DRkqOv5{wQqLxJsTduv^Zf9-rac-XL_?~@UlDR~`uS0GaVF-aU<<>{3 zLqVta>^u;LdHsN;Zud=$>1R{;Fv26XQnn?${>GQSq(qj^(?EseDZX{BlKe?`&tsED zs~c~~I=SVGFbu(hsps`J=CHs0T0Q3ne=K4=4D0EBkFr;X zEuj(5;U2ZO;Caay3yedSjrE0`||P&IRHnF1&dI7q8=w$JVNfln zDvhEH7`;E)^Y9sVbepMo&Or87$c_pvOH>`W({Mh5Ky8{_8~RGDHMQhn(+ zx>9pz0b|%+3wVc+tsbjWj8O@vDo?#SPH;MSwHB@Roww}4*}((Ps~+uXrOrGq6$}VZ z5cho9tf&74uQ(ZN=PlIyTezuhW|LF3-0nf8>|pss>S>Y`yyN8vHf{j(fPShnzURnG z(ySi#OfcZ_z=as2hT~4=Wt7a1{iT}t*NO?={L5zUINHI&&e z20}Y>v3{L4P}-Hequ{cpGDUQ`@%MiO>`w{L0BoYuK}k+7KY5xr@UhpId^+drH1(_%M3wpc2}yUUh7z4h2WWi~#w z!NcT}Zf~@Bb0%I26RzPga9Y|rw-)`4I)2$;>hJL2+|G2F7LM((rEvWM9;)l9l7Br@ zzPdP|`S55uJ&NL0IPD^`)vfRoO1jzVzxWzToAJ~%#K;t9OT{?5)wiva8b&OxR38@*wZf3qzF_;>a62GO_D;PQE&221+x?F)Ea;F;wcYPUMlY^ubV6eAQuj%}zs3@6j4Leg6^ z=EBkQk(0bf%@i6`leSU(8T5(KBbH?;{0YrMEQD%z7IRrlu>Pl+EAC<_okn}(d6!nL zSz63B1mmX`wJO9eWo(tvqg7)z9n24R$3`LS#Gzs)4{GRJk13*sE{lBq=N`%bPK_fIpq+<6Ba8_@hNo}Ip@+_xu>nT(t{4={c6PSnwhB+JeD^HK1J&YlAzY;YrRQV-eB{5a=D}a7OE%-dU-D}5l>o!s=_D~cc7Rjoi4(Sl+9{+T3CF= zBLAXE!F!9ydxnAPvmr)YKa_bCuIW{f*w)_1pV$VtNM{VESf2SOc&YVG!C#rl+xIvM z^hbgT!6S~vQ7M^Xq6^m_<=%o$Gx;L#_cznF&$&mIx+WndWg@m*ol{;=6Z~D)zv}Sn zuX`NXb1qW}p-WYm+(P%+cOar)O9O+yEThW}VfH>vKGq~#SC%=~4_41;-VHMa?yE(D zc*Ky>A!Evdi_I1I^JOZ%v+Ir^yPQkOU;cO&E+V;WEq>7M|?^J z{@44-)9XK9<`pN$x67O;4$I9ZgZYmV;=u{g>yy99d9j@w$;A`C$mokfkN8&oaC55f zU>y*|SNoU_82%W5WYz^P->;rkzFM+%e#YNIAmJRHkLF68Zt8O{wt4`LxuTY zL?j}0eI!r`698`hIz9eEMBH8F2{@MZ7cZ 
z7@L>?0As^*(x)>Tmn%rV&;Po;tn8N%n;4hbn!$rTem zEcm2h_gK-2z6D_V_IBb$fT#dVg(N0aun~CZIWpF0t=XPVia`(tZknJ5`RJ^lOA1w1 zJe~NH#Q#x;*`*t>(S2kLQj;JUEUn6hdYmxrk9yt!nB)12fTWx12(aRCAMPLO> zwL+sF8lMspnv_uvsPb*79yo+v!(ltX8I@F4UWPYE1^=Lo7eS?9B3C5(%qjDNdJ`!O zZ1P-Y8AxY8jZ25;|Q6O>>jJYtH1ycodgakU& z?^Frl{+|&#``mv5uhpH*Au7tu8N&hc7f`wX3>2RtOf*@UqsqzuSP>11C}@iSwXmP` zG_>U8_X0gv}MyKdFxo7X>fL=c6g=X`%gyx`&{wQeW9KGg$OG; z+6Y$6_Aa4GF>UZ|wbX85DlKH|cfHuKmR-0`bN1x@6Xe?Cr-5j`{*KJra3E-QMqJot2lomw~}v$slotXo8N z#uV0aO75g~1SNeWEqz8P>^;+1q_t4m<-#|1U7g3d0^UN+Cf1^!I%|)j;%k^xx;KXW z+LBL7J`M29n+E7Do}G;KZB57f2nzi(E*a_4yK^*T^j%**0+l{n6))j0c=gY(FAfd1 z>5X@hrY%44Yv0R--CV|3%y(zc9!(HDOc*n#7wS2>*RP)+yETq}wDNR{9fV__dvfSc z>6R3MuDNM?m?kW-cIdSOn8y4CCOo**yK@V3QGRa_WH?dw9qGW9Npl%!V(5Wj_()N> zF0P&1(QI1h=AV+kmMLW;!BTzK?R4wY+#FhNsXqF+pO6+I^?5HN-uY;#eC~kGA(;j} zn&`dK^DL&B0LZXA;2lYv#>l+GKvrn4IU^KbJ8T((@WHJBT6C+SsI%HV- zrwkZW%UnZ-&yg%9jl$SsD&_7ld^jN`>IF-)sL60W)0y8F-`{Q0YUkG0R_Bkg4yrAn zmY;nY@b!>%{@gTpplZN|tsn&32m7TMIMn>2!#A6V{lc}tzBx*``Ko?#&Z)#K#W0%kHB_#4WmZnIf}dOV7xAv}_a?`Q-QGoKe^(rd zKlo=Scson$So?Sv7l(E^TE}d@HbxBHIV9`Xa&9N%A!*dk)s+Ign~K&%J6nJM@SB_b zF!fiTsQQ;ykQ-NEAXrdJ-+4Dgw?3C-UwKk09Iw${4lHIJ11eJRP~?4Js?i|YnK5$j z(~!VeT>U$;v7B~3Q%jb;9_^ZyV@diS>ajmR9Xu7U8t!|`F&BI4L~KmRJJ<1hvWQ;0`m92bv% zENvApY&Q5fE4A}{LlTjdq=bgt0&b8+hh#|5sae_%|Gc)g=UB4-?HutEp;9KYrg+=+ zU5-H-AzN4xM!;d4I^ERtUf-^sbp%W?C2E?9#|DnYvq@XD6Q*gM>~08mg~&Yi^+D98u2Z{h934Fk(EBa7>T-=1JW{ zabaFX{RM%By2?=XhFplOaJagnqMt9#4Z|%AKe&V>)VN|6lSgi&q&kH0Ym`Wz1w8{2}0QiZxtp{6d*%g_Mj{=p45uTNp8FzzXH|!=y7ZINi&URlaFy8Uxmt zJ;WKkjpAEqH+t-3za{Jt)+`4N-lZ3J#avvvUWMveuU~EkR*%*wq)<<0 zm@e8JeLrF38m6(Y7$PKEB0Kt|RYs2!xvC)cH~J=fjw0#E@VEL36O9;lx)=mD1PZia z_?mp3zuMz+R~2Td;>&2AGn``>$VQN?P$pRg^!?bMQ&6iLBHJIU8w z=vFMIAw!|q8XBbQM5&0bG7rQoX2u+hdS`I3xB zw@t(htR711ph_P8kNqr1U{nDs*WKKl+5+aAN(5Xp#(Gd@@vmNZ4~Q6O%HW1AQ+N5&u^JFIx_=(~o3S{OGki zivG99`^Msx9~d7YTd+MK}seE>0m zRQUC%EH4+>kOin^_V)Iz5f1;P$(<>h#1as}OcDdYSV1wHgbE!Nnphob0Rm$;lN;kjtSI(vb!E&?$s|wu74{4)f`ZZ`PR;>tOd!e z9AlD2-zsVQ41~6UiC4R}dwMaJX0_efuP{*<-bb0ETwo>wx?3d`Ix{p?6m(<85Gjn$ z|FLvdL2+$c6io;O57xN5ySuwfaCZp7A-KD{yA!N&hv328-Q6KLymPDGcUOOO)#*BG z&pp&mTKlB>)Wf#YmAcwKvN)0a?b72NJd&^=af5rG9%ur*`QO# zlw3spRvJeplE@W@iFY=kNt*~Jui6R_(+#r3josj6z-Sp!@1bRh7(R6)@pGqJG-qdU zWL7^dGRq1K{((h*FfvghuMmQX;FzetQO4OxJNg5$esVm-OT$kt&Kxl%e$s5 zL`Q)$`k(hfNS0h^MEJK!o2pNN2Z3X5al=at51G+JS;hN2Xtu4H{FZTqYEU07 zR%4xbr?GZLudl-Qi}G(1OUp=%l!CSjKV@CsJ+dw>vZFEIircxXF$3r*s}4??LsqdF z7I}aR*y2kJ#K)5S)9kJhB)hxr__9BG&vj$2WW!H#2~U}|`hpB+SNo5D0zGcO)?3*H zwSKqr+9cEaX9`_87`ZXXo6b-ged9CSzpwF1x41j5Axb{?x?cImj^Z)vTihhisldNk zJIekLaAbN~ip&mrz4lCo5 z;(hWOhFS8}>62-ihHCuFnI-$C>M=^$0i+JJ0*s{(w})-MC`5Ce=t~PpNS>#jqyC-R{NaJs6r8D)mvu`?iNb1N#>wJf9#Cel$jK;yA0`qk} zDf}~)d_+Jx%wd>N_Fp*r)R?=Tfs=Pvd1X1$I2ln7q*ci;<>_gEkc){FuvPZhe^z#z z6vdi!_>LA44KZCFs63QNt!@#m{vk6njbIrx818L=E?agzG(}ouh;Q0K(XR@&t%v77 zp@eSg%3S~pIJkD{^l^Qplz@4kX9j1;8$qf``?@wTmYBqsF+A>$uP> zr||11s%PvqVw~NmZ#!o~LzFcidPp7*Mbw2@6)0+gcd3ECd-nk<$3IcnYc=;S+2Cq13+I>X?`P>>`6r z*>`uRP{D43dsB6yE)TZ3H=|%&XnnY3&o{kG^VTb-Sea%!ikG+Jm zzMr2i*cor9mSje&@WL7k)1zyrq8;OR`*M#x`<(rTs_#s?1v>-Ums~PS0_KxKp3%~; z>1e-?qEkEE>_^u>R|9!*>~DSnMm0MM84U<7_zQAHR~1G=qZCEwl39-@)ec$RlBWdD zc8v0u%E)8$0Ot`KA!vo8{dx@k-)-@RLWrAh z)-afUT}Ol8L#g_XAX$Crj2$_y%D1$#GQ~^gX}$FJyQfzHHv}?nj4l z;4E(}|M~fONUc9tgXl{pw@~o>{21wE-yjgdq*Duiw4iJeX&x|^5o;e&DZ(AGXh@vY zXDe*9*8mLZNa#VP4AE^slsPK^RgbL~5HHa?3%UCzt-o_=ymnLBnF_rmI@VY?JoT&h zpJp)GH9hlDfq6n3k+$9)hB;G)-w(g&G_m+8LR7HC&z6{9ci8(ziUn43ppxJ++hNWg zJto!bn2j9?6463?qNE;;GDtKLyeN>rC>63Ynu*fUr&>CdS;zq8JKpmF(d@1P@yT>Kg$48W=-?M4*`l~BU^`V52NAIk-( 
zTis&Kr;8+lWWqj&z_`t7YV$=RS!}n&WWYKekMR@|tf3B*IACr73?7w|QixC>up)*V zjyIV4R4^@2i3GPx7P>EIiWc5loRp0EJFbVtqgEfs%vNwA7DlH|ivy)bS>aYZXrYn#J?!YrBKL-qK$V1w*tEO)Y{^QQ9(WGo>74gcGb<=-pLdj9s|a{{sh1 zyHx38309yZN@k?QH1-~9v1-|Hjs80kuht_bVp+HNBUbHxd!<_?4v@D{c9Le$FSVSB znQ2}4AK0UV-;4tr@-V$*KXX{7Cj1Wj^()u}3K(0Tc4L3Zj!@nN8Hj>rpBz!_2~2lL zlu-W3C22jJWeFikcK`=TN+O&a*FXp6})Vx60T!u}5Rr${v#6cEF1nls7R&1}Kd%l<{EZn3QY%($j|^Oz`)MMP<6 z5mjeS`r>JnWOmWhS8iFzGG~n1Bx<=$Sw5bu5f{Qolgcj=PvqGSBpP^3cjh4jJi8R; zw&-F;AVWN&PZdPW{yuVxiK{kzFYL*x&`u){9|wgE4Ily@b^4a`|GK%_g3N+~?5KQp ztC}}RI#Nyjz*If4g8_lXu&3yJ0!hR`0*zB`XcUUuWi?y!UqUHSX-DYuNW3193~p*l zxw5vw`2PL7re65^4*+}B1hlUD@_4#?y@p>z5c^p}PBlZfmyG2pcz(HM9MNZxdUaJ+ zMzN?<;lc>;o=`G?D)#^x7x9XdS^%6f5g8fe?mD261waE?2z#pV)ot}Yo!GE2Kd0ye zsZt67ft)%iU<@}lHWm>Tbzr35avM5-48s;8oZ=zoOB(k-UAmtlpL(mHtmCIsE z8gBz^4KnxxR{-J~i3pkvwhGdT#)rhbj#>XKuUd^k-j?9DxoX!PEu-WHOpcEzwU~4| zO$Y;2kEXy{9iUez?jY_cyaIRu`|D`VjX%{vJl6(a1@XIy4$#JxP2qzx0IO;YpLna1 z7tA|Hm~W0+0Z^Lq4;Z3zd0b9_hTZ@83zyRc0Cob1r0f4iT|r-h=mIir;qf&0*J7i& zw9i?{tMmslqlO|*KnpGCMOaItzfgp%t!Pa?o(9qZc^+1I3pGiYTcAZMTm&*)i3iD_ zo{fP`_*o1`DR2{$+yQ=@81jV-uydWkp`4(S!%tk(yCh#>h$vhsT%iav-)YRrrR-^1;Asd!g!kVB~jQA?v!vxu_1VXM2h>I8Mr^9hzco#8L z1KPe~ygW-%ePuTCqljhvr}J$l~6xC9&7qxYxz~xQ38lPE_-l2n)2qYQ zQ0l!mwq|k1a{k3;WvIcjTok8J*0V!g7DVDz!tWc2fm+BehQfuD!+N+3zz*wJnUn6Q zrY)~dQ8O|XM$@eP%Milwv3g&9Mn0LRkrSYGjLF?|xaZ`IB7G0{`*7EFloA`jx~Da> z3K8D0W~oFazP`dw$=utobDsGvxI^n}7(eln-kcV#b9eJ{aU)#uB%l3chIH}O7P(E_ z4r?zm;=N4TBU<`}ez5&5sR{H%YVl-SvVY2affTwl>50{qfqbo{K2>!w0o;Z9^0zP_5iEo0SfBM`U3O+4xYtgaTM%ts(R$+Be4cOlldInU@nf}e$f+I)dzsp< zZyX?V)ApKjCJbWkIyB01B7mA-m$rpv55b}RtkjF2fkxdG$3dmnj%D$bm}ETOhRlv{ z@d65qJZUwO!>Mo1%`ZIOWf|XCW?|=?&qxxupuxVEe^ZF6=%}Rh6x_1VNxw3en6%xY$Q=L#G3qT_~%(!0i1G5|F&%ZcVcGwH0tW^Rh0asfBbu*zVcm70c>2jeL!K?l=3 zaiqrlRuu_q1h(uG@(}K8|J{IHrPV_oR8P%=8!F}O$D%FEuWUwCEQ+YujdTs$?DE(x z!WPf0{25K#6=jrYWcba0X;2<4nJ0U){i=%&SEg;FLMxn$AZsjHX>Xk>WSD66a{hA$ zrCgml8bfl!H`_q~bVp#vZty*-onp zu%+njlK}sUYJ*PNfotuJ>ty57`7@E1DmjQccSD_p#|8CV3 zpT8h%b#g?bD4A2`0QOW3*V&DVTUZENV19S`te{7KfXtX$Eg*9##6C2-LN7wEpgL$6 zv8FfkW&XX^Iyg2Vzr`8cULsLe%xwKV#(NMLa#3sAU_aB7dS%QeGgF^9;m<2iH#tt= z7Vbe;a0^?1YeOkI7^ErgoRgk>y0%OKs0%IPs=)OOimz9dv>E(8NJ#~t~T0u=V@ruD!aRXAJS?$J1=H0#yxo^y6RpppTveJ<$oMJi)HCpkyt1k ztWlhb`-ql+V>0y9wWL(=z<__@kS1YD82WE(y3}H;8_4S0Q}t zi_G`+V85A^EzGTsatB7%(tT#ZF4XHHhO3I{jo1)(zTf@6#jC*H*}6^6_q!*dQ%q5Q zf<>rz076Lph5FG(y#3Oek2Jb&OD2QK?_4@IBb|6xt}GHMDIOP&e7b$j0nfGf*F{hg zJ`vYchV$?A`*AwVR~iEQ5=m(btGT08kR(E|>DmQU^LuXUD$jSD!9oU5@#^1fSeX z@oQMPT3BQu5}uApIv+Sqw6Rj~AEV)$wixr39@PwCRYM*j9lLSN3ZPUtsadq;)its0 zE~0rcX>@m`KCeo?`L3_O4Fzu-AUw3B{`h%n1I)u);Tm{u)7ihwEacBRg7HzPm35$0 z$0gnq)76<6_M4=#F44VOZ+m+UpP~r(GaTJ16l=`E74E+qDYoJ0YfIx5E){G z0FNlkZ$+*ug`nJPJ=_F-wr7;1SV{*IhXPln8ptbsWuzGF4G?JXP*SJZ4-0d1aq0ZR zcr79;a7$jb-FfJmIY(r_gUjXm>D)>sk&@PACRJgQ@}S8ZdX)8%{>j@s_g_Q`_h=T= zqhCHs$z+$|X-x5)si-ORVt~(KxBd71HdNG6M)JhvpsvT_>{<7*x3jPifcnS9X||9M zJ8E{=FaG(#zu;&KD#_{66x4~lR$|^m=zOkVCGJ@T9zc4%Uk84iNo}L zGFiKKrBoWoeL{nlC5sHoBlYx7nCA`v(SGX`5Q1F+>IDEm_RBEeLOjBAmxAd|oqfW+-D`OlAFv44cnCC&+>o$qf5 z0&Z3@_f6wBkj<=&P5?wVjo(my|Z4%>D4tv=vsg3yQ3jhQzJVr`TUEWG4f>M4RNS3H(gP-X@%u5 z$MOlXp-rj-AP;<{-;K@al!;iSt|7QA;_8 z?*xMf3?YEoyc1~RJq!8I>>$zMetWQJ;ETVg{KYkTYYS*U==EGmC##H#m6n!%l=7-N610}s z1h({k0hnzfr6NGs-f#5;K0?(1YOse3y{yaS`op<$)ko5v$DgKIlp|X#MPzImxWcPcsg@q&q+LJRg$CD^B z*^4N{)Do8kp++8&GB+!^B88)oQ&Or&LIu-a~z{L`qhToLJjO7zyNBko$3ZpWRBF^}0^f%JrN=`9-3IRg;Z_UsL=`t(` zENs(;!xuVCA_DFo3_lCP3hfOC^xIzL+iS+y9_C^!h<^HB~BYR(Fm!~;yxB= zx`McS_s*i1(1HBjWv<=vh63ki8>LvyQZv+<*Jbh_^4@?1>Aph)=gw52l$Sh5!#GN# 
z!?|oPDE6t6MDH7C_FaPDsr8$Y&m1G6n$JA4JI_YLtT`W=B_Ex1GfP%iCho^>TU6;~ z(g(O@=-zS`*5b`);L$MeG#v7j8L1DIau=?CAJN=n+?dNaWz#9;8}=S4K?y5Ry8!(x zK=sfOZ|T9(_fc`(`d6CYF!dZh7&nDY^k zv8#{`Xm_&KaB(CwGgmZgt7%NF$w)3)F#pXu_}Ew-kz3E#<OwaGtD$p+8F!Ox_WhN_;VGNuAyDom3`5Q#6bCX{&{KTraCMgGk+b?##GYD4W^#% zMrP*;+G!_@p{6D=`?CCQmCaRcOmlp^%7gpn`EapJOh-P-b}diymT1DAlB zaUMS_o+!JsKC#Q)*yJeu+v%fzly%(0=v)`WmMOZPJqs~+CdAIEG=8o6oL6yuqU5X7 zw1XELYjwgE)7(Pgq18UL{2RiCoB!wJL!-QO!$J&_CBgpbQNG+oQPia&E{Zknor;IE zlBAdeenB18D)QWMg{j#prQ%g?pb-X8-+;kYx>V($E_K^8QUA^Pw*l5X3w?`=jM9&E zPp5pH-Iw4pxYf6#&FNHR85lj4zpzvRKeL&^HZ zBjGX^L-O54v{#W9aOlDGCO5G?c{C7-!_SN9p1pTl!=|r1828r9t5CCTp1f9Yy<$XB~%%g&>o4XU@;SsOmi0(b~zRsA#hwcRw_KvPHr{ zbavAMMF|=g8Ee*Nq8zrM({7N892Rp=tE2jKn`~^TL}_-qg`pc%B`btREPh1aC2|UE z+~9KB;$=Z3I2FXHB_N9X9ue343rk7(IgNLZHyc(il-+`<&Y4ak1|Jw+z=W$NZW93- zD~#VSpfUC&9S~)SE^A)-Lc!wsS8N*VfF^_;odC@^ z#@hjDCioIVgqv$>JMu`gp4lP#M~v?%EP*#c4K1v{dUfpM{b1m>rObydVFv6?cw{FH zvDY;@3l58INOeo`$bAoDJ;T#Dqk;$OANf)^Gol%c$jLZ@aKoii5}aAIC>#b88m0^% zcj%BbJir8cnyFotj2qH{Ca0YJb9vCe8-4e?My!3QE6Nxh7fMda{f7(3HCj1xXVg zkEHaR`ozc&EqafHzo^Q%)k;x_UxR%L^rE218&|LCwTTDUeJTl`qhf=aU;hmeTFBCH zur|r?C9d=lJAA921P3?8P5;ualVkVcvmE2#>SS#aA+#)&pdulRSHnvqjEWr zoKa!b@s^ zWRj3FzwcD;Mq;={jfLKQaC1gdsbyG@_thy%B z=#-K$#mP8a+t};8ribjOB=s-GzDHmjMGxsAIw9T=eNJ;mpHlx7PmDfvC+e=x!P$v% z9Oa2vB5wR}#=ue)Mv7U&$T1tA#gNv=l9s5%tb#dsB`Rm?2rpQ77VM*E+b;`ar>a!2Sh9;KN99JtRP*-xhM#-pHU;DBO_D(udiM<$U#>oo8e($LOMgS zgt?56#!PrBTD=9)aJ;|*8VD4GACc5Jtq2&(l8VS+&YF&fJuf?(ecqk{OhIJ}adG6i&zk19?|ca^f&MY0%FNnf&s!m!SI*_0$y%FCGhRxq(PV06VP7q2$$XMdcGn_ z=p7`F_BK=!n?8ih1bg%;9tjBa={qL%nIXbZkAe(PI= zNzVX)DX4&Ej^X~}ZSn|tolVT$yW##0^4)gU1j78dQeFbPv-u*u*X|MM8b?s!$e3!g-gg2VHKyL;3L1U zIMjqR$0-*Dccc1+(}EZ(f-NlL!T?<@zV-x4Iw2QTM+y<*SXrCOf% zh@$5fFr7w7#K{W538T&FdKQWIVXrvtp^6m-XIOgGKb95>8At5pe^5M^S&5;J5&~!b z&7hfW_Iv0E1_~@OcyNBmRKWsbw7vNTt9-#fD@j0>fEe^Gx+++}+EGe7EK)tEh{H;W zE#==e{XB59H&)c#?=&ACpl{Q)WjK7;K{EKXlRvC>U71hFN{7l)`E9~(xR7o8xEU@W z^)Rg?NV+B>Erln4ye+7m`*ryldaO>@lwRXb7$z1?_C1$J zn&pRMLzaEI8xLwiE%io4y?l6(97v@`|_ zD$U*(x;|U`5*DYnv#;w-rBO{>Z&d4z@>!HFS9mAO3F=^cEU>Rj9-r5kKGSqw!ztB^ z{(Q=B3Xp1SiDlsz%}%S1ifl+JS_)A>%3vjJaq(Nd)4iM)eJhE*a-Dl_K)AOKSEcPt z+JGWpHUHYI0Ap{eS*i%P6V(!EQ%?!aBk{shU}FY^s-pZlEXnERCuz1P{XGPTvjGPn z&xL!iJl2+gyF!0f%GH9*<6V~~wAI_LOR z2-LThzwaGD?UNHqB`z8i15{@27qc{c`1naQcEzOtr=c;9(2QPMVe-2ZZ}MgG*en;T z^jn{Xz=HnK&pMu7#beAOP&8cGYQ|av+d_OE#&Y&7Px%xy2axElH=;yw~wuDAq{$tb$(e zAvYPV`L2;3%Vq!{f`=VZ>CgPdY`VkAwzL(M=%2@f?aK_YA)d$s7Y~d_-<-M~07*I6?{NAkDbsI3w=ixW-<96!* z@ws_syfiB0*4$Np8(Va-*ZCLo25gt2Ih3&SS4lYYL}2EK#E^>yF%`P<3S-JCJFN9(AE zLYjxwPP#=CXAKB<{h4U4zT{7?%4U>qWeAJ)`Y$%x81OT=JEQYA;YV7y#eGwxvMsM% z{)@4+vZ)O8jEtetL3$ydpd)YwG?p?AL(wqdnkXSu{+bWk2IygtD;olnB_M|nJccTS z*WqLLKY&RstFES~{-h=)Ou7WEEI8m=fZ71#Nx*5mNo=l{FxM{;`xfTtbVXl{j1|xc zJ3q4^-EoJb2Jt1w682hwj-kP>K(GZ$g2d+hY;!%!CbVN@tJe=Q zJK;2}+Hp&-*3WcAQNbwGw671A81m(!k0Ys1U+WEf6{+Z978A8tq_L1pzszcRK*;+2 zAnYMgipoRJa$`c`JP5%QX5fM~7N$!1i|Sxvx4;`R5NFOt(+^MR5QEtR+NR5jgAQ=G zQdk$QMMzNwyau&KNpp##j?>t15W>wsMzal^SIJotH0@V$k-$O|2bo-o#H?UG zC=h@{o+?Vk*Jp{!`VRw-9Zf9fOeNbP|6579L&Zdc*bpg)Cwwrejc*|&F2Pv^M)=Sd zNe3(9K6sC(q*$ZSWI;|j6zhp3Vs!G5Dan4j#jw&)CYeo8rupYYk#vS|jZu>h4OkH@ z1Pr|lY61b*!^sXVvU#Etwzoryf8GE5xGfc40$M~Nsb|6-s7tkOTIz=V1gY}6;Y^G6?SK8~~24y2oaY;SuI zpwRn%N`7uR2lCvNE^l6r7{GRGIE_p7?eU3aKlY5%Ea`@;g zhN6tO$f0xvCm&xHD$mwofhQT0q{qnB{xqaSgtDQS{qXi}*ZqP{KOK+rk<#NFpmrr& z3aU{;uQnvuI!Q@RM#N!*VgGX<6BX4D>UkP)HgX^OQ%lizKCq&|Ed%_&`}Pd)B{XXH zdq!iTqX+2wZ9*ckP&R7$^U0n(EK%+a1N$2Ac$~w?o|u}k1j-3aw2!ViS46NYUzd8B za?`wQDj_Y5zKDD;LqqEVlnhBjF%qU#{Y^3~BS)~;Lj{xCt_V=(aoWsKPkF0ci(&e; 
z14?+`w7Y>`;Qv=Oe7wJPzn$9&zCZ5ze}Vo+!1oA13%*x9&;ax>plSW%$F#2|D6Ord zgAcWUhK{bN1u)`X0(J+;M+8)8SXdMVk)Q@H;TpCTos&|G<9u>L6dX~=)|P;7ND_tV zzbckdRGayt&`HGqslMRMtO#L6(_mqdlnIcn!(wBHY#NY+d$^&*ra{UuaLO%~9W9XI zQ$kb_p$!A&Nd5gH<@R&FI0C`ACCD=9Fi<1bts>{F z@rTWc%p_6bq%_l1;r1xLaX4%Ky{0zEKT-JANXR6=Sm?hF6?V=|yjnmAou{j?b(q=m zK5KbYmP}_(iIUUXmhwBn6Ju!`UaeW&OyOks*GP@~1H^Kv3M;0Ah(Y}4g&f`BvxtVum zELYZyFXIX+?OF;33vIqSnAh(9FY29MVk|!FGq=LNi$8^D95CON-YckUyK{5y?T|2i zntvZzV>WkA*zhJcYu}y9hxFAd9L^3;_ZVmb5GPP<$G5!j7a z{@%cIoce{-ho9rZtL?=o`yG)|wbe}~p?^N>X>i?uPon3=o>(nu7=biBYqZF+L>8@e zuzW{ZgBa6dk0#X344$r5f4(tl!xLi`I;;hlj0)hi*cBzxdAHAp$|^ z<%!1ojA6KX!U2D7M8ApJK_|&O@CMDomr| zhgSx!$YIp=kE%Ooxo_6#=&vG}E>XEWMC#ZHK$v>%fZ$+vUJ~V^+UBO&{;Pgb4#^{5 zm?=xGg|}^asGtJnWDCMmG)Kp~PDK_*rHzGQNZsIoop#e(qDXsffP+E499G5Y%gAlX$fe23YHgN>(x|7k!6m6Kz=-@fba zwFUC5O)e-xqTJs*j$@tZbhB08-?v~OSm>LcLLcArs^X?lHgurQs+_L!@we=cyFm+7 z%Q}z2S<$%K>Eax6&9u503@tC_?xX2tv7OUjk2$xxb&~%{*%t}J{&yuMd@}Pl%Kk7G zD+&{|&Q1Bod@-s6$gLc*kF$akBXnaI){jqL=dl=xH9#-U;$yM!eZW)i#x}OApW{e5 zy6+kiTu6INQQfey?gdZSp3aSJllAUVNr36@HYPP`NAcv%zH;i5|E#4kd++uZE!Jmo zEa77!d2IcOmmJrXWfbT0SQp8sg4E#CT1`k7I?T@mmoQW&iGN|Whl=`IgFkk!6Dy>{ zOR-~NTXDu1+`)>`HqQern3a!;3C7qs}(+&fbBX?iHsv3667K5&H&l^ zXUs2M>I4*}=`@9kkhDOmB7DBYnoAX(d z#l>LGH~JtMAT&lP`h>3A;{wW?BT%W6&?$pcrxj10V#TouwuCtZF^MLzH=J5|OdSzZ zbLP`He_{=!uosb50ilahi-7RO$Q__mBb?IKsKRrmK85p#Aizdsl7~RvKpa_7^Bd0G zK`?dNg?-`R7$eXW4r&H+RhfVJ4XIoh(3k`p9(0lSCO-OOvZ4z11QWgC5W!L+fP~9# zp&kAdf|F3+7a5);EM&)sFP&1qWqRVwZJTz)i+C-NES5S?8ppC4RgPx>mTj(wPfaf7E)Y{u;d!LWAlJdYxz`REy7#R{79b?^|B-DuDngUJG#5IY4kr$w2K&xU2HlUF_ zCa6QhUP&;@?egof4bNcM@FIjElb3VIpn_aSOUq0xD*Qd8@2f-UZyW4Xnk|8Mm>&HZ z8hdjr4R1*4FfFoUH;30~)kPlIvVJ0cvAqJ{!5xxoHVlfrOzxQ-$|HNk$*#8tVY&}# zTm9tYHu!QeP0TJ5`H&S<4+*~Aa_AxVJrojjJ@C_Y1HL95N|Zca(uZC5$Nh0;XAVQO z7u(1sz7ccG!DN(kVww?6;)WAvPAp5Vf@|0O5KuBU`W4OBhf@Yza%cKn&bOP)y3iYC z#S9G16m(l?P^HgQ44Z~FAe6;ENWPmADH}%2Yd$I@r-p>v3&*xJw){Cf+Pq{`3i_n| zS%n$>Kt8R9UurZ;Wh?`cbX63!k}yo6HJ!Zo6lZD50jiky2<9_5fewNG7DY3KfC>yD z79(Zpo5ijrKjZ5rppTE{0wWI$4)!t*Ccq%Y6fJ;dFwxO_=IMdhG~f&4f4WshIAE3O zu|)&#F{MSPmItT{^GxQViv0?mQbYfd*z({Pb0XC&aq9eVY+Ec)+MetrL?$jKu}2gn zBqAaLA@+^r5D)Zs!f0^N@Q)w!@Kz58HXgVq+|W@$Vao=IHK!Hro!6(`TIcY1{CV+X zqwQDK%f!k2>D@7pOBYS!@2-``?zf+-A)il{Epi9Wv1*vq3LK;CuukalHP%`@A8eC` zln_`YV(`MPWo2cV3AZJpay7FChlWgWdg<4o2Dw9&vI<3(5Js`vI#n%FC{r7=E8tY1fDqAOJeob+xEv;4xj8>H>TN;kWER0<2|I_jvSn!%c!Wt__ z>IOztV4B^TF2mp#VxUsrT!TK`i^NWcL9p#XsQY!f0o1Pb+2}5}IP3y>@K7Fs93n=0 zU;sVSCvzQtniL*5v}+}p;Hl61Kf-LTTub~V-k`tXwE*7(*8J=?yWS>!-)~EEeR#Z| z?a%*yP6aEm(I7$c3vILYjEw1VtFU_!5x_c-yhDF;i4)W4bxWl5g~1N?;C<0|WS} zMALO;DvL@>Rp&zk+SHNJZe?_O>AVdKBHGlmQN$El>9#(C_mg@-JE7IhBH@>< z_;zkKCqqqYVq?D%j68A}_%A(TI7!O;aD+#?E-lC5<`ssA0HRS=2p<{&ModG2KrKp- zv^gWmZF*cJy1Qxlu?p`1gf&cpl66{Dy*=@1Yt#!xVJu!|GU7K`qK8dq?RR;)#a^99 zknM23LV#e=nx@;RGVwvhvDNRXhm#m<>Y{SN;^nPRbO|C1G1n|BQV{Q_L7w3&q=T<@ zx(74~PMZ+hUV{f~RlE=I9G>}Q&pc++H}@3o(XROE&-ZrNi^a&>%{ zFLV!`bFP+Z*y_$Xbzj&IZBf9Y(>J-(Sr(*UHhw|dy0*AlO^7NSDz85YhnDwLx?l?bWS)*GecC-Ku)@w0lc`p%~dUz5bS0-M2Y8uTLHE$TaA{G5GIkYRY%b z=OPq+nA78{FW$u~SmbQa9{uKG`VyMv$Zq$g@=^6N{j@c_x4I_6?H8TA~u{0=b- zsStE>JXmPjJ5*YjW7X|AB`b-#-$il|=F%sTS+747?p{kCOAQ=5`a5u7hU$tz+D zhN{}qw32jMtvs4>S-*~i^S`m5778)!x5oUP#^W2?Ip37cn=K0^m6x)nQCzCV9InlM zDwkOoj*_jv-F@PPsi|&@x*-0+C<@KC!2$;!$U+)@B7^h z$!0HPA=4_}>Ppts65S*VbgKB+>2~Jwa9Xle4Bgda+0TYbIxl@46B*x+*BdZ7)$#%- z!z$i3>^L z&`ZHkhlxt2cbar|qK)@**$HQGcXTqdkK?5r(I@R;I2k6D)(cWWvvV@jvW%f@ON7EG z4so#xPSfGwb#28{jKaO$qDFN|n;S)>zNXAH4eo6ivC8%yAi-AAKP)zJ3=QG%kH=L? 
zDVCDe`{Hh<1rN*qz@^tqsB54QztL$j)exL-Ft^rSuOB}JeilBVst!SyrFMs0B0qPN zt!@LzaXF>a(L~}rn7#XWKPOG#MQgxL`J;*ff>H6eGZI=7-UwGC>tl$0F0-lgWNH2m z|F~88gwM|0gxf%{X*r~)UEaf~R_*3Ixpz0)(`rV4&EcRAGl$66-aDAKl?8Y1Ss!kB z^S#!U5%;QWNd4+PE(WtDhS4yxH;+_5-ZwrfulAZl3Cf3kgqHz{9TnDdnWkN9S^S5b z*p$~4!7x9>2*w9Z{``Gx@i?7AH-{Q(h~bxvQ+#k!D-TECeZMHDBeRMz5AyjGqzVlBlb4JGvKK;_X8>WZB zn`+p-OnRl@yL<8J*0nos)mv0e!19GU*0uM*OXnvKYvR4hNbP2x^+qKTrn%&blHKj? z$$f%7zN*{AG1JSi(2#WK?yUW4C(ErOWAn?)vX*bR(rxNuyKge+v^p|67Oz`LWHKC!&FGJ2Y~@QcLq*O0typN@ScSivO>#A>5rh; zw2|#A!?a+^A#Gggo59YQiu@hIr_x=qu0oS zKR)I9WBYC)CsfINh5v5v5L3ex-p4d}-=`B&A)y_)|8*f`Arsn&I>6GXJSR6oaS)i?~LwQ0YP97~;G2PZN~l4CehJ)=OzR~DbHp<0r}%AID;d@1BJh>L z$K!tJ5Rd*jmUC_EZJ?5O`dTX*u1bKyFT~C{=Erty=FQ27x5}j#<;v5%>Y9Z+2bqov zDe0vu@Uf?eZfpqGLn&ld#4P7&HT69Xqnuu9IY`YhxJvV!XpdUSG6~hwUF|0n@Hogd z$ym*49q%-0u>3kT3o6ONvlz7)emtcRIn5JEQU5Warx5-WrPjv$q8szX;i;PZ%&jcW z^u2=GuuaC1QyB`mQI)&S0`5)7?fyFuY&7a^5aqngj{>QkYrkrAHhvcmPmN9AyEdG1IaFkse@IOHT4h=z4pZSxHuKqr5a3z5lC3O>4j(f zPZgHQ@v}ECbU4_Eac|PZy$h)JO9HBv_msK2dRrp4@h1x|py`Y_2n!Z89d~FoV;cE_ z6_rCo%F`^tRD%(j)&)$cXxJeRnvDRL8#Ro@$DCrkq+2{Ut&F8H#nu2NYei=WjezAB zsd?2)z?bw2*$}U)A}$UUY^)@h+Lu%!j4kI|7BtYJ8?X$tn?(b3#6}0VfXZ~F)v>UDu2m|Dfhnyr!h~<*`_|6E+W-`z4l!Tz7Qub`>Z$wirVOd zgiO3b!H@A7Ru!Q<0CnGC+-7NZngv)`D9WJO8&x0iwlrtn2 zn~PCS1c|JKV6g4L6g$n9qQ>|cBY{6m0eO~aG*}9SG)hXR?hi%)@m^Vj<#fOwsEKKb z09LXO&8@ErFqg;;#MxhyO-)U=T>pKwDg4Ap*CRgC%@m7jXS9Vx?p1!*TgK+ff=07V z(&!-dnExF#VJ3$+V1y+uZW`4s`o2YNDHllJsSD3q8>CD39D#mjQmoN*#DKV)N-vJt zrc;)3jPfC0n;RZ+6rrRtNm8r16h>~+y8^?=)~`di5e6lxIjB?1p-!jLhxfSxSL&v^ zOcBuL|ytE!F+Zwm9{XF26-F~*Tf(Y6sQ67EDAKPj0XG2aU^Xy;0< z)WmhQj;9{dVyslM=cUV=zxQ%WJI(RnF)ra#_THZ}ET*%)Nq8>1JRCj9sDdhMc z0B%8%zSq%k?KO|RCO}d&---%2;tJU5bRKuvKM8j4b-Ulv^@9QS`2mizO`JPiu47@Y z-OhmZcIU;^z|#@#yFT1w%2n5J_ym?HSL$0*ste>J(!nzw=m@0#J~?D z9anRl7aKXSvb&USKOb+qHz@S$M~lB%()d44PPkhdb|XLZc7bs@E9j>u8-Ks5socfx z;JQC8@^9~!e77R=%h6#UhwAU@wl9W<|MhV3kB>LM+R}Zkg?_P%_i8)WPcQcQ*RA>g z`OPbTTHoPVfH*9~=YvB2d4J0vcZ|4GWwHz=>ZP!kaVo?+Pr7H4-%>6dGvo%7TuAm+5^z10uwIpn>4x_udQ1J5fw7)G z(Vp(Xg=JZ0}qVmw%G*3lHa9Vgs z$2cRuky+Zz98wfIcS`KCj@~Ib=J?L$%$~;F40Cj4O5n9v2W4D*t$_s)#Kw9 zZ_cWIx5M1iU1q+NSu-uZuGqJ@qd6tr7>bi#-WlGI98X~LFr$4Na~z)D#XEga_>%F_ zOY{A+5{qbcv|+^3Q5)l*AgnB8|@RW}hZz1>Rfu2QNrkhIS1JuU?WY z(_~kM*Yx)85LExHhtCFjM@t?NPf$| ze*N{x^jXVi?z$t}{2oT8AxBbxiuP;oI;oDJ2EYpyqB$V|WqyMD$H z=M-<`Qkwl*d)MuPkbC9cFGhsn*Za-9_y@CNzpRS5Q(|22B-tzyZ^@MS2}(+mMhD5$ zxYpJsNi1y?2-DtmwY5ZME;iOKCab`ibUQv?YGykZAwIC{uPc7bvx?lJ@D_Rgykd#$ zP;rPSWR_=fI#8Sr)=s;{wW6Ybe*evl{LFJP;$kBi?{iW(Ke9Q_MQPV^-8Z@hei~(a zS989mC@3$eacqGU$kqA2P^s2nyuuK7PzDD{R?oQ@g#xv9-s}~0J3p8aN#~-q<2EO? 
ze7CCfNzW=+^%2Ryk&-xT>anaB)XVDat;+1yGOR2UK4*h@<;Qdf{oDa0UulOxbwra%y z7Pxa0CQKle1zszBIl>P32ZXM|y8J4^@<8x7=EZD`Rm_!;3r(nh z*W=>xo}wq&HN=s~;UQ8)^a-ZYe(L(7B-MaX9<4(G*i$G9JBd!Hr?F^E-5ZMHlC;BM zA_h7!`x3JhF=7ZUg$*zgw!wJxBt?3QMRBPp#g%-KZ)H~MF1imEgPDhVlo&`RIaX^(S78tKV$`h+p^LwVS zg{g!=|F5)(WfeLJyDMwVvIe|~oRM?s+ zrQ8suuZyE!VbOp7^-tG(CZCD7eLXw&vkLE((#WqK&Hu;oxewR1JJZdxFWvKeSU{~V zff_k-YA*6s;B!8y>8F!i4~B>SFfab|LH?sY>0cEpX76}KfN3N&JSbc#2E!2=B2x^= zjLfUmIRB`^e-e1;YH4#ik0O1@AdqD;lK#IZ{I!)O-xdQ zp_L;!rU0&P7}S83GS=x-E5%@rF%ZQVEkbeKfddDQ9zB}Nc?|U!)-XUl|NQe@PhsJM zQIr7)Yu2p6l6>2?Z5X}|9Xf>3e$%E+l>EYo{mLt^y!z^^>({SG0Sx$9s*e~kf{(_) zh?4jUBOV5ST*PxLDk?DUWo2dIglk(YieaIZ(-|gVvS4PS4I4IK{6y<0_J#WKT(l7e z4;?y`_%>?BN36uL5XMx3*$pdSd`wJCL=Bko@GTZZXji{}{U`%cUS5vj9SiW()KrE+ zVOAnSj#gtf%*@Pe)~p%27t;`C9IT*WEnyZ;6X+$JMk4BjhP4)k^+h@dgF~ZsG^1w{C&4?-;RlfX|k!@!oMl$7Sw2^;0a}5nM^r7 zJ>8Nep$05Y&;&FSe{mS|-h1!;^2;yx@83Ul>eTu3=kb2))~)N;uW#MD6{|@6DJf-K zq7M9i2(U1F6!{^js&C)EMMXt;D6tL#5sZ5xqGAO-@i&l@lLJf0FtIo}G#DQ#IK%iF zE4>jKg#yGt7$QXY1t0MzroJw2!Hp!?kjz7%iBc-O9lx=(v^4e`pH3u)s=UO77`Q~t zicwk=9404<^c6-DQ8bt^5lKgcVu+&fUMlWVZ-;?VR<0;=Z!CZa@5}4+L0bwZ#2Fr~ zPx{P@7FuG#2;9)Il;Rr31mS1Htug$V$+op3j2OJeNFs{2FsXr+ONdXgf{Cmo3zqsE z0o#@J$)H!%CkEo9aTi@XA{cDQqYbiMTOS)@MrXsitm1J(&Zxa3XA{mq|#p_CxTQXVwY2aF9D_ZY3 zb)~;VZ0sFXkWqJ`H4~z%5D}j45uP65MyC{y3Pzr<|Z125k{wQ zPkUcTjxVHTq^Dzwwvr#)@hqU!oO^TJV zLjApq2l%U77^A|>_KDu2W@aT!>T1j`Fc%h@OA5VV-0s9Evqzq9 z&iKexZG5A0yjA`D3-gRwo}jd7qdUy*YRpYFhm4M!eW&-7mva{6n~PeT6O5otBRI2} zIi$e5XmD7~#OUP{W0rL?r#3RnGmSBAj7c$)*duF5^A6AUj>E^sMsbCIX|^%U6P(rC zH*-Tq-Kxe*+nE!6o>t+W4(NoDQMEAm@uo{BMlWq<4Doqd<9dwQGtARY-$wO?=VzE|%1dQRMTOOw8eIf>q3flI!Jq8AtyVfs+9~EdRpngJgI=J-z(}G&3;lzT|KV8eMSncNXgt+_qbkf6P1 zckWekF^4qQ2X_6i;<6;8jwLg$xbiFwB*S6SyC{cI|?9gy{fo8$2k? zeVohSYcb>GuwldCN5L6@M+*NLjwSpkN(kb1I9FrFjKSQ2nPRm{c!8=IIlEe=b;e}< z?6c2q+_(`pVy?m5$L$Z4fdTNn@hnCyVID-C+{J_CaP@Hle~&3R$)3gSB->1!I1we$ z98RXFga8Z9Nxb1BP&3_Bl1oY2Vbt>rFT6lq0{9Ym3Z`t7#C6QYcr?SPH6G1m99D8?OInRQQpOrHhY`M|V}Rbk3A=^78g3pvqYkT#m35JR4)a;C9{8nL zKrz`6Wm}R(GT{^oiST2Lh_PN|eI$s4RFO*6*%qP&N)m8j7!ffRE6p^{@fEWqv)VL~ z)>^dgF1j-TP#e((C*4FhM*UC&7B4)4;wOxk;j5)+*-P{qBnFj;l0s29Sxg=*1~VH` zPth}9&^cTg8&xz+MMJ1kyR>z81ydp#dUOwUT2oGMS zFo_hgAJ790-*KI25<{O*5A0#T;R@jtGGr{sg;KZVz@P^*L`GZD7X3y6-u|LLY%3FG zT}9W(G-!U zkaEBpLsS6NXcgU)D{MX$`V=m-0L8|E87?9E78d=lKR>+M zF79%U^P2?;KR%xN_qPWJ| zE3l$gp4jhQ`xC_G|#&eF_SP$a;x#ylk%@`qZanGJT`}gmE?X}m|u3fu* z`*upW;0lKNef#!d5PRvRmna~H>1L-oHgDdHxd!7oCKSvjufF;!?!|n8!Wgk(He*pS z6A|~|5}T`3SjA;E48~FyHKQIph{CoQKVdLN??3$E4=@H38CI_t-to|pBS%tLqeY7r zs0Sa13>kveZe?X9`UjH^7QzGpFrVTs^adV*c?!4V3YEL4_l1wY``z#G^rWODyvN4I z!c?xdU>-UXZJ}5hkrRA{PI%>&SI`5vh&o4)9*urOdr=IXjMkw}G-S(`Ex32Xh7IT< zlt*urmX^Xcv<>;}L zUC|W`Svqs(%+;$`4;(mf^X5&QpFDXIeLgnS<1Xb{xfro&uCe4O;3u+@- zgl-1=EVGnFZdFGO4)D^02h)58@AVknG5XK6z;d-Bh3NGy}90N|v z$_9Cy$?+f{U~xrok;1w7ofAAEJ(%kKw1kk$zmZrPf;og=c!f+4A}1C_2meR-Ct6mY zbD2-E0-dbbYVv{U5j7;YkbYyKE<{CW4}CLkjZxagN>@ZLj@OCd@GhQMAc@6PVXzgQ zG^z^{Vq!!RuJCpXy5UZo6_Lbqi-$u_ILc5Y#}cI>-ZDVtgC4802Tqqu21&(yQFmSp zwQo>b3e|F@=9nt<-I{&1W`9;rNe*0m#vyYUJz>*V*yOJ9K$&O9@t)~A9OBwxcO7*) zUJY=(8tB+sW$DFPcUdKkezBkbM zL5}%x%b=Hi`booitz*De)mYLGsz+E!~}iZJow9T@&CBB_Lsv`f8JX1?Q?_9CE9*|vE$vL zjlZi+Qi+FJepem;)x3mvS{WBx1s!n&z3+2eX>9vA*7>&6ajA>d*nhiM$mI^kGhz_H2vQqio2NwKN|k(WM`0VKw!?ZTR&58xa_J+qLBHWG-9kON zQAXE9vtLtlP&0E-Cv#esIa;z-%t;ZRj{Xq2{0|Ljc z(AJpT)|eD$=Al?KV^BL|T84K-TVqNqQ)YI~HpevbmL;3T31)#z=;Q61ZH!rwD&=0M z$1m+*PHAUO?(3W1%B*T>PQc^Zn^TJX^Gf`Sdzo{)nzJ&^QLv}8Ii-U!rL{4kl{vnp zcWj1AuRJ|YhSkhWsLS`w?dM%6sU7|$Q{$z)$%L4?+D3JcH?6HrshJsHJ29qiN^IS- zMs?HTYZ`mY@RU~Gs>ViHlaSJQqoBFBti6A9V{b`#VEdGi{4{e=sHZJVP4$+gnk7lb 
zzzGR+&v&0$*Jw$1&pfowAKV77F5YRSzM5j+;(q>lef)E~8MAY}llzCxFN>U)WR#e} zS!QsHSTjG>J21l2DaO;IyLWaob8w{5V^qw7HR*Np6PNbz&Wthg;>`Tc=8Q2>b=8gP zhDI*x=o=I5>5jhzIy~N65M%a=@Z`dn-l5a`_~xgX!Bi^!W`pgMJEKtA#!*iZcG4-=l zlI6*2NAelP9yuc1Cv8rZ5LrKMXcrsVu6J}xc1IiM`2_8R?$|GbSzJeL&a>gRo81EM zPmTWZvDBY`*W|lpNncHlx;MyoyU@5@;JH(1+$;BeJ|gUkaS>mRkN9F#_}voox(blF zke~pBR80z%w0GU;9w@b6dIaLX75}^+xukx}iwcx@LJc*~C>&#by&COaC6IDh1bpal zoCy)9gT=L?!vFm9_usDSeyg|pN{)W1x$Sbg{YIyNd!^=s=`o*I`Y)$DPMg|3O-iZ! zPSy6TLPC}+$Yp_K(YRg~?hk?;mm1qHHWMd-lx>@Zd#&o&#}(4Ds-6M(I~7dyk}Apa&nN+zmsCv9*--6Puizga zYxK*utUql`|KX8F-z-k}vMTbk!Tx(?-kW*Bmzqk7%cV5im2~_20s3yG6@!<`;-s$= zVs?`z%umGZ!+58(Y*keiF81%=AAS+j9K(r)Lxl+)=kWC2d+$AXgM<_?F~LiO>x+Bg zzrk&T`-E>FK74rk^l8@2=yI}L;0LZ>zaCy6+(NuR|NQe5+GYMDOjWo-dIm~j#(}eq zDU}3uJQ}kMs=_<$fraFdFdZ{yDAWVb6FxA?;Di~Osf$Zq4V6*|r=W7&PSz-QAIN** z%!mS<{RwEmk0oye$C@>3IRD~+OUxM%Wne$dz(+odZU84rykj-NL_)mR+D_tbCb_2b z=FKBVg_}fNn^<~u=o}h=GPsD2f^8_w?VBg-4-~+47>V17w{#R8v7BeVV=Uwwi^fd8 zoFEdYXTsnh!Yq`ur7Q``7l;Dfi}M_jlPXf1il$WDCH@d6;>d@=jkqO4WMqjfrtHCk z$Yx=-AhKMj{)*cv=fX%I%Gff9iGfIXDDyk_6@8ges6>?DoTy7L(TmUx3wIWsnW-5D z;3BgiG4zS`lf4rvLcPLEsTot4hA?o2uoBZW<94yvJhTStF8_x^tFKL0m`O{iCMPp5Gevbb#j5S1^iSHU z_wI_^GFB0BTdKU$i82kercetlL&fGa5n^Ov(ZBxwmp^VS{qgbUzr0cK!xQO0KGNjQ z=%|+hZOe7X>HxMSY4qNOOw z-wq0OQ5;|g%zM(IpANS_Wpj>D9_&gXL7NRO$BTApd@toMB?*oB96Ko@@m!hVNK$|p z9yM6S*mhA474F?Cv-mktIc`L6&v!VsD8=2zs-+=PTS<(Qi6NwFZby}vK&6tDDlvhQ znWnY(){AboA)a6%`d&GGlN<0gisC z1VcSaVj>tgaNvv?GqCo@1c9|VhPYk3cHtw+y!6sbxN`L9(IZEWV6esb$*@g~W*9m5 z?Ae3K;>8zVMB&}LcVpaU8fZKkBOYfHGCDBw1P!8;x| zY}hc&N0{UA2&~YPlao_YQm{rw`HYMVteP<|Fp`w(I}G&)4<2MjY;+Ocw{6>oE`@Ed z0cBtzdW@RAlv(41!i{^_%)EQ(6SR)I3FuXH;A`sm_~VZ;XrNui#l;M^VwxUHp9Ixf zqPTb`*uu~iVlP$-9tw{z2{_sa%kdP-@8GWno0;Vb-!dOJdkh_mkCgny(Muf!te72% zaWRa+!C@sKV)qglVH{0*dOC$?nUjbN7D6SYvhW9Bsn;?d$AUg^v3c|6q^wZ+mZTIu z5mvD^w5g>gg%zk4B;JIA-S4+YAYLLk+uZxEIB*g%je?8 zN0kmR4?0d(E@|tD1XV04T{2#%yvML3nq&#cShN<4^uh4^piT==;*A7k zELV^~6Svi1Cw)-@M5|R+Xzl|to+vjlagdrp=Bue~ws3CK^esxe@+q54WwPDvctLZ{ zwPA8}Ocaha!nM|>@3gy?YkIYC&5`U)xzCGL(%v}7W?d38=L;RTzbaX>0WYJx!})u! 
zz9-QBw5A_&+23;MyL9JA9{uAO_qWrd55)$Yj_!DMMUrWWUlT;HcYuH9PQ_aBQ;MAB+$C@u{p&ed0{G{lnD2cVpdW z+nPs$oG0QPm-8c!1-d^Q;{W@Z8UOff{hwd$ej`8fdZ)lYAD#2ngvk4|(!UrT_GuH_ zv6PU{26)dkwq0!z@QG8q-ZSJvit}82;FXr{w{`c0#(^J3+Ro?sZ?p?M6KcPbt>5hC z{&q#@zg>CmUU}Hgc@`0j8bOf~!=p*g&=Y)K9u4_VM-cL=m1Yv7WpHZ{K)t??_MQ_U44% z=KLmRMS{_{nK{G^&I%4l^#y19gIh%zU8Bryjm?3rj0xVL>}aEVl+h#B=-u2L8e{fI zGW%zG$7Glza91O5S%SA;s#%fZ9T4yBgKxu)j$xkMXrouAF{(0R$@u8nA>q}T#^{db z)Dqv~cIKqE=EUOAh26{<$wp}}bM`?0l78L=ZH>vTjY%!daj{0Xu#mQKMxS)=&_> zO{yLjyRfCVD#<7cGukJZ16&ef>EoL>K5FR`O_#Ux z4>E&V$Axt55IVYh=#(_GEZtk#%R8&JcT_+ByqB{V9c;U>I(cbN)b2Ay^@-7-1UAHWC z>FOp+H)htZY+Q%N4i2js9#K~rUOOhL4!zOGT=;q`$#1z@&u`f>eP*Q)Q1Yn?-Uu= z+PN+%ip!;@GGOOguKQMg$em*2c0bScP5~EFv=3eO-6CMEVt|m=vO!T(aB;t~?XXI| z{DyEJ69MmOQorSs{E~nD?Z<~N7k)X`cc(;F_4zR0{YepDO^n1p^=B16ne?Zz_P*}e zDFU{tY(ZO;^vV+|s0y!jk|^RjA8)_WE#PXlemnpr-ETYeiz&7nT>`G>x<8H3-W9Gl z6-NcHH7bngDHT1mO(8JUx~UI?k|9e=6r%OGiZgma!7R_ICqAhL#z&NF_&(wODA;zZ zckuVilYe}y(f2Eoe|)6T!*$KkV-IG;d|4Io*!JYMV>n}n4jsbD`t|GaM0kU6U+LLW zSrPLKra4S8xRH|@zJ*m=w{AtXIES^E{y2ZZNv4_#?6g9MaEnC;h08}#T!KP4C(i^m zV5VdG49t?u&WyPXb1@+l(!X&KY#|tegYGdUS;+UMAPhkp?u^j8p!gvUD(+HJhPwBb zYCVjxwt=`U!UO>XRj5Hn&w;x(j3Hv^P(u|L5?w467b~7y@AeA*INtetmyFZt zppp>}R|rEl65xT(s{12>?f%n&5ei4>tF3oSutVM2J7`mR&m z5~QMjg4DgC>J_0Z=^^a1mGdTp#8sLk@@&&3Z9|4OD~%NC7EV#zn>Gr8DNz*Mo{CyC zNIEKTSH$Xm#sB)xzyIrt7ytIofFGZ1ey`kje{9$fvtm9gFwSMT-$`(84wUiV7^fF;20VX_A|Y}oLJKm6gqfddqSWl|&zZkQupfBkh-g?dl}#(zvGn6|L4oI7_e3NW$; zt9VRj7+p~ww&4-w<>gq?5+mZO8RwWIU?fGlU?B!=Ob7Gk&BIsJeWb& z0WRWk+1c5+9V=pnCSe{yCFlwg9!P}1+CMHX4qbt#j2t-<4`P4}>VdT=hI?TlgIuWk zl9`!_lIR~8gI3_VFbPjYd&vyJp8(Uukt0W_PR3k&unm)1)22=FtPvwd3?Dw6(*&mg zRJCi@E}XAjyB6~W?n0gTqu?UC7gtal1`Sj)bm&mb5j38Z66RkfgMttT7Vk8kfi^ig zIV8H^3fK5VIq*9{|1-rBmynon(GU1@7Z(>ZUp5XZx1c|nTpOK(uh0sVq2@5jHh6Ta zR;?K5MD&B;3BeFjaj20?gn{2O%7#H?jC(?(h=DN85RoF@Lev7SV0ko(91=@n7Z+Qo z!y^0;dLrgVoQm`wvQ5YcVw$9eAP;(stQDANCEBJ!FX1BwL|Y+9Otg(oU~VR|Tu5#q zK8ADVlVbc4e}N1;LK$>vLw<`D5JW$!VTh0wdP5B1kp?-5EHx4qXc!>FYF9PL^4u^W z2p`FNv1C;qtxsb_48)2{T2Y?^ng|1Ju<(`@^*OAGpWw%gpDW7a-;;sfbm0i;5RI`8 z%MvTFh&YMG-oZZ$H}x8-{-OZ$6IuKgD!TBX7T?PHG((p04Ou#bP>A8s=R^%;8e2-a z1eaFSXN$JrnueL98E#4KKBi8SAea?T)Fv@8RO#hb3w^EH{FRcAL5>-kzQ`^o zU%Y-NTs!Rgc1r z`lXh^7g8PPBgETo=e|Jg&hYrps}k-{iu~i6w13?G-Oro*Ue6U@RhRz9d*gn3HuK@u zyjumn!@6UaaG!MA_QyKz7JClIIIpz~xZ28nTxh4_^;d-Zc%tJ{f_N}Z?&jYr2)REs z_Tl#4-#=6M%~O?seQ(y+i*vrKN&MRf<9}My^K4_s9|D|P!)*tGokxRQ?|W?5X2yTk z-*ab7$Dg+p{ZJQoD&BRexxT<5lLvKFQ-6(O4!2j?pNFX=Q1L<=FZ4c27q_2uC%C(W z@=|qwjFi!n5yaWTmaQU>rYO8+sN}uKbkf6BNKvH*Spj9@4=GN)K`+2e=aB&HP5*0WHmO=|*K^Z+Wt}q9UH!`ZG$iJXMeLFm&w#2`rqd7I+?3-frZ)Oba z8#-%vcx{QV8U>0&7j`hGbTX$83au^)T_~;f%$^xv^M3BaG4ZpT8pBXkRdgLHFZC~O zX^d}TRJJn5_w>y~W1AWSI{T)yHz(zJ=l1f>Q5>FDaYnBwql-T%C)*qw?dg{6ElxH| z^1O3qCRSG`!o8Tskv&ot%E%) z8p~j!(JGc|VodGU7K`?@T(BUiW`KXm@W>?_Go@bcS*4fzuU~&%JADRxUHqITONAvV zzO+)s6D?FBMY9#&vRXKwRWUnQ%fIh%UP#hz^$GrJa@2QAlfGFHf2SbibcBp9T&z@I zmJ8<|;X0pWzgO=4W`5lF%Vn7_Cq#ZWEbOzv{?CVo;{2P%2|qp4_?NAjf7zA&r%mbK zFHinrbolLpkn6ebE7^`q&1KS}D=i%|;;+ziyN|qnAyJ&r^_?Ps;wLXi4a48Ch4*aE zk6iYTUG@*``Z0-A1RM|nc;7A@2m2KN=U;z#_;TU53!{Hnk^IMXS-eyTni=oyu?=!DC0Io)xyOTd|E`)f*Z<(Nai zkSs|!=!y?rj{OR3*;$XbJfeUUn71DTLVW`0%_>}IWqq(xt;(!~hOAZ{{)kGSvGBJv z*7^&IP;@*%znvHI)r3fN&X?mNzL^*I^Y5Gd@%g4dKGsM=STkepmYUZ)2FPHpQ1MBK z?YPr%RFmYE!}5u)?Ly2|cncgPxQ?V@;~?IGu^ql2<`+0Z81UDuSwq<_c$f?shEGLJ zT^#VK;LgDfK6>=%Pe1+ix4-@E#EBE|cu^kn&fU9r&!0b!$q_Cz94*v*@Zdp|gbV%R zi!Y+und*QW31^-(8`4l2^G=O-_>G%4Z^jhKnG{vQ3x%oN6NC+ngkr`Y%u1BcWv(LT zA;NWhq!J?&C^NAUCs|H{6zOH!Xq+&k1f1ACd-mWQ4aC%muV5Q|R}yGpGfAHK2;-S& zkkP()kVVHutI-@nD=^8*=tm!b5DM8Zu#GobtW+zK7Z<71LT>>pl;_p)ME!xMa8HEp 
z4QhishXqt$(RZL2SS6~aim9{3?8#y>St4bkY=W3DObja&g@eW50b&3(MoL7uZS!y?^<=X?>Zg4whI_H+?l zdWasd4ez7GsBvOkrKqeB6^ux-5*S&i3m?IJ*%SfpAUdG0PzF}96@-Iu5xvTOz_X|` z!(35M)ILFe%j&Z7X7#v7yvl$xLg>%y`iF7O&q|DYW&Ym>O4>%50$VE8kUmny`1DYE zDIFCYk)gtBItyEWVOI$bZhb-ht`KYQ}1_>5X%dT|M|Uv+HeC6PhYxS_#`YN&3Z5DJh7RipSetK_x*- zq!;!-fv+^UM)n-#{B%)n&uZa%)8U+Cla3QE!nSRi`-m>J*7GGkBNc8-)xK5wZ;h2b z07olbX4cl4NHUL1QZP+zdu7VJ1CE`VoGCDM!aO`0mEezp#txOCNwO1$sz1b&Hrd0S zlrjAj9SC)5^%Xj`d*Hx<7-}#M zW8}ezv}MZ{@&K{iz;LvC_iid*9X@;*gCsE!l*ep>s<7_HU3enu+_r7oYp=bAchrE1 z34=C9ISSjN1`M{O>+jvW7qy|1(dxh=6tVZ&J@FVPfJU~ zz`k+gMh1amf`b9DYWw!>c<#=fJ5m43FTac-np*>y#b6%RwkSM){P@Jg#Q6AlEReCJ zMTO{`!Gi~5sf%jM%gd9KlN&W^gmo_3g&7LQpa8lEi(m917Q(m)v(W>TaEIk2GhjSN zlQCj*21IY84`Iw3Z@ht?VLcq@u`1^jiPph#+>389iDL>z4I~RO+IQc+eMH0Xccgs8 zp+ko-=VGF7)~p%IV9>ys>Y(sDfl~be$Hn4wa6(46s zLRn!-)RQ3;Lrew-ttAe_%+8kJEgFlOaS=U3Rt&)(oZu(Tl1%4K#0FL6se@1tW17$+ z2C!Jbh!q865iqP+9u2RIJb^K7x1=qs7^Qj7I0)8382=`LPH^maZ=yLJPWK@8Mfmf^YGUOwW*F zGFCiMo5aL8A?7JX)+K79!K}B=A!X3o$+Ug?DkWvPLQS%B)E4;`O^P94w*8%OZq)2M zJ@%u9eYb|ylWT+KdfDaN>2Ms-Y_A9E+kN^zzw=O_eTVKk8WwP?d+^6Wjt@MJ^Qn%H zy!v*{d8;z)VvCS>f?V%A?QeSmZ|8aLl|^1|6MQCCKk9R=4{&}IZvUz({CHHrjmoGG zlS6KF4*Yzu|5oqd`{P1SHg)a`bDWRV?hc9gC?)u$NBcC*b-ktYxZ%E%<-D5VJe%V^ z*u=QdTL0_*;s*;`-yYEX@3+@|{Y=a6mp8vZDe7)X#7VdQX|nSTo8zqj{a!`*=AfW+ z%>wTY^k2yhxY{Z3lU5-IT<%w#&JWsz+|3KR)hp<;lE5FIEC2JcSvR|i`?H#U|7hC1 zvXGzG<(+L4`kqaH-D!WqrSA~|C%pFStz6&DjlG#4ay~ci$H&^7>K3v&*nQaTsxj`c(*D#?s7q-!o3hQdEwr^S~7V&ho5tF4zyoHL;+f6wkT@{~ppyJ9FD(EIn zebrw$>Qq>96U8WAEz=b0#nKrQt>Dgi3X?r$&Ar3ojoy()=l160cIKoEV^~LXa$9ee z8I&dCi9$MfgInMvMUq%XM|e6#8C@ehxf5e+Mnu-OGR8Oc4vsNr=IW}@> zKi|Bj=HMK&s--b0-(1kkoY&nub9O>a4}Vp%IRN#PhgNqor}s4%7I+urn$x=(bGmq^ zL>gVwy@OC;PjhaTcUZc2NTkup7o1~;w1_Y|MtC|#8C~#-GkYhQMbqMI79=h0>YX78 zBoRx7MlKl^Q9UuHt~_*6l2Mf4>C@b-%r?g+dHchRY;)Y4#F`f~Wrm>2@amC~)kUGR zT9_kXLr3q_Jl|}IBSqI#r=Zm`_6G)ZGG??jC&ilijm<%EMz0j3s3dG|Ma1F^V?x!%c*y%kyBk?H2J{{BT3p>=)DMcvI=140)**0k=mR*RRX*5;cFM?^|E=uo@) zCp*qvm0D97UW=Ywk^0E8lx4@CeV2I4)6-{k5TZaaphhb#palvOSfu{7nZL45ZC^gE zco#=C*ZDZx?LyCk*|9&YO8I7S!tLI{AN%cVglnb>L0Ta6mxcRGl;d{akT1tZd^IuZ zvmyT5eLXjO2jA)wa(96F#i;PF=g0l@yC#3x)#9&jwEksB_K#1d-k%bEuiS_K(`)UV zmzrr85(Hi{A9M#7URSam=OV;$&ACnSTh=Mii&Ll$uE04eLXk!-azlo?tz!n zY!{Nm`6wAE^qL5GMdc6LA>4a3*8!XBqaX>YTx;vRnxmf#wliSoU0uHrFD*PBYT(K61Y-~_sRMM(@TC+UzBv01**puD|Bm$dd-uGQ046> zf*m(<-8Xv#-smFJsC_;%?7N!8pPy~=^O~lAe4&}_`Mx2SvmNK-We}Fs{cdVI8>)Sv zJ5I_-OsU_pMByzXMvQGOgDHg(@ay* z1rr|%<13tChD8CG$Cw&sbAaK4q=^#oz{E{p1k)%3 ze8_Ua6h@{BgM;9NQwRmkK@+%?*s){Bh7B7q{oxjT#6?t&TSy7Uq>M!e%%k^0X$Qw8q>hq zytC-sTC`3RX&psJto^Z6?<4xuiMm;0)(|nILR1VDLvf`@6j2wYpXi6X>`LcDld&g( zAtEGEB=#5mr;F)TqH2H`&_nd-AUdEz0yQl}3*ry>SSm_U058S?;oE$XPYn}hNv4!; zw#ZHw=}kpboDfJsg}9D$Jcx7=oD&xkE!f(bA|qlaJ>TmN^`mR`9R0TCg!idJU#9TtiHc5Bq|yYmQ?5}@rB>HRO4RE6T>486XOVFDBo#+X zQ;^GW;lRC}gw3h6f=4KR??%n}mRm2rj(6Nn$uG=( zmZ>q}x-^&>Q64>sdN6|w8#WA0!$sT(WAH3$ zx$oJt=kVdfc*ni__U&W%6{^8&-7(PP= z1x;lp9g;Md>6of1I2hhy<(MXDfSOTf-@bh*)iSXDKrazbz&pkH8BauGNDE;-w1*%A z3?N7X!&shAr?@T|EaalFfTbl%8Cyn35vE}3C89BubY^lQT%npS!=~_DA}XXXp-xnS zUl)Eq=uG_5NCC28}Q zkHW?5>}&=T(Q@j;kmo~w4kMi{Ou!{z=uzVgOA^5LkDtUM{a0ua@;@6IU2eyuzO=N@oTh!9Z z|ECl!VGvq~l2orHotQ)yE8LsD9FZ3CTj)d5LM!UWvfN6XM1YGx3Vk;EfmVv5h5|J! z>r+Uttj}THkOMkh|Uwc@DLoO=w%R+~&4jd^g3UFO-@?Qzr!eOHwGaF~9* zz3Y>B*X_KZYb~57eU1-9Y@gHt#R&^wXN|*V{WEZq7MX5RN6!*R$g9OltbJ*Y;_m^G3dT zshRzLP1hT}V{Y{dyj7n1)25QICdPa|BWsh>iN)rD0NcGj=7km^A7%y}3w3=lI_c}0 zwzms{KCkqCUDM&a`AuGPiR~)W${sB1bp2{pz{er>o85!%3=cn(Yuqggxe_lP36LqC zhsrrx`X^nK{#%IvdO?N05{oNPUQ;LKU36EXZKX<>4uxAN-$RMKN!smfl?$NDZy`l? 
zB|0Z-=&c-;Dy6j3PEn3WsJ~r-(9(s-6(ZR)FxgX{>Z^z`dNef$C7b;t&8|jpW|-05 z?`fT2_Q91fPdi^oPMD`X%A|S+rWz#*Z|T_19NXM1k2ZUxcuVn)@|i~YxagXquz8)m zQ+xQPm4_`E8c{nwx^7%_-I%D_MafI2#?=i9lOV>*#&z?OYWn*Z^z|+%^e${+j_>W8 zvoL8{Rdj6!V|rh6(VWEEN1Mt7#}i_fR)*J<_!s9IQ(74lVvL^6jUjo){9I#tjybNa zIkBg2)}rLPm8naMLg%J=Mk!U6VFlj#<)PIDzWH5^S-Ix;HsD}!O3w!&L5CcK0)q4rxng6@_3?hW+b=oWY-%W*l)ex-%u zdMEdtLeCdt!oRIaeE351KX1u+xVD)Tc&UoGTjsshJNRlF=fy_aS)Vu^BsF+tcu{lP z1za)3yP9){3e0&~K^Z4B=jkBr(+F`PN#-895GT%h#VMEgP}AR2+>*7zvAv-GKfe3= zX5Y-KE$v@Tj{MV>R)70o+<%-L^QWz?zpG7rFe~P(Nl|zE8&`AmOHH&-4BKJhIxO{1 z+$S9R8ISgY2T+Z5WSss(wD>*DfS#=i)S&Cn1+U zAzzFN|6z5?pI^?v3!Qd%fN`azej!<)i_mX)KNlrVxwX@p{8s%ufmrmUB8L zLCpKipu`;zZUN&8PB^*aI(7TeMsijyy?W{fFk>U5hq%LH&8pV4&K((|DBLljN+gq+ zG7{PPid#1PD-drXV4@4%F6=hBoOI$ulTjgISm|T^9w*|ks4o&lSn~H4y?ctDxb7Ez z+y72G!XX@lDR4VZETTxHh;$1#Zb1P<7}PfDCc0ILsv1!-E$7>(Q zSpAP*e|@p6qJzx+Ex{JzEm3OorH@QZtbZ8ex|*q9Owv9I)87iwcWKVUHs?v3^FvKW zL@pDq!7AuRC4jbPDE*Lp1vosWSavg23{I&`Ybf7`DgU9LiVw<%m$zyidsD|I? zgsV)V2Qs}*f|S|U+9;mwG=MZ4ci3Q_K2ljbM`h8aqCf}UDIv?CPK^9m2I5`}^(eV>=S~c9+qP}PxVm@m-VGZzJpcUjGiJ=d zT^RN&D=Vq2g*k=sAebFiDsfKSi1HYR$vng$jB$?>3ZIJwCeCp?24onC$Kg6!fsdHZ z7%PF1IV&p*C8_;_0hts*uI3IMI)pNW2=Gp50(PQxl*Yx=ckkYf@p<_0;hZ;_%NVO^ z+=4YY=0Qw1IKh~|fB$}%jjzUx8H4t6fdOj2mbjvYI24jT>}IB@jnQ8H@K$*_b< zAd!)gm|`$!pb{bj)I7m_iaz9epCTa?Az@-@3gB3pv|QQ~gkhMEmHmhEBlx?cLNYfP z8dIi@0Rsl0b<8A%i&a%sFc=dk0Seqpc@v89_ErZ~Tr9O%)X7*O7)GuL&Ivw{RYEon zVG2@K2*ofA%7Sii5x<9=oE)MPR!LGsm=c<31|yP)DG@xes5R7JL17ZTsA*$mcD8UB zT(@#e6P6*^!cZn+M2s-vSCrf3y-ZroD`-1OGE{V7_9E))FouXBNc^K%a{UdF7AwAH zWdpHX!Uz@&@<@G}W^2di->{2@5i}N0rXgo$RfE_iOE<>Sr?ns;o-~YUB6dL}giaCu zkMLg)?+e2P%5dk7c?7s$MrI2!6RXaDYcK7z`hoB49OilCt|Dz95t?|*;`)Pb9vLi5 zD{1Y5aTkv^v)`=dudG+ZwfHUnmIK-XtSpj`B?Ut#keyGR9D0MeNHUBhZmD>pNy0Hk zD&gA}$;G}TuVO_uPN||zQc)_4g#L(JH3SS7jvBQYu{^+iFy{Ya?meKRxUz2H+o8H! 
zp}VSDIp>@M5)wiZ0wI(I2oO1lEI{Op0vH?T!87B8vB4w*#smWfhD8ALF_CfXjx zIAO-)@!R*FQ9XS#Z@u^ZYrQq^uhpwpsruGUt-j~nUHj}~+M<}3*m*qBTRQVPo$iE( z{*O8sX!C5H^nuBA*skAaHDC|2*l50*Xg(LJZ}v5w4K-izH|&dZT49nlDbmSsQ&X_i zlkhNq}m=#@cyF6@y8WO->cDb6?=%iiDyxVGA%Z#^bV*c@$HQy|Z`ErEUuQy-)>zkRUq8#65T%vO^Gp#pB zmpWMSx;Nb8guCfXwE0py$6ZEg9^(PUy+zzmPcf@wI5n4N>8%vjyk~22XZQ~_g1p*I zXt?Ez;OKDz^~w=qFQoi7Q;=ZOgmTL;PGx}?!4o5+6RP%1 zS8Dv^uAXw1-8t2b=OjvrlKUkn!`y7?F4h!RTe`Q>DNO0nL8*;Y`}rw_zG^p5Imc7( z6sVT^D4jVIM$XEyO-xlshN)GNO1~^yZJup%naivab$T~t+EDMh{vLD7?X!n@*N^b2 z%U7qQ%j5dE&g!aC~Nhf8$`!xEzTzPV-I;X$;ye07s>r>{9_N|Ms4fM4Y#L3l#>dXNib(OAj+R0r+D-XcvFsK-1K%i3WZOe^T`r#uVxwGP&;9<-3l(W6%PJT*(-8seT5aDXg%u>e| zsWZ#extZ!%#W~4Uq`+=3*K|>)7AvzW?eox$9C>P{GNGd~Ia{8TA=k#+hF7@EdNHDL zUdX~SubN1;?=a8$V%MquN=cAf9;Wo}=^51 z%MMd3RcoTZQkZNT8E>mjl1IcVLtJfXJgUnfOm$8Uwf9a|hGPQPrOY|ksrGpGJ7jgX-9F4WL{*1*4#(17!Q;!i*MSl?gpKl4E zV1Zz{%oIcyxR;;lOy`0O*Sa{~?5TcHX1knVY;rQJV3y|upKYF?!5p(2uI4y=QK8%@ zwp~h*&UmsD&bkv;{TXlF#TY|NN6XEg%H7eP_b2(@o8WV2up5e9&2hZk&U7JC*X+)Y zn{`K+>9~mlFU{VZyK>5z9n_gN39iT{j&fLzTXd&YcG``d@nFqv?4*;f$!I)g)E$>t zliskCS>9!amBr=H9y~ms6o1OfuB4g19P0jcWALN8!21)t?~d_&FxB_*;)w5;#66l5 zaJ#=tOGn3MPcD9o7dnh?oKo2tAO1os%^vKO!j4LYeIg6e=gi?Vp%t@3#P6&TaZE=f zso9mCbkrTtOIt-(KNJB-xakeO<0~apI|+;&}e1Q!-D4eFQr% z=D0`jTV{oX5WfXJ9{e%VzlB+WgG!WaIDQ8X9Kcc!2WiTbDR3rnk&-eH?U|l1V&(tj zlTUW<-VMj;-FM%GQwLWNMga~gTtOHJxP_D<6wSdCfCCSUdHC?*y?ghf@`2*ej5SON zY#&Z2T?KqjdL)Vi{h@eRlIBCT)GIUrM-iidI|`4E!jW*1LTF((pKr~<2m1&!t#L?5 zDVI1isFv&+MZwcvfRZI)AWN1k!RY8u^b?=r7J7acm?!mSc?uO z^Gb$Zd*q=}d@0i+Ws63)NuQ0L;E(@Bi-5@tie!J&$_OX|I1T=D-{>S!N?^e389~*BmxyI-C9+du}`%Ka>sa z$NKeRy^2^-0V^nFrIoC*59?FT%ClKEaY88P5l%>zg|t&hu!U4pD6cfi5H8`z{Lp`m z-$I%#T5JyKx41AD4A9D~gb@+UqOn{^Vwa#5Qk9|L9m+<8CMZ=CPS9={E2D@ijFGg6 zNr@$g<#b_P3Rz(>E5?WhvVkbpnRU)!8I(2}!@&uWVJcWfEvv=wi6?{B32dP()%X)F z;>Hw~LL?dOQ;f)sxxG~X5*F&8fB(Iyf8QF$l9*oQyMA4u*un?!Ma~XTt#G)yXZiL;6^?#bgSND<~q?Tr{parHdTNvnB+;qp(SM-cY(LG7Z{hnNESJT=OEzejZ(#6-NaxE z1u3YA%jp=Lg#Iuo=ior)odmPRSv=+l`+q=hVb}tb`pUX>>#*cv!5=YV#PsRYV`F2n z%fqH}*sx((cCfX_vcGZTM$*egFihTR-8WC+U8lWQ06{flwOEElO%4Erdc4@<4^aWW28_e6e2`YEtED0g*EyOb%~fQgeIYT#Iezk6I?-6R5Pi#5GF&omd4egNG?KS z@Q#Th*%k_bBDM?#7HRpQHF}8#M=WfWs3o|7&;X5pq5&gqStwfIOxrje4J*(nG#Z|x zVJ2;%2~F=sgG97E(S%xPED_>$kW?M~E_h$`D<<#|KSQiG^xSGuYknen#-u?yZSfWj zTxvTT)C5I8QD!B&NO9vicsfGfAH1bbdyoH9-R+mP@TZmRf zKB0!UXbc}Bu~0o!GeI3h|CTMF}$$ zGvgdCO2EYp-ZM(8O@=j!<8~+Gm29^w9URX18Mc{B+hqMRqv;ciakIhv2WHu8GMtQb zd^pZ)tEcmcNasT)X^WHTHAnqgt6^7|>0qS!dV&0)F6Zvp=u>$vuN!nt3cC=R^kW00A2>yYf2j>c--%}{gLQk79&a<-c-BTy}I zv!$avPqKS?ag~b^1u3`q3gJr;qEr-Y)aT`PP4wG;c%7+$jMKqN=~3s7NnGU$+@0#M;|3WNG(lPM))ZOzDhp&jCX&v z7+2c!&J0dbhvwSHRk_w>sG~z|6@f}gpi+u=S6ez>;YzPuWom_e&IqrDDvw#o@(2%G zM_;*1qC6D!7rRVb7}hv1WI->_sZ)bzz8o>X)OBjO(kDdek*199>sB}1yS~JA+Hmi> z(LVD#*hl(Ub9p9ir5N}6$XyiYgdnBV)7H_&*3QS)IZhqoVasxNZXcplM5%o<<=P(h zxwC@iFH5M~(|*SO%o($S=jN%C=Y%vYjjw+zwtl={eRunuV*B(WWp1%Nb6qRmvY@s$ zkg+)702AIaUOb>&!G&HOJ{EkER|Umoy2w@ZqWI{L*?2BccPYVmDam*)h#hy-Z(){q zgysqEUBxWN?fR=7%r}ay*Ylk(MjMVh=y!5-!eN)rbi~1U+D(5U!qAdozShO@dV%xR zPL7w;O&1dkJd!Aoopxm>9CgPe{RtD#F?8C6!zrg!cHBX?pR3?n_A~QQgYkqT=fI%m zCX?Z?LE48!gE=-C3@07gaY?s>>p>Zo7MK6>;Nkg%_$J1KZdy88cw4E`mGxi4VIFp}P>xVGT65gE?#$ zAxrCoukHqnI>Iu=fdU zArak=c>R_Gf;@A?Y{Uej8wZWjVTC*lP-=rjR0dr0|w;jun2{vSsAH!q&h$f&&Q$4}KbX(J&ME6t)kx z35Kt!sR>RdNeIF;;0nsGTD1z+0ZuHMK%KpM^&-hQI1(WtA-i_%I)DB=>c^+>CE#|# z?>c()=(%&}Hf`Djn+Wsz>8GEPEu&=0a4%60&f(mmCnT{8uNdwJY#)5fQ>RYB2EmIX zxmu0*s#(J~-gpBYLtB(@h%7!lQp)KAml!TO+C`%<@hE^vpeKg7zDyh|R;(a#8{C4P zknButZ7osaF*Bq{k7_AXhDOwR<&{?`X9N*%h~z_RZ-fP5&MDN0gnUUS4OgBQ;%Tq9 
z9z?t$?;QuFIHWiuO%9Hp3SrckjsN_9?OzU=$z56332Z_kD~x5a9ax9aZ1gI&Y6IKw z5__qdRS#x^XRsOdtiF!b;Wd(t?8rJMu_WTFP?BZhgb-(jvQEddc!D5E#+IUi@NQ>z zcjitI3SIGJo?1u|UiegDijCPcP7A>-nuH5pTCSy3mKw<-Nh~Fp1&6XwywE7F=drvB zR#DE%No}S*YfrckZlv&^G?s=@VC+S#2xF&=OUW!5J;w{@xQ=4oST~&Db~Hy4xOh>E z6wQQciTOhk#{|LL369`=iEztBkClco35y2oi(<_zCDBPw2r;=L`_j zHjD9mxbbodU%Rk)VzIrPYT2TimKeE6R3Cxv;x$MJiR^PQUJBChF>zPq1!kThB8v%l z31T{04SR4m~wZ~6qk%A^gF)MPWB_LnZkNPjkPbb36;PdH$bC7_Zdp~RcU z^=yrcm}#Z256VU~4A`W=jOqxr!bp3)S2x5gg<#^mAf`&TyCjtOg5ss!oJvdorEEn4K|S z5p&c-cwDg}0kmEy4;wlAMHkqbecQ%#kwVJ&KYU=J#ygfZtb5hf)wJElGFw$uh2@vh z^eZ`1_}~MQ7Qv0Q zd|^4o(u#GmU%!61NNTsw3vCD3CX(mHQi)=3z4aFA95G@9>CkH0Tvz~y3>ktw(Y9^d zMvoqi^5_Fe>B26+65%e&hy-(kRhnX>u$-=4yB60miXA(4pb4z%Sn6RuNJ$>cJBEhl zVq#)YIX!E}T^JNHLJ5P=?WCk6lq4kf+$~r{yECf1Lp7iqBy~%q9qJn)B&1G^TQD?ys^Kj(y=Vv{ zB|#)YnJ7L81!x|qW13noT_h}sz8%ubMHA1r>dk5>$D%e$xmwm93XsvD6>?WJ@I(t8 z(tsL54~PRpZ~;Lp8f}K41A2JW7P@I$iPj$yPINYp%D>|+6!Jr<6Uf8V z$UItP5J67_n`k+S=we$O=J_`AI0QOrXp9C!5g!kgb5c53H`wWA1D`8&#B^}`H66IbF8^J+;BWbdNketa*E@%uIhzY%c&s8jgsZh zlI2fE=_8xrU9)9-jPtTk>o&!-*F{=svaFD#GeL%J1|Af=ftjym*;?A0Z&ro0v~xNh zX}B@Y=XRxfslD~l%!m)-oIlW+j;e;HMEPQ*5!F+Yn?1l zR`+~%e(u+eQ5)UNhtr&|=QtdXuq|ZLhi2pLa`~{E>2Qp4F;Kr$k`@TY%P5}Ena4X% z;jk5tA%a0q73fO709YuDD4yv_bZ(H~yTaG%E>7~q1E3C~WG_KIN*3-xl|XOOgi>xV zP8;T%>k*>zVu8Ah;Wg;X8O!5(xrKOnt6e+UNA`4^8LjjSQ+tLhy#nNJzDn0{wRe=# zH%KW9ReMG#y%Y|-B=MX+&h7l=LVvlNt2NEd+CD%j4wZZO$en$ZEyEPRrP86}G zhRWr!%0M4E&tEAFQ+jzTolquR=^d%`6&a3lL)FSyWpI0We4aA3K%QBu&M8!8c9N$S zt21kS8>+n;hIrNw^Q!OZI=iDXq1-;FpIaTSRM_W~sM_v0G&=1e(LFm7eaj2VFqbs>#i?4vu|C*-NMC>*Nv@{_v-DW!f&w=kvBpJ%D= zhVFUF*?1+$LsOKIG0K246%Ex7@t7ZOD^;z@n2-#4LZ)0BF7iZUmb`4aDe|!4-VF=F z=J)rW7Gv$5APXsB|V z7cURbkjEF>XBDW^;*~+!@}xkuG+5=pj@2nTRvw6#yUde82k_pOWvSykah02i?c}j> z%HT5B>1$GE-zph-uFJ$;K4U$cyA1cPTNKgoGLOJ&NLGg@$kl^9=MVL&-}v%B zBy)!k1*2mNvz)Z+ucS%W@|~`Ba=Z|(KP(aez0Gqsb7hh(%)HNNI&L+b@#Yaj7h||U z?)eBE4+{!nXGK=%vwrN1ry!Piu`_-Q8wXrJ-XUmiScj!Qkv*trn>wJuIqGR&y?Y#_UoWV~JF@_2E?&zp0f zZqNUDQ^xo2$32+pce~1dqr`T-z!~qiD%CH_rPH;S!y`n%j2;&!cz<4L>WGXb+~ z6{@_u1=nQOiWs6b0$JK5@RpSva^d`zw*(k9Q%F;e7f8!6fliMR&tdCD5~x22 z8q8{DIcSoOJL?Y{OsM?0&2TYBch*PYFSOGH!N0e9`unzEz zH*MMkpLX-+&GdBg^y$;M0?!k!7aS@$Z{)f#XW_Zwv&V691Dz zVl@H@h4#R~MkO%WWOgw?d`bqFQdGbB=9`p$^u71q!|s3*UZ67+yn|LyQZs_+0EJ(X z96ddffX&54__~xy5Tl?(e)KpAUBU#CxhH`&bOTpN_=TRT5J`p@IFt<#{e(A;K2Q&F zj%ma@?O7;tnc|&ku&4x%-+z3&_OFM585E|tGFOsN6#sh zGd|5>Io(*dLRN@D^<+JJv0fBQRmRGSSuyF}P{wDH)g_Dw&7n)g=b;4IxP@eDNLz-~ zX)pxBTYmcc@87;Q?Y1_-Mi$Mh6c6vOlZf`Ab#vI1d+*&^w} zZgh2iFxB^~q3+kRoj+HlH<@XfSZ$sYDJD1S&3nz#1|3)P9x1$t96|GlVtRX(3`z@`xwEi< zME%VXF-PuVm>6t1R|D3&a(s#Nd3*$X5-@)ZG7 z1`fsWTL@($UW>PgUHZ@bPPGI3N9@h9SYg?jGGz+(#aL3X{F0gkwy`~X_8dEQEEX{= z)L0u=u3TANU5$^hj;&w6o-{47FfLoRY|WZASg)`~VM~aeK9;EuKl~8u>g36j3Ce!; z)mPCFRyHhsr1t_7gY^x|EEZDAQinAk*M|=uj>Ql+q8@B22M!!aVq&x@Lo2X2*k}1gv`hc3TGGjpw z4-cnyVS^|$EQSw@K#%Xy){Y%JupfktTOhK^Vv0~6#n!D`N5DAkKw+D33k(qK6#j(C z`}pIJF)gTy2t4Q^dP1A5v8@N}FSgj|*rZ95P%S13=cs(us#O@&sZ*yu`|LBE@7c4b zxw*NisR@n31fye^rQW@JqcEwg{OM%q83W@0X(U9x5Pnqbq3@j^L(h|xhX1+JHggHDJ!gNE?06(1i@3=Kk2 za9v}J5Q`(X^&q+nWfyASzI|e1BK?UI5@w+~DI|x`70TL7X@F2eQc@CX)&Lf2lvq`` z1sx;x70f*R7Q&rSo=`s8P~%&}zffvwDk%x$gzlxJ&$uNnE{;O32sNTeA}zf&QFut6w-3*5LBXt8WA^xC?#45Qrn;;joG{Czv}mDoEa3L$4ppZ@EgHT z1G9}f@e}CZzdwG_+qZA0(C-&oWp%YMC@pA617Q}n5v$SYG|#okb4>{%=`Ky&k32Do z6URYb935?YOFpVLz(sA;X$U+aUr~coG;)iEEWOYwrK{0?C~Apt70uNo-U~gdBfZ?r zz`*H3zlBl(Rx_!G;9(Brk7&6y9L={9Qa;o7716{&=M2R$q_5zSFTK!*nLX+>~ZpsW+aCFf>J(S6WT)F#QTW_GqS0nfaK5bTrcP zy2ZH5({ey{IN)!(-q-a?iTr4m-<>}88(mFb4zu4K<$2P>bTq_pHcWrm*K#UQf5Opl 
zqr3b4N#0j-mEC6ZnGpW)x=A&iOLRK!W4Rb=Xi9ZDQ{i?o!SclbkAorBgPxYliRPAE z$1i%hobc2=s*C*o`<%qje!uI3`oQyJ_R1KG-jTa(K z_iEg(v^O2IOCK2xgLGWCe-M9y!!riP3n}{~A+6SqQ*b#R(pAJhCkPFIa2;*=*aDP1IF>>?gW^%BpUQklM%Nx6a~953YH@>|mov=>VI_~OzS%Mve7 zTW1ejwwIEZrH<~Pj7<~ihg@y#U9IUMO7{$gB*`73m8vYY7PtG#`O$K} zNVzvoeB>_Pa$bxwFjX0yri|?<*XAoTx+*ie+vg7Rs-F?mILM=Jf?vbrfX03vv!mrf z#rA2VeHsV2*A>dMa^xu`>g-PHqyqc2=fW1e5#2aHbpGO~hR@o~`eAhSZ<7b!=w7qE z-JH$s=1dQqKRs|kt^fRDbyk5oz0^KC*4E$MmgO!4a?t^Axs%;F#nm}AMjn99@YF@h zjBI&Ql3LAWUX-Fhr6ffenXOEYmj?#gihSfe2gi_PTfadbjYB-@rv=O(;WfX%TYW$G zxjFWlK)EzQ9vrI-4pVw#)P4$&x(Zh+ebuf$wyxdXr`3g`KXs#g=MMF%>*HG2S)ClB zlz9l9-cY4S08g>qKU(gKKke3Jn`5-6EvKV8p+uQorq04V6uM3x?lbq(j2VwAhn?#> zex%>5eEY-^-VOcS>IQk%SGmngQ^xdgn>jsj(KP@0pS`-D@RoOKCw613XX}5?I3~df zU&+jSndPK~9hdZ<3l_(c*0jZc5^+Y$g)+%5W;vuc9W@w_=%u|pzJ{k`{!|3}oOaV) zN;Y22brh+b^#_eyL|15-@T5G1x(Fc@(@AID8Lxk8u<%zTr_EeWl`f~5F2x%-t0Yo? zG0t$My}2dJawQYLUn5sj@nt6*bo)dU&uWqVXNACdj+zXoZR~)7lW?Xn-6DavoQh37 zW??OvmK%lEOG(lhA9gN8cPWXZ7WZp?zL_7y(^4-Cc`(iI&LFoh%H?bM&R26BuXnTF zu5!8E&*iI;9$(i7J^n-Zw=aa;se|OqA}<0N2N_g+H9>cdd&PCmJOhPr2wfi!@0a`MdNl{1!OaqehLI zI&~^#1%h!Q88MC7NI8ArD8BdJdnimqXxQZsKKKBpkTP|`EW$@!y?Qln?9-xP>quu%T<$u0=hl6NPbwo_&!21}Bq9;IK95BwYHU zq9U?Na5mu3!X-na6DLk2hyz0)0*U5w-~`=+MS}-Usr`r+g5DCv1&%s)0q|YX2Y8Qg zhEXkg`{tW(Vrcl20_vX=lK2`_f)+6ZEmK68HnNIU&1dr)SVJAFdzrobJbS*8H7;h0$FuQWS=TPCOAgDSd`5{Zk)m}dr4eOn zCb|nHKhlyldooXrxT49nv=zl|E164(jfS_7U<+L&tr)zcT6{!w8bUuhv(6+tgQh7* zQUr^@EhtIEoA#`I8cV|vN>~Z;Snw)m<;AQRuToY@nVayAi-oL^pc_&%rVQPLw2+K& z2Ft*wXcPr7b{aUrNB=XP=pVmdsqS;uhg}HJ9dIzd#mp0g#7e&4jbsQ)hE3?T`tZOX z!vwCIXWpzgenoQYFHWdkB8rbiqsMDAY?-!9Q_g z%uPs+BnUDI1@Z(6v>}j(+vteZOuFASJj0Q}E?6rDK@0H{fJ23NNR%q*F6RI3e^F^^ z*{|O}*%}%6hR(3mD81oens4A)mCHrY^ccNyfnM5XG@lJMoenphjx;w18jpBM%Uw+q zcp_{bG+iVb@)Z$1aG4eeceGNlToS}U`~*@|B!q&KM7U6i__RUf;Hwfj1>yx{L_ygY zueTU)XF(YD7Gf;22nBKyV+NSX{4{SKbH{%SMu92Chu{2@j#HRb5cizF8 z`R=>#VjaWsNc#)y1hM#Ib%KGwr&u$wc9Hf4*6Jx!reKw;sj0ylhypM)MAU!lt+%kl z>esIynjlUK#V%pBM9riPP7jM=5wHQNsHoVmVFQ+HtmL#{u3EJUJt1xhR`{)3x5D~- z`st_We0h2K`|rPxCTOY0a*c7Ja@>M}Y}&L5mI#X~t`pHBARqvhV1O_~=qk+3*s)`A z3uU85_b>_Z@$uM$!?2>C=s)p!Fo75>KE>26YRyK6yD(8JR;(ZfBUXF#8SN4~2xbTu zG5MH7Y{1b9Jr;rCz;vMi*3yYIgF<{SKZ@!~~Tc1$e#fbkMIK#8PDz=WV1!glaZqAL`z)2>}Rie19HrjDx# z_7;l+rsj0*j(SQz3Czc|BNL+;|A2>-&Oe9fRR7ju-5pRfvLNdS9iiWYE7&=B! z5A^}V!T&A&Q(^e{?(hTAf|4*mYCS3{ifCYj(hxm{SS~ab=uca<77}|QE)85JxI>hx zi9RKA3rLQz(Qt%*`3OaSNeM09_+Oz)8%}+<)Djn+}F6$-MlwP{d#)vNpIulI^!ObZoil5 zV@JygU+G3?=Pw6&e`>QFPq*!Mum@37f@eZa z=OP`hhAS$s_zkB!=)^zox#pqZI(5z4u@^JOCfBXvwkR( zb~@-=5>3}SIb2Gz?6aHp%9gzb7_!{t7mm8qSTX*n)6o!rJ| zxr|M-4=?kW@M`3&H)CcshRk~*y#7xy_0xkI7ezMA32rD+XV?1IzY@{d&tp!$I<37r z+S8WfYE2JTE6{(8B|`0;D^Km9j7wCiW7I)@NHUZ2(-O`Hvuk;d=+F5u&dE9B&Tw+Mo#A?c!_A(` z%^nJcHC^lMgq7h+hNY#0o zWpE%*ojL`x2$KzKSY2IBZV7IIp`>KWg9i`BjqritkKMX;>&~4!ckkYXdxn}}T;NaQ zMj~Cq@g6m56bZ=^Uv&EP>F^cF(7^WLPjmxb=;XyXCMQdkNJ{Q58-f94;Hl-wDghOw|X7KbijEQGFfWnFVv z4rR(FwHdrJSO)&2Y)yGAuM6u^zzXonXZaWl<<2IpV4Ps!BoRYNk*F<=v_tucp8fv& znx0iFbZnQ^w8LUr%k{v`ukuySNJQ42Lfa%#2$IAJgk**Q&tDfy&{~~z!b{(hVLI*0 zC1=|Uw82+|{Mbcua! 
zB~;+NT5$te1gLfp>4e&gpr`*%$V_Qj*|R54p7#mrEx0s&I3r1Ji>WHC>*#y^^sq!T3!Uoy0qq-&ih# z!zns(BIEOGg1w2Ve3;H6FslsihDq7X5FlfvBVj7)qpY;t2@CM2crE z%d-Ce}CF1}YN@4WL4mL2RRvF2k3v~S-&Y(=p;VO1x@0G0*ss0s@#R&1=*y?XVc-OBRi%SnWa zVj;1#PMIC*=tLp?-aL3!Lx zko%kBAn7d_2zmrSvR6$_O?&t5g;hf9AAR&uRaF%>-F^G^?boj#hJbY#-N4F^&J+EH z#7|;lV~I6^{)dN$Q|cK^LP<#pHo4uqcZVg#%%H23p=r{jNf-rMNB?1%Ug4>U~zrjx^=zT385kcDWE*M(M22-mqZsS7Y|C3q)cjRDj_+zPRXe$NQVM# zD60`A`XL;Kz!{VwVOt8)Ayr;VoJ1G#C!L@${WrybnqJkUk7) z+Y()em^p1Fg(=lF9Rzj})I-ybcTxk!N7MwBq#+Zzg>Jz}wWLe<2tO`N5ur~Szh$A& z?bo7=Da?nQ3PK48Gtq#Nwz!H00%$y8O)rHWIZ-M`90V`46&@jfcUw6HZvw3hsc zz98xePKe*qfw6XsRf{O#j)KQBQ;;xALv(Tb z%%|QC+XEfW205HBu%9b%IUV3|%1?hWTKDAuzb}fFVs$~Uid_+fe6^(xoxdg)S}V{@RxxoFe55dGCcm%lD6 zxjV}9N_&T9Z^N~G+euIJ!+G&P9+>-ZX3(`Fr;D8pKYWmQt6bj2j8`jME+#r`&>P=3 zNoVquoBiEymdMRsjvsrQSDM&i9XrH$%)*jUA>8P9N{Lj##QW5)8DMEp=1Q=2zV3x65Si{^~h^}OaU}_2% z>dGH8nNkE3rXLUH;(o<+Ay^m9eF=`MjNsK8DY0X>)?`NGnAAm(TPTN7f1ay|#R{jz z%hrVhCUSa|txtPpg1_9&Pbr9#hXg7m;c72mTV9Y_8m3g@6{wcjt;r!u_b9cWkCLCL z42x9;ddfL2wlo)Os)vn>qC_aYqm+KZYWG03JWA>7CwFzVr3Wg-vC5DPd3=IW9j)~B zwsqn)t6c)+q9l1lfKr^slTeS3RR`dDs9M=x9X-OkVP;U{K+kze@~8rJTD4a_u1pPV zd?l)JUPxnoXye@A`DN}?e61C^^5l;4#4x!}gwj7o862ck#>zvh+#33O)>XRAt?{j| z3vHYf(D-r`MvYn6(U!LD8-RVKM$vDe_PimrK*EGAx^H2 zQU*pT1KcfnQSN$oJ1}cQwr!)aZQHhOJNLUkAm{AaoZX#o-e+biBP<~fPO0dGnE?@eZjl{V05g_E_B{L^ zeqHN)P981D29l76Ux}`WS@{^H^cFe`0Wn`qo_7-n;mu29JJYY?4ISmw5HCWnLgc4KY8Qx#JJOku%4R5@*&fq?YtTH-UKZPx@IlAtsQ}q z%e>LMP{D~hp-#cS=kHAU?=pzZ{dI^fbq+n}_N9&$-7hM5Z<#&TO>eF1=L531Qa3MW ze|EMykl&Wr2*uVOCBW}6;GKV)uHrYdg=?BKVS%34NnNi6H|^4(G3`x()k@wBO4bC` z@wNiVDSweY@46EM_v44z>ah#zBf)!aT(ZAUFZNu>ubv&l?sZM>rqyR1cC=d^@z0K? zEAa<`x>3>{vpw#s*=mket(8 z0=>qa!wnnVQgo2o>(MF3(UTE6+97&m88B^uqqI7rnJ~%m5&yHYBw`*UCpac<@?ve4 zhNzyxDw$~%ziP7!WD6Uu+hIhymdewQfTSPw`#amAh#esGxQ%Od?-{e>iICN+gEYg0cLrZm z8S%&6{32d<NYCexgJEKh zy-yT#0Op}mXDm4S)lBQ6C=C*CDMDAB$y<9Oppa#6s|4f8eWKZE&q7yBiUQ>%s}2;3 z*-sWT+6D?O2RVpOg3QV(1{Je_J(gxweEc^eJPV^jj1rm*%wc?f*oz&~Rbj8!>3V5m z@&F8L&vmTOceE(36FI@gzZI*H$0t4*k`H*i5H>wg|TH<)x*)4~3G+ z-uE-2Y8=4!YP37>tmU7G{fA4%eEk+M>hoC3r_|^n8wN{5ifU4Y&wvWi?%U^^Aw0ta zZlDD~W*VCW9~X!eLFv^66{h!3$&c*x1=mQgwK$M%&d~C34z-3AV)+X*+=HG`p8D$d zmgsu{j$L(?Z{qn(J~*nT zCinlYfI?$QC5)bX0{E>8lYizz3)6R77OQll-LGl%&dnf>O7OrIftF=WZZ(8a`VeH4 zzVR3!iSu1h@C-Br@eK_8Qz-m-Q7l7fPjC7&GeE1g5jQb0u|P8%3cy3j7f{ak*tR_H z)@-_X+-~R_JZ$t;N!bZhWceSJMjA_^*M zo?*gdC~%ly#{V0raDnpcm$8viOgoIcu2^@{UzFKl5`7Fmocej;ei;JS05e?B3IyaJ zY#5N=A+Z8AGQ<}G{5&WtLY8>ITk((E*mKb7JzPPB{~b#X24{yA$WIZ6N!AGTD-pcH ziuX98f)j`Z)aMX@_&}@@b|g142!SrephP5*ak8=|Il4o(e$!0!+f!wQ|F8R(k%RazD5Qlym7WaV%q?bn+T zHU*@bd7_7z_fEm`4JwzZ>}+Kjv{FnFtus9bV6j?uR|&?S5NQP_D6c{ln+c)7+RPAU znVAVuE(&Rv7zX4a>1+u>V{Bo|aNoggR(w%Z>= zbk7ImZ5S=_rKlF#Zj%zjPk*&ZjxsGx7-yqg1oHIWrz;mvxmythe~VReJ(zYlowa=oxaw2x}kmmAM?mxc4rW2<>NU1 z9AB;@n3Y%NL?gvvMNfm~?!x@FacD-ZK>P;!=2^X0w`{d=LRp*J!ee1F?OESI*j9u6I$dz$9 zfMyfY=FU!Z5nFF?63ipjgGFbH`KRZb#cLhWkI&cQ0OXra)$Httjn@?#p7u#+1==Aj z_*agr#ni2&0{DvAk#Bo1C~GaC4dQW8j%SW0r{x}19k%+1?b19R9aZks#;?ZX^>Y)R z73U)q!H!l`NWlWIWHC5|+x-rb~O=3o&htp)^ao5L`Q znUJwdHw_HNgv;$*WVCvSu{f_CBL_u3n}f?5B2aoZRtRqpxP9db8SdF>_olL?Xpkrh zr%@l5MhN4`gOm5QlDT8yIsI$sHK+76pU7LF(qA15Nfl21G!yMiI#LE!TqxDiPoPI^7XSoiU?kw(l4?0TSNaw zT92V~f=sRiJP-`Zni^i5(WSVT=L9b$0g~^*8NVo10XFolbpdU~)V@K-Nl&+_Relq8 z<6Lo%1C@b7H7l-6>T-wGA^>S)~4?Q}oaXT9xhzwgcG<-w+__xSzsjPG*mWo(jW zs8kZojl(509()qk+?A8NM`f1oX6H-Dh0JgFl+kzW=`Y{3>glfpfN3OapOhqJW6!5CA zMWsZoZL(O~yf%M5Hh^sZjd!GjAf_tG!i6Q@S;cZ|?z_YXULKa{+kEuB$XujTZY1Hs zJv33rX;Z^4O_>GR1KRS5+4mNyUmAo}jB{<{?LnzcjKPAObLL!qI-d%8JN8ghZ0_R* 
z%d=_DrlI&{?Szaf?fBmUSjKB*O*!{++fvLdN9@f`^f>S;xh`|I29#CUNlZFiy{LP2 zcz-!Y@t6gs%CcXV|28{tXFFQ^(7uYgZRZ-i`!w3>YDaqAOS>Yw5Y%c2f# z(cE)l8QQ5Yvrhi)v59ajk^h$w6>?WK$6j*R z``*=*x5w)q9UJYJ@x^RX`!W-*r;O`2CHA%qvx4V*suL~j!{#4X%OK{p1xyRmSr#SIW;;NPof( z@q=LEyE`akxNK0?Hw=3v+p#;s9_Ae~Gn6Is!D+!Tdcvrfq#5|7P*cf9bj3)6`b7LD z@GEq%(%z1KvqEN(c^+xu>=@KmF%%p>ujy$iyBKe}Tm{>&&%X4AFXN=VuPPS~`ryfO z2ew5TUy^qE)4-w$X}U+B8o)puk26%aB!?!k5QTsaHN9y@AKHC&IqTZ3_?7MR`tphM z#^zK1GSEV25@hO54=h3xENDqHHW4=MZVX&quC|{|PYQA^Ub#B-q`U37l_cdkiTz0~ z(3ZnJqM-gicW;mhShB(&(^P)K+nFge#EkKSvtmnu~o#eiuwB1XZt0=O{3nIP-Z1qQOl7wVgQ=b zKDApvFTm5?)MWTqMDjOU+_47zHt3np6GCL;*x%^wpjZJ3;s(!5_@Div1-$#qe!Y%x zTHk{djn{>#ql<^P_eehLtG{mfyo}R_V8|A$PnoJ=IcB76 zM71@ndtDIS(njuxhx}jl>c;4ZOKXH`E0K7e)@sihg)-{+%P$6-%yXMKwxrcRqzG+_ zgkyzEKx+8wPEdIvApwOt9na^T!IxOgT-+LjienMAJaRcyRaseoBr+_l*H2b9uLj9r zq9?50e2#QIL^-OUFeC!JkHKn{*_|*NmYu@98!(@#oL82Ek>msBwQ5S?F`3Q&Pw4;B zhp4WFMF4;O+gr_FPoPm_0Oo9DhFAo8_|r=Nga}dT+|-UJNwGYGybcg_Kq>M2U?dhA ztyWH(m|#%t@Q$n&v5;Sk+x>T*fKYx|bfSo1;6!157B+xw4Afyn2BHa)_JAua1zh|! zcfIO(nqD7Gq?*B#0vjnc6c9T?{ShFaAK+e1f#-SsJp#_yAqcD|qQ^Ez^%0VK+ zMaMvWJ78Z$LQ)bSW8wOs{9V63KeD}FAyKg(apGS9_8c(M0acy@T<}-wOt-wAv>hCn zcwcvd@vJx7TYxUCabjw&(A{1ua$4F!H$WhU*5;k`$+6iAq;&wxUIC>$IrUukfq9Z7 zyZDRXl*BR+6iEoq{71xJlCkW7&IG2rdezANATJbZ3p%~tkWdqnl0=$e0}&C!Lx<_p z&S5YKRbWCFXN!f>`SbggW@=^ff1XlS8qO!QhTx;jf|8z~M|KMMwW3qlS;|3b5c?Vc z&%Et8k-r4B@Ex(1%%7Q4tsXRO5z7@(^2j)>k7!rz|f{>u7 zt$9AD-Q~9Gw<%!Zdf`F4+9I*o6mE$Kzo;!H@T4m-KbAv}^L*(sGX$b%nI|-9tV=3r zD@iv6nh=x(;OZXlr3GDAXMI-}WyPTRHIHEyrUb`@yz_J-dPZRo-SIy-iU*!CnWlC} zUou1`xWf$et8Y{E-OI=^xP3{LT}ZZG=7Yp`xqyeOZjY-Rf1YC}k*wl89O*1SC`2)9_67ZDEVYs9Vj?+SijB%L`YXlVMPNBOf1Qr(tSe0(w(Mi$ zAf4-@9Br3|3%kGt<`8zgeo4J+WW;ec(@PoaHkr*HuR`tVZpnuL`73(M=L>M~dAPky zWT$Y~EL(r<$TUf0Uc%XR@NK>+G|ru z&y$Gkn*(@$T9>P~!p!APuDajgg?6eEc%Dh|F-n-wdyjj5~O22_sT!`WMfdYRBAx47NjGrncC#tcr)ntEk17;4|H!y}(= zhFLAAaM~#EwgdfDMPTE)@ysr=gAohj4y;Av>3H+aJ1tW_FDEwlzrLRDkFGj>ZqIGH zrn95FKD>wjVr)i%^?4n3pm6xO>+Q)^y)wR)J7Pr#ywcZL+YFWmO~1}`Ixt@3dmec5 zwNU9?avS|~NSSSmemhSl0`LT8l-5NND*wKP&6G7GV*1o~9a*#eLtNS}DFq{@^*CdEGq@QYCU5 zUKq!pv{`u{)j7S3TMZ4&u(QG@giumMUn0}OUq-VD>E~>7{RX`?w&!BUvgUi1?ONTz zIfuzlO)xW{PUz5&LLmR#xUH8_f5LG?B;1-{#=bWeg&El2@Swk>R%Z#z<70A=}A&+)Vp3hDD^FtTOcekVDXg2yY> z$+dcCA9fNWtU`-6+FrOkgYt5 z&fU!8@cQ{bzugrT=K!|{t?{r?v(^Yw>K7C|P+~JlT-ir5n2mA|yM$tbXidD%JumKQ z&mMLyv*Xmq73kFF{%|U7*DW4Y=>G-|Bs`!ZPF5TKP#l4k6MKrGt2`H0SNEd~a99NO zMO$}|{9jk2rQ&+d9-JNt*X=X}*&1N6qi-2lP?YN0L>_oPwKurEHZ;62}7?7TO5|}bDX~bQI{W!Ww`l1RBRF6}) zhZ_@TCDrY+#p(KYKu0I52dPg@9Rr3GushN2uWm~mF2zVjk3hdtizAChKb5P-2IHNJs~@>rzRnQ@3|& zxH{}Z9mnto?WA$hK~0`&>JLbUYmO$=6q?dSPXTIk;N!FS1*-1hX1f-b?sg27x!h}T zL)m-=hjpK`71oQ;HANk8gA1H@lqR43X6~W|8E3TnPd|hcJX8A#fBT#nf5BxMW z$+f~2b_fz~m4dWr2fGqWbNA{|gCuGLhsEJkUb0)c2QxWEpIpSHO$A3FrG%lb72b z%?2LDrGFXv9IvF)&2)W!G>JxgzrD`5MC0#Y_4nCTqBjp<4^4mgTXKeGr7r- zJE&9jaEY3oJclorA;=)lDnl%V@9Tbk*#doORFI4M)MjcLH4%njgIvOadMiV?cyIic zuho-!9KEeVJWs66=-Pby-7)ZeR$ zhIT>USAk^Hcn|Q$bXv`Sb7OgFq>xUm^>?%h;mThUE;;Dw5r~AT=+&AtdkrDa1{9H$w|``Cj&1Tf;Fe2sfvBfNXXa{JKDySzW-uul84nVHo3^gTnjK=weeIQ6=} zDTUN@M<972>Vg7`m=vo6OZ@s3$Ha&<^pYsY0T&!W5v5|EXu42V>pF^LH}5ZZve~?ay1T%w zKR|bG>vK?h3(x!c`%)KB5OH{T2z+pNa2{)#n^UG?kZNX-WEK60ks3)d0)E3_Kcjp? 
zQe?}~!vWn=pAaPCUrDy`?^Fh?+cG(Z*UH;9+xs!>wgPI{#?3{g5cd2Z&$X3WEw^`5 ze1;FM@6QYp27@JJD7k{LhzLTZj0s^O#0K=j=xCkLo-LrcF%d8`s&ZM=Se)e4;l0ha ze*Z-Dd!xuDLTWAJ>){G%1vE>(*NI|dA$=Ae6Wl{xC4{E4a|CoFC`)%=(gyG;@Qnz7tXUyfT(B$BCJF&9rRbyR2I)%`Bzof zr#s+487Us!M7Ybo_yk(rgzdDwE^E6cm{Ym`QLlXN9mK z2oaHN``vV{#*~}9=3fKVnan0%o32KXr9xVMr}rIG?~3U+_jw4?h}|K>oTogZF@ihaIP2ixSG1=-mP!ih@Z`fQJ*>EAOTSZoTC zIT^&kTmI0ZPOx=O;lskXZ7sc>sE|CBk&%Uc;Q80adh@f6U!yGf(~_~hTk4J4{d{|J zkOTX6(Tj!q^lD2h$L7%n9{1hx@p&p)^Rq?qZB6zGD@c3H`Q1))fel)qI=Zju)*gNB zVzSscWgFrk^!GNDNlM?qU5le8@f@t77Ju@3N~{2gOZgiX;$EQlr_>F8xq_?EUFtt6?i!1!jUU>u@lb9mAW;R0eO|T7 zqRZ~!RKH3c{DLb4^EkOxFi`RwpK6`&>y14iT>Di|M(bWmI(C)e3IZrM{1b1{o%HHC z@x^FdcDBU{MSro&$t9F1?dg~nL$t9BSW7?JTH_VpM{gp zmpsKTL%oPzd>VIeld5zr*p|jY5qIR(<`?wi`U zS6hmbPn7CcYD6f%-<5IW=J76Q`A|xzT30Ri*QcjgCo8l`R&nT!&B0_^_L%LV9->#xODbhRy zyY`Ycs^+ieD;eIGB1f{By7PUJ+&`**SN1PIyoc~>C(slV(=4t=OZ^j*4SCF`hHIpB zdqaAzk`Nr>o1HKSl7SfUZ5<)-hAu$dw(0)fQOpIw2|r?^#i6*pJ;O-SzDXpL9MTT0 zm&U4T2NcX~&bORuA5=iTBW6S%%atSxq$lgWM*b%wjn(xI=W8vXqyZ=BQ#!q}qN1?c zr9R#j!z~C{?fEH{`kt3PkJsNupteg}A5#w*#@fr#OngvGYOa5OJhQLMDQJWf=i&@O zk^%~|`c-(Wha`V}36aL zpLy|vZq(N8)}52SmY@s%a{LL$TsF^vzziBwXN)3iJVSqAi>+pG?^zkp>xrV<=4rK2 z>%=$lWq$WM5sk~cTl%!|*0JZl#=tdtIQN&OTA!7~P+}9a+h#+j&H$F~>+SHZ>1uB@ zen?j25m!73)jc5M8}g`tW;H55^|7|6wx4J2F|O{Rq)9w!xi|#0`7mZ&jj+g*Fox%z zmdL?_$)@+X?BIaaWSWafCRdG2LONvhpAijbJ)G9s%I1)GG1O2G$dI2~^M7T-Ta3}& zC!Z+H@faG_|8ZPam70}k|J&b*&zW*arH$4ErUm@(%yvmZnrM_E^bAuDMJud>;~&-V zrxw%@OSsO9D#KX$HkU1vI}ZPlkfoy-GbI$C%p`?07eog@OY=5q-(_Yx#@-i0yS#6& zI9qe(iP!Ss#r;-tkxn>y#C zpX>AL=vor6x$3;sx!B;*Z1!Mwc``fONi~XbhfXc^y)Mf|DBUDvUek6Q>Q~DG3UeA7 zTmIILpco>N+atN%(2~#GR@I+oGq2N?P}y{~b}f(lg|LL17cs&PuJi8)Ta3y7LHWc6 z4Knpyycs64-l_T8()Re;`yqjSK>8E*MFxA?+7=87hn!$oQcn-qZ2bXL0=xjT0sd0H z68=Cs`N*GJfDMm4s=l828VxEQR9W=@Fd0z3YuZjHo~l2-wfe0L}>7-+f>%zZEIC zzjN;bk~8)PwymUz3F)Z+cX=dz1wF_Rm=3`PJ-h^BI6>cR{MBQydMA4PlD=cO3zY3| zT|X52@@JIhzmW=eQl9!OdwN5~fEfS{nu<(j<-AJ3dvqm;b&$r8{#74;Vt3XhLh7L4#?pzVOq zND^+RBxVI1zHq0vYhTunDn()(e=sz z33cTpa2$W`w2pPGM)F>o%dJ)J7-~%Wv&wKvRpDi@+%tiV?^vDi+-=k z;-d#Ye1r5tpKoskZm^H{wBm5JRPD?j{*EuTv$P|3eao^=LJ`-jQ>hF=E%sa~;Z>mY z#>}v8MXGB=%ZX7yH{Q~rHQ!h{Pidz2);6WGHU8%LcN0$Br(E@AOjYNsY{QsZS-T;Lh}3GZ^6(j zGD8|KDn4D$h0{gZOPO3~re5Uz?cA%^$t~`Q5>4vH-bJGR>2p8u@N}lo9?IBUoT)ol zn${YkJ+Q-VJ7d=FRt6ljjN4o;dctsOIh$j@Q=-Q{&0$s zS9}CA5DO41oRj)h=g1U=oUcs>O8#06wV>^AMCyCB8tqQv<);f zdx-Z4e9MoRMeLpURXc}*>-XI_)uWXaiBSX^R}+*Y@~sIJtJWX(hhX7%og!kiP%J@i zsSWM|P^>YC5yJj~`5AGGC9%=4gr9Gc{|M_GxY!Icvj36svOlpsUbEbk5V|P()y*%8 zTkKId-6gepV6?HuF`ic#q@}!?Fq20nlRnf?jX0&w45u8@kTfRP_Z5DB3^H^1SC{;+ zES`XMK4oK|D92xEdWE!WIo*HSWc8N_dT@?Cn_{vvtJ?`|*xKqoC+6ya8vL0F4<^;f0sou*)`Uj32$;I#D?_OpW)gdLGzu~qOg=ux*|1>>HyuOiS9HxgGc-dj&AAk# zHF2+taKlKq@2IIaDto{njbj}K9$94iDXTGmcYye0yePXA#-Uc+nTDT%Ic$+*OKrFVweNAtvqH}is(gaHp;toaQ*d3z!`zNhmE9?wh&?r|-l*UJ(fv;?( zT)x@KTDM=Dj3AM2(e@gw2)On_+t?bT z8xkS4a<|$$3EJ+vNZkYmUqzdzrWecbb$v!3HC-XRc)5K&j-am@Mxy-MG^;RZ6S+$l zXbSkJveL3y!QGSmp13NBygKi=q|z{>BE_yZI3bkii}J(RfnjJz ztMy?8wC|r;9kO(N@tWrRV@(J1d~Rzxn7f8`aP)4-@5XL0B&D!HN9BV7s z9lRTh?@}J&Y1V_Q)Mw%TmXoI9rkt88E=-FB=%sDPla6vr#HZF?tH&!dd8WZbTQ7|J zQW-BbdT?u+v^5_RK?FQlHegV9*A8E2iae!1)x8#JzG=V|s0^FOWE+RjyODw1XKQHz zw^49b@L2dSzjS2Xzr-9y!!aj>nn1f6mVyCJNKV2N@&JCn0Oen1vCFyoCV?AK1TA@V zM)-mp`Fs(6YO)r;#|QlQJ!f0xYZY$jUq}Uz*8H$B+BfSF5isjn>FMD%Aw_zEZf<7l~xz4Y2}2^%}g%#TA@a<`wf2CMjW|+ ziE5kVG)`?%H&a;POnOk5h4A2Py}$?AnqyTjKZ|@CGcR!7D&`n#TvS!POaRF2cBV$1-Z_N{7m8bIkqqQ z+5V4WhIjtFt&t`+9pJ@~4(@T)sY1}nza)2J4fLqxnl}U_Mm)lg^G+y?dT|9nLcPzn zdVQTK3S;T`^gprVv-gHbZ^TS-i;238mZz;l)y7yC7afkEj*?WX;?^3C2?-kbyK~Wn 
zEe>I+RIX=SUkh9$!lG=IP{Qy!KQYnJ=$tK_>s@3z*Qj+^&!3>KT8#$~Bwdgs|WVRRTrxd-T)- zenvyhh}D0WCCj0YAg1i8>()V|R{N#{V~o)blMj0N_y40!pn5m?j$H}(4*>2x!sT(- z^P_J!dMArSYT66kPocB{mm`MD@-y$vx5IIX&5wm42)8MDw6n8I@D6eGIlfJ>3mwEle(oSb%~68m7fh4uhFj0-Ib*M`se!!-)mA1Hc^Y|E$nO z0bw^o3wkr@G-d*7G;TY9c9_>g>3cT_p?ClF>z5xVX3yuIFSV+dWzg#ilZL2ySpP6gE*A7G`zOcY$k0 z6g^@5zW=qcgwY)&EbK>mM<9Z=`A~580l8DGieY@ptDn!qsJN?^gxNaORuVptqkj*i^n_AS#QfDFjBIi#$|@=b z(gqY(siSCia~vRK=FS{`>>w7)W|Kl=Sr;h(wC9h~Y>rYzt7^q26beGA%@`hkz6< z_wDUQqItHVvWAi?3RPmqGjC|aDtU5M$F;I@Bb3p&)Mwa|YLZZ>WJ|$uWZ}XwbZacD zpBj@hDdsjSTE5EVnU_;tnFofgT>7=^`Zbg-=I5gxX`ts5z}1wUVxpQY9#hMzP+T`Z zTl+foF1-IZh01jB$U!3RnuyQM`D-PwnMDVr;@vJ_N7k2JT-cQsz7WvWeORacSX})f zT{xFXle>ZgH&k5%NyN~CMmN3=7+T6#KuFv&v*}CJ7#he;^n;bWbODA+!o5H?lRI1q)#!esp zj|)5*GOVgN&IYPmXCD&H`#e<{x|#Z#$!E12lLr3sPsJ6dnz0T>5zDC(2%!-1EysD5 zeM3o3dZVAc6onPPYpp0?C^|qPeA2|W(@70V+8n^q7o+-)YqHi={3X;>2M6g{BZk-= z#F|_?FAAlM!yM%u2)i<2P0pE$uzHyL2G$!;KD&8dbbckP^}-6Kp$8414+&w3wqbBd zCgs8wZpsgG`}`}8pP$@F_tjADX`Zkf5DhM6(1C*TttXA-ni0b^(oX(&G~D-p>i zEoMbuSYfB7YHpuqCGTPAO?KwT-89P!8^)C~C@j(UF2!>JTLa08Tzg(LkS5}q8V86@ zQ&#ekS~?M{%)(GXZ783}lv{PZ5y!4sCu!18OkR$mRKfgu)+p%2SMc=L0ySCj#;n;5 zcZnc~*3=zcM!II=r3&^D=E4@{bu)~^KT;vdq{0${ep2dYZfQ>PS25tY(EA9eXhQS%>NTQ$Ys8J4d7gs8nc1bju_O~&_e>Z##DLz5zOKRHpNZ7DEL z)YywoOj|~>5AL%VnukPhBJVU)qECq+45E%FklH=0>%QcYfgM!UYG%8<*+w-vc$6A$Q<9-x}vH^yjF`#E!+O0esn8+cL=jJmVZ$>W$Au)R^P@4>wXPvZ|r5aH#eQV6c#@D zm^pVwYhU~=2{(+laA?aGw{XhwJqbZ;V_v^m6l#KAX;el$k7xDX-itBs@ucX3oAF8g z8dX8dd*3c5RuTju2(}Hg5dH2~SU9b*4q`3liGc*y|3l*YH#@(^9sIdn+K~L113Ab5 z26|pG(PohEKl1Ru2j*(TbwjcyVTy?4y4@Yd4mrulz>UTykHruOcKev`b}RIVLw2B2 zV<9a;??iyUAoTJXr)2?Vm=Wc0*mgURREbHFw*+F|W{@)Y0((#?EG_+i)#H1FtRW$P z)@DL|2xp?uMUd^1oet5&NI@_MS+YEbIzU{btOPi$BIm{s?vA0eX_1vpTH3{!2fja2 zv>;7VfCp29<-+b~2}NKHb+O{rM`XVFIH5-lviw=%_I$ox2v@zT?9VoN-=BKtNi7Dy zyEfUhv6E+>NcL&L{^E6iE|Imw1EOI$aqzo1(x*&yh{L6 z;E@%du-Bh4tS4Ybxh3^^cmUlg%=Krw9N>GI!yk=1n{mheAlt#R56d|ML3RWw(ji5Y z|61LI$f2J0c}@!`U3O!WPazIq?+Bvdq!Lxm&1rHrS)R=_*0>q2`z7r;gM3EThglvi z(k@bMhs8q%>ub8>mH*15U4)@Uv46MeOIWlE1k zT~YXK-JODL2$)>61wO1+ts=xv4F49^zFwhKiz4xC26^;Rw!v1`XJWBL(!zL(P>Nnv z@+p*WKgy|hYT_yVYI{;hL(9LXzH7DQst#__9n!w+&*`Ylq8Ev5#Rq;{jH1AzP#XAn z#e2U!yB!hcaYEHg%Lm<~8Xe}YJ1*XmECwr?uMq4*rB$=-Y<#@z@F*@V{Vj!!gOeJv zg~8}Z?&zPsRJqaY?X^^?onx7l^p#jH{Z)mn=Y>mD{TF)^UOxyttW3^mx!O#z9Z`59m> zbnt)6Y>W(Y>iIPN=o`%D>E$IHYD96lPKWzI*spq7-z;FXYV;A+&y}PLVw|EV#a0WF zfK?b9*d8eP0!mD#viXL(laiD1xSS(qf9H4MFFjY5mX=ml4*sIog~6=fNJ~ot5LTTQ zdoTwHXC`J`DLxJk3@b4)vENMeaO}n1gklg0X~9yk0bAiz^Q^b5{NjG&BzVN5;PWN& zM2>erB^tU9jGmqz{^!8B0@|>0&^O-^b7DqDBtv0`Cg%$c#=i*EG^ZTy_vDp8zzvNE z1`Kc18=z8rw^-4@C*|G5#2|P;_WYxUM}o*XSOWjd=;d*f{1Mrj(Y0U7ou3Mb_s_)- z*#L$QK-z_wY@_&p2kw9Q)5;Yfg~j^A`hM)%CE$c&+n0P3`DW$um5q}SEVelqN1$Rd zUyMhHaOg5YYCm}7(wA{COoU;cR7xlt=du@PBsq5PZi&h`GfZAvD=zAsNZ$WoybCxcKyDe z9GSiuxMF~nEKQEmL-SVHe)dgpiX=7ylV}J*o@7o~!?ZTMGyO|UWIeGjT&yG>c#dI) zVjm*JIo+}IEqnBI$%&^&fkByuO%=EiM;!?{x>LeTQ#Way;5|j=4Vx1O&`ys_+r`Q! 
z`pf#oD`y2OhEnHdP3!4@Wd;+#L6r8fY3x=##5kXL?+1+RqYYbJT^lb-kn`j^#6?6mp?CPNucv;&PYiq)pC#8lDL0eCt z;~eUr1zh;>I>?}W-ZU%+P9BaHWzy=ZGug|@c>J+#|>P7o3?}*li{aNnaR< z`F68RsOaMJPxWuYsl|CnyvU#wfFH;z=C%C`ubbbTo1d6j2IAIO8)eGS!16`I>eX$vLN3fEANzD{N&tayudpi8~foL$PTuCo&m zRrIFXJH{^VHSZ+x!bE9eh=fc0DI~@b8M$IrV2301qL00Aqz69N%)032pDB6EBf*s- z(o~1RFxFKtolcZQ4V&H0W-=Z&>}Sp^d{O8}ZfK3XS6w+#4^Z|R2wf9m?`ywBW0*?4lTltIwkSttK)xS=;O#F>zK&N);K9u zkF~L;Z_wzaZ))zj!ivlPxIa?o{M|5!Lwe+i+T03zNdt3jt}`dq$%oBQt0dvV%xupz zk(=^4|4ezQQfcemV)Y8vDHXl%H$~srGS7f@FT`6O#?4(R<=LdF?qw$DqUusdY{KE6 zMIx@#zSI0uFumK&A@}6jUldNY|Ilj!+@jvF5Wfgk6xUYg$gbo zubp{V9xSO7eWn$`{{W6balYyjZp@zM^Zs=2segX*_VxZ^R9-8_x`;&N!-Uv8Q$+nw z7YMYonQl4L?@(wDnhUFlfuqju*D1Fvcw*uO0uH5sX(4#03MZ3(JeE-Nwa6U3N2%T_ zwqNJ+up=IwG*whiV7xptkwUv&rP-;{>{e^`YZV7A1Zb@_&;C>CudIqSznEix>jR$$ z--kZ-sErv8EHrwFiu?gGE4v9PgtF@=YSns*Xr|r ztMh`xGZ_1#-+;=$HdX)e*9CmmZFqX)Cc6LP&h0WM+ihyq5~lx@={^t$z^hf}7Z&ij zetGA5-RKr)JL`^zsVjWhQ8!j*%?IJe!a0I2ye^AwyGzNTS1JRzndnq=-N~lflg)J; zv=*AnXMVTd_qe-w*ozxA&uWiep4_%3(foX-Ess3PbH2Zz(UZ-|FRyNS{p|XyTNU@d zZ*^n3$K^3DSM%+k?)~8P^SiH~UVE~+_v4+VukO_Ro3!`p>B}eQpPl^h!LsDXWgA{S zcnfOr`o;6Rdk>sQ>{!)WdnwJjs-tqM)0mDB$@H$4G3^R zusKM=5S(yMpsbK^AZF+0=B`<@hPF;{1wTpZMj1-+iR%zc1`HSgelc&}JUoaX_;44* zS};w>qLgZZ>?Ej}EDsRx(Ma-mA>#&l)C?Ii1i}dg6Hzvb2M@SA1VlaxgQAvDRFo`B zA2@ITeL-#Dwh&EUy?XWR*)u{Wq_}&^vrMUo$g6~eWVBg^+m$U*5WjH_selk&@HjLc zKPe6fk}&Q?83?*~9BM#41S12<0sm6(-gVX`!2q|DPYZd=qdd8?lMq6c<01u3Pp^mJ zm0erOg9vj9N$+2!wXy$JJW+!N4ItHHQXmw)e);B(~M0SEF*~}#juz#7S@8b zXvf;6yQkl*ld%%-?Bf-(v|6*Sf}=^JwA~vGLQv^ve0I%SvU*F z39dxzqQ9E>)wX5Z7ED^uUf;eYYl-$mu_zzr)G=i4g6%UZ$F!kS;6_ zJF7OSxWjNh)3z7zwv7aK3K?6--6cq*AWRdchA))b3V%&alCd_^_DpB1bE(!hXL#S6 z*YN(lzz2((KVIG8$=X);<^`Qkwcg)A^|`R3B#P~xI1wn+QP^!p3%}*5!o4;_xY4#2 z(ufns$mO;X&HvMDpcTbX*q6uPD8m_>*)8ez&70AYkrhXdpubBOE^MIINTQ5|aB(5S zOL|fgy)d7D@BtHkBldcIZ4IZ8+1(O#0uaW?vqCE}cp_gGn*nUhIAuRJ= z+ziE2GEEZRJE=UJmaUsNk1B8N;OHsZ<|WX5Ipm8{wQ0pl&TXq}Mp~L8(GsR=qELN5 zcP@Vej~?wmV8GRS2LSolx^-(%P!Ke}H~(;eXlS+nnruY`)&a8fC}R#3hxGJxB7-2{#4_-cGU4JDD6mVHE(H?-wH!Zw zJgV*6w=d`wO5y}G4WmKPmmrUz;WI^=V}iY;r|- za>0RLQ>Y90!o!CTDU;6g=g*%$eY$ATBB-Tce_mc*P-CIt9zTAZM*hNu3;Xx)-?eMk z)vH(O3RE)!2L>OZC{(Z~kSY2X?B>LY6S(Wvt+&{W;AQ9_cnZb`4JN2wD0-mD7&m+q z!-clHaN$BiDkD#r<;$1j=jzp~QJB0LQJAb)IXO8PXYe=*0melPAX%L7APg6wuVBQ8 zLlIay`h`dQE)I+*9)#h7c8{T^Ov;ofnJ`JnrG%nppvaR`N7t@h!DO?tvhb`kXU^Qa zcMl_3U0q#ORdwmoB~a-bH*TQsr%#{Wvu6+9=e>LPUb%9muCDIv*|Qk%H^T7%lb$9H z2mmQ5DX5VAcJQhY`^NR8q$IQ--(+MU99nX-Au|F2!^XwM;T9CbH+n~zteZA%iWc6v za|e@)X(EsoT*R9U2_!Kwk)UW%1F9tp5;?=%ym_-vpFSvzN^lXK#a);hLW07?;RS=} zLOB8`TnK}MW`u@@`uh4tL_}a1sYe9=i!M+`Br=ImqA2u^;Gl6YnuKDwg^-`oe*A?A zB~==74ywWwW2`8J6IK@Gf<$3V3Zd^(uW8mOyoiiE_$E*noTF`IU?H~_a-fy%BzUiI zh4M4XPAp_FAp?kFB2%!b39TyO=Fxx4~1$QXzxG$o@05 zA|U5rsbUd(6{XAP5mOni|oWgnG|r>-<(@WOvS>)s>fm^8w=eI+Jyx^fBt+LNl-S( zrC2h5{q+|*BAaVaC&(Zc)b8E8SFKw0ZcYTU@W}R-Y>}U+hrmqh%K1CrKbb2)C;XEwb!#I4*#dnMof(n!bZ zPKMo{=G&Y!<<7ce!RE(etoE7|yRD!WX&x-^ayri8>SW*AY^TGn%4?Gw-v6!d-KCkg zW=HLjbUW>|n*)uNt)v~H)@6Q{$D%CC9ofkyy8YIwazCr>G1Bf}&7H5?zNngaGH=X@ z$@wQIkE<9r_Sv<|n5M@y)fG)F&ZnAo`>X#jDObDb5BaOlCOF^Uvzz~Wy5dBbU4;v) z%d)NQ?@->t?pRZc!>;VkuY!rceEC&ObbqE8!ZiJq>aGe^f03zqfXJXdga-|AuiB*r z6JI=kezUGFF|bhrV_7_vGmjDO!{asBwq?J*eEz)d^yx%PLj+T_5{N77zW(Ob>(^JR zt25j@x2{|FE>8#h**<4(jG6iOxp}v5|2zD^2Y2tT7&>CO0yXpCqcou}M+?aW&-FK? zc^SI8tg80#ziM% z8&|25t1&LjG$2+Q-B3#RF?M$}w(*p@hL|$LP5teSF;2#I4#pUNQ(B0W6=>Cas7q0? 
z&$Ldaf>y@SeI1I%x)+aeFG;c;?<=La8auZz4g0{q^xKf4%myVYL+KJlTt9UXGPUrHe#A9i@T^p2f#Hjd=6rpv%KX%nh70 z!>{Ol|H2aQ;)!0RQ+-P(dX^4%DK7Awnq)Is0PyxgI~$rZgQb44(ufSZqCB^$d2Xeh zrP1&A|M~v4hj%XD9h&rEL(5EGsk^Jx*^gV3dV3lZ*m({E`{drU9s$J}4*Ao3 zOS2pb(WFlsP95#WTk9ybX<%&Lz%nMOQ31`uU*GMI*N?~8dP?0Qt+U5@l#FpG{Iuuy z&&B5N=pi5bs51hjbO(LM!EHW$a-YXdto~~Ev6s9rl#6X`t8HF_$>=Zjqb)+FA)+roM{S0 znxR?3FfyHK=ZeJ8TbX{3O1)dD*~WC6nf?!vSLj!f?D$KjdtboS4r7}B;$5B0Gz*!2 zi&A^glI>P&R@OVEqs(}wogmz1Q2FQe`JjInDWSJ9v+evrs9(2Tp*<)mjs>f0QcPFI zx?C@GzdFJ7!a%#~ZpM?%v`1VO+f-USx`=592#aMi;nPA%OYKB3zCrAJDjDmVknrHj z)v3Xa(Z@8kK2LA;f!^?$-tehTe{mxpfoHdGuaz7YiS~Rfz<$x+y;g=NRXmvJ+4+id zsdiPd`pQO1o*YT=2bH7BEVr((=bx3{QCmagQx z-ul?@(g@p|-;U*T^8D$wS>5UeSzpd`zLe{9b-df%IsT7VHG6q=E1!lJj~=h;eCq@6 zE92d+jid^BxpD8+gS*da&%C(#R?dG? zQ(e`;_e{95!k3jX_Hhu;PXx(_un-~C$)TncSc(&p>*&BwGQxwk_3z&wk}bpoNPu7s z5a%E@fivO?!QF%15Z?sT0Z)NQwrtrl5*ks@nl)>{Yry94Tzo@LTeof<*yXr!<0!u~ zh2En*kPj)_AY@c%0kl0gV#ElDK#;f~$>iqd67(VjOX?*)GJFadSa1%>AAg|$I=ErO1`H!1+>*x& zDx^(VlD)}62z8=nXbr#q`YYk#U^F&v+z4R+qCVtpw3e(Zs1`pV{6jv(H_GEWMf6kV zP}-U)tUpLLpg3Q&YgF(B*>xA3S};y?htXSe(LAa##+f z@9E0A_Fz3q;@%>$OrJKLZkH`QYNmG4yBt5vb*Q?kx~^tjxr<0-hgYvT<%HPt?;gt3 zw#?Rrxs>fH!@qZK-f3@YAIoCnS$rx>?Z7%vSWpYrq8V$3dZJl$H_L8S`>Rm+^nuge zth(VZ3Iz&a0jMX;ChY1ZuB@$Jx}Jbs0$HFh^SyHM3NCI~wxMK1$v^IlFaC0Ix5nL0 z9zFTD>(8D(n>A`yxFVe3a~re9gbCxxJe`^IC&iz<5&kXvQZm*uwppRutkWFw(_Mg= z(Z`lUKlv%kUDZD-I1x?~`FEPv=W+HDzAll%d9|-dwF5%8+o(RKGFu9x zin`v014V*Uz*n!v^>Xh46bzaUQkZ!fR^(gDH( z1sVDX$P^SqkXDc#5IWFXTmjj~6{xq+^2qCt49i$kAhV#xpp7`kbzB5#f&vKL6I=$G zFRnm+*tv5jI0Xt&L;wgom;!DjbHSf~{z*Reix)2jl>;fm2}TSgof5-gpg=-FNYPGc z+~hET6DTPJT9TKS2Xe~!-IFJqH*ZE?z@hNm4dNhw8?-PXA;Htr6St%0t-@)Fcn7Ki z+4S@C!%RWtq3i3{ucvS%0+0bIek1HT$B!QeN5JDyA^L)Oq_FXA+qMyQ?ur#F$V>B& zKmI_!$gvX)3nN1ej+`xr43WY(WPO93V_)ITbZdP=KImjvP7CuU|h*IvzxhHxx|NvSrJFfPjV#8)9DY z6Wzy}h02Tvkuij@Z85ND2DE9^h6yEW5@psdD=UL)4~YdG0pEZ0=xvk)x=>M30eK8F zh{?qu%Ut08{ri}43Ok}GFp5;d9N}VkcsPX=5x@(+vF_gWX(6{3*)~GgDaD~|IU(m3 z+0H<&CKOdfMiMeO$o7_h1b!h8D>>zmoLGk>C9=23PA??AKxn}#AX#x{J!Y@$;zFqw zuqv@Au$r(^AP`~^p%`B5<;$1<{PWKk8IX6%UNrsRAMd)*;2SSL8Ab69;}LT3jvP5f z(dc-|DPa8FcqkH#i8PUHjJKWZS=X-KDa&ZQBK)p3@;D9nCRtIxz(o=Y-b8sSpa%5u9wz1LcOdqKC_K8|N&Eg(M}4l>_p zsoEB5JU7T`yS;uLV}F|Iwly`L&TzZ+Y4quK78eJ(Tpa9jCeEtDPjxO?sth$;80dI$ zpxvn!`t6qbjpmv&oh>ho@jBJg?0g^RD+M0MgEiINtZ#o2d9Bdx&Nodf!!62Pv^Cuf zm0qef3iUdTrp(LYpo@C1mHM!&c7K@WXG`5~rTR>Ji$|+EA4~J!XvUVSl}B9IqZ543 zs#WgZukP_(=Gqm%e{HT+DT_@`#0RJ`r+-{XZG-L#gp52to-QXwF?&Bz0UXYUcY#; zd-a;gt5Qb!+Cii^}i~62TZv-Jon+K0cYceR&^YHIWgx% z#}VI#6pnS9+Rvf5mrZdmyU8s~!_i1vLo+918(&jTwAtD!($Cndr{kD0?o->?4r}Dr zb4be%3S;JWaV`jwvN}i;Fai>&8Cn=E8aZ0DYTIzs^Jjbtwtly7fc2C%ro32V-ayCFLhsUKtAhM4 zUp;=pb&&kQU-)Y>n;S%P=MdF^-B75>;e#LB$nfh3;s-}ko(s60HEYuef1PfpJ|4;;YmR#G zY(zM(@MdRvw0ilFJ6c?yH~X*yPb9?ATH0BhiL$=?i((lGJudCzjA8ns4CWn)a zmzQ@vTHp8a>dsI1=e@kP^X)Wqk?-~G<)5GJ(d0x6r>c&j*S`Lo{<$`1e02+Tg`cpu z_;Y*9kRe0JZJS&l>5v0kz@f;nKpu~f1i%L=#Sh`K;)F70;$H}jke470LjDB%gY36t z$&!^TR~|cdY{!lrkbCinfddCp5~6kM)}bDpLx3k(lF_3_Q;Huv8Zsos-Q($CwU9y~ z6hS;As5d-1CnpCY7OrGwWd^CpK<53b4GGtwd(2!RtAP7J47iD}PDMd~sjrO1zxnvwYdK5h(UlfSklx!3- zKkb)M&ZA#``3058o7xchFgSnXu;9q#vMwuM^%MIVgGr%Da?}+KJ`Qr`p)C^HU;fwn zxBSnKPHj4o&(h2pGtHP;2n)$#StYDwC>xr<622J!1qmv6H~#7Ecx~Q@pa3679~Dz& z^~oX%i5LDK3oDpn!Mp`1@Z`~xz7c)HSvX~64q`#J%oZO%<`>B#o3f_(Bw8jO-@%PJ z6+0{1TXAHSPOKA}gcD!p8*Uw5Tg@f*-xmEw*@*DL$+@a?oIzi``QrJD;|GtI4lNzh zeaOt*nFJm4+ZbpYel}bS+=#AHWfaqZ3_ON zej-!PNTwm`Jz9X1_ho7bEgnokuqyu%9Enm`ODs8Ztin%(H3f2L6ctD@dNE(2`BtS~ zrcr;R0J~wUx+dSdchA|`88-%rk3yJPw8Bb5N3-zotC#uC*X2u>8v6K<5hgAs2J$n+ zn~i_|NxHCnJb(Q7z%NUCczBrvZcI0(_7zSz;1m>~L`b=i$1rZGjQuR^Ezh4mjj(r6 
z3NwqT-rA#8Dwhg-OYMmhF_s2@?gp&1P$(7%dkYRI=3!xBa^IoOLrtK+Kn$&|t>wR< z_Yrg_NZ)^J(fm&jP&q6d3Ia+_PR3#Z$$~CG4jUkPZQHg5`Atkr96EF;w4Pso{S`U_ z=po2BR6?khAeEG{4wQQ<N94{#2W3sO&}dkiN2B11aVRSdGqF! zx(MyW^@M~3G7u30&l};HK!Hf8nPQhvXJc_RY0`uoX3$v-1vL_a3i;Tezo?uHP+huo z0aK<)rySX+4HwBrg#v{lA|m|#{ZTDulRTZkm&xsfGEI{`1n(!k4|o{}a0Y*&Rit-g zO&vOP=#4-azH;S?ECZC4m0|jbSYcrC2Fh5zcsI~a@;)L*8gfr++O#PihgRVn<3{~` z*P%s*q9q#$uE>Zka?lWA_0zv(Re0CMMfMnx%`AVjw~+6L98C0XSh|dnE1Owl7nB+Q zKJhd)gV23R{KGQGs}4eel}oZEWw-_z!D0dZ!8?Zai?x7@D1-7?4){s?gR?g8;G-t}*+^vOUDRdVp|N1aB^!Hyalr>3KPby|VKl{b`4ExGZ{cEK#U-BsY5UYs^tk#E|cs(V@Q^wu79QEpaiw!7+% z_^A(vXja>3t5e;s6!@Lbu)UCFR~2V@Hr4KYrp=jno6VNmL+xx%L|I-M>Ga_DwAz05 zcjkA#Iy2;etM;(7_R=WVOCw!x%;|c>O;OjwxYkm2xVgnXe~WFFs)`V^g8`N+jrzR- z`f^{@E-(FFYkh@>=GOaxw`O`SfjBV z_eR-;vCk(?LdpBru6-61)>Z&(U?g)ymZDI`_OImjmTTwEwASCFZXyA5Zg*BU}qV4J=KzozO%YjPFA4 zX*rIiU92YtNa^0DcN9?TuO(ylniw)>E}=!ZyKLyJt@Vyu#E`n8R$^( zWzfXUEynMR&Og+4R9&~69WBSr4lMpSpm=`Bly5^Pj`f%nA@#8{#@HH~xfwg5Bksn| z!Nz`FY{z{aG;K~`X+uMbjlPwuc}G`6M=xVys43f5N{Kd(h|-OU@))#f?LivqDfx4~ z&3mf1bVM+(%`1Ym(C#yD+RbzXr)*S-JlD%dB_xnEco;1_9D9L(q zl4)|PZDD&|{ zW9CnKtJ&3^znKv-GfX|$&!Trzvth{=6W7n>`sVFx_j95?>1sJ)yvMXDUhn6*Oo!V4@jJ`z6yYX8NVVw6cKd@!EgEbe{_(mU&FKM8pkk zWBQGv4EegC*e)~mv4+Z%(Yne&)o#5S#Xe!WsUqi5v8eV_j`5=VK|~v^6~>&k!jkhX z(|sp0BJGz{C!1-{rCMJc4ARt$;&S~XK(O(zI%I+CJILO6eS zkCp5#lY_&u7#l5mH%=tHE@7-%*jpaozFlgvpUl*=MDP?k^*hrXYZ3AKIrkd6yKcep zFrFc(!iPi6RQM{6H&!3AW#x@RUI@h9>q{2zR4d9F@L-~|U5#hE7;3spwOI`=^tZ$3 ze1^^0E@s!~2z$$;`{xF5Q zcKpL#%P*zdRYs`G7<=E3#}h$jreGq9y(N+^yNS|n1(*m~LLgX@r2--%xC{Xj;ugx1 z33;0kZV4j=HBd4I@WeTD=AalVq~IWk?!+#^MDQ2o6#}2aPh$Fb5J?NDnf$Q{5tqEs z(LzcI3V8;i8qOiZpf<=h7cN{NI7b|EFcJB;kSGWa2q~1nPbg9wJdP4%Kpd1$Lpow9kU` z|G(Q?+O}&;1A{**7BQNQ9>#_>VNDi%#myrS^8X_@9!Otr#CBEV@W%Lw;d&!> zy`kp()AA&Tzj*dyP}e~clT6Hnj~2?b0RC0+Y&Gt!S?drM5^ot_wSzCHgF6qpS-Dv< zOK;}gmbFb}iCtKi1nY#0)m)%jyJ&3?3&MviBxG;FabVklR?e*`^N|~Kn=x_*LC4_u z{d&{Ff>c1@2i!|Er6g*MgERnZv1k-dA zAT8bkIQMS{0od9w=Ef8r+{U78&6K^kp<6Rm!NJ%InFbZERB1P>wUtha6M?D|A)0bq z^)59#)4Myjx45`4;r#-s(!Aa)hK^P-F{kQ7JPIdIX7!1r`(vSc-Pq3E%9-!cV&;521`3404>5K85{L!s4b_wV0NtNU-D)@EjAmxa{`gy_Gt zNdC8v+}vDHR8k_KxPiWrfgMB`FTNnV2Ur6LFHS&Rp{j%4Qt~5|95G_VOpyW=w=Z0{kf2CD`|LBq%Y{M>`i$Gb zhRE;&CV~cx88ZgN8G0!A4j2=v#T9~L*tl^cIzq&D{rdGNgG$gTa$iJW&HMhOU7Nke>BM=*8$4I2T$zP8_I;9*tKHR2ol* zcJJxw39UaNAt56pW7x1^(7TCqf%8I(#L$5*L7m5l;mxe9tOOT>o`t@nBwiazOGk-X z@I>$|)C>lM@x%b)3RHEd{J4k)gW!Wd{{H*#U>{KH@FJC$m!k_96EqJz6eG~Sef!MJ zOtKr0{ek>OD2#{zS1DOD!RJyQXY>V=MQR`#DF<6UeE9I}*|X3(p)HaLQO*aAQH4f{ zhM^beZ*p=n#-V4=o;b%h2A`}ectf(XvdH{`alkjmijY#!$k^Ce^5{U@nl^1pIjm6t zx@LTQJcg9gC}W7;2tP4&leTzjYHINP#~*)yGot;307Dsqz?W%`z@0G*Lxv2YEJ%db z+oVYoye+hB@fXUIAqWqp#7P*Lh7B8%y#?1#o#MX4P~iy#h74;2gp27Wdkcn+tTANI zA$!YnaV%fHJSQgy(gOIkjMn=`sMwg|;lqbxWUwemL&rr7CI$hGjEIP6A`UW4VSGYE zLn&7^jUKHg*`Gv4Y9+}^PB8R0kQOp5yc;w`Mi??Cyo@|Al&K;~%PzD(k z1#QDE^zZiV+sS4kyWEf?G!~;Qs!0c z=CZ>Q=6%zqO$5(^zqW4ODz_YL4iuG)HW&)o6B4VKaz;?JIN4iT2>%ws%N;5Bezr(< zGm2^YFm*eTgP5>f!05Nw>+a5N_VUgjSBt_=wzsMX((X5?t9v@^ZDBYZWwG6;xir%0 zN}hAMgYrYYW}V)Ax4UkmT5~koP?adHu}~d#WveYUWp>Klj#_A8Yc-r4bGn z$HhI^Qd$+O+2pR>ZLdCRuh^zmEmxXtw9p(0)LrOfQ|@PWD9CJ$O1s^pUdGh569O;I zOe}L&lzHhc6okEb%zcOM)|@L+=;x}{Q@J@szg7zsQu%XyA`RD$;%OrodrxF@hI022 z(|vEJ9l#VhO`6eAfB*UHPK@P=1kioodgt;eo=9PV>BFJ^*-sNE^SA5TwGV^BLU|r2 zRSyB+-;ncnwolkwuAe)X$~isknt>j1@$%)% zg@cCVitFi2mCsmzd#9SSXYto>-z{jV;2?73GYpv(#k%o$qKrU8W{{NSX-f1oCb$_p zIT_o!NpXIrUX7$I2SbdD6c-^4YHQ5zXv+69rFckPJWbskjIHo-mfE=*J9k>U1e0aBomC4|)x)~5tM!Cf z>*0Nz3;H`0H#OzNN~0!tmV6pmJlv%u%dR-fzBtW(QVY`%cdO3AZj&4;4RklgyBIr0 zN<-2c6rn&L`{F^)B?BBwx>--`U>X-=9No+~vZ-;{V3(p<{-u+>ibuH?ei1n3+u$ib 
zh81mXQE;_K|64u#t!+9n#i1Zf>gQwZ9w4RpOR1q!zn(UQs58T9>?rrb{*Fa4w%Gyr z*O=N^8ssf?_c8VeG|xn4wSGz}#ogxJBjYqP5(Ylc>t#h&+r)E1A%l4MVqegcZZdAR*3pq;cV@?#J z^>mRB`g^9IF92_6FG;dQ~e8RM!2(!kw!n0*14^1-L%JiGK^9*;JnJ>&cC|~KXIva0Qo5>yK ztGgObwa``gahMdI$;g~-S7?_p{f8nCTC%_v2oNbGEdIfvUD#W$P7iK8LfBjKcmgKw zU{lOkO})J(-(*+7)SrkREn@mDOj#A*>h%jgJJ;uZddPuWNGkmmN8EUVp+mN8uUfIk zqv7+LJi6!7s^5QQnmrag(JC5zD%uR%%BdFm;}M#ZQTnRZ=BHY!u6!_zv;K$oPbWqn zv}AiMRBMzxaOp$tBgC$J%N52~_itXE5_x5u(~VNkn=@UX9GLr-XjgsrcwO7azehb@ z-|orwK2Hziy}Cm|=&vu1ZhO?7l}U<2cI=>$ZP8lY+RC#fJ}Tc_m+Dm6R9$Ap?JeNW zkY*u5lax#D&9o^Xb0T7nAo8I0gsB2i5Mn3URlpkT?Cc=YfE7aKS-pBSYJ*4su?HeC zSR}Dg+z6q99A+pj1;reJV?x-TIB_C`4NBGtPKpyWgC0t}8})D4umRE(*c0WZL~Rg7 z$Ppb>+#D(fx&x_$1X@VEm?PS8MeqdnWKfN6ycrjM)C|K_J3L)QzjVV zi{5Fy**}B)#nbJWor#%(ErTm}@LZz%ckZ{fvSr-MLWRFtv({v9sjKGb=zACKjc4(J zEHHotpv`1&dHnG4pmu{imA&HbV>C*X2kv5T(6e*O22C<;w@x#f*r7)+E z0CjN@cM%3JO3L}57xYL?Wh_My>O5gOp24)gGF^qgmd9E))m6osoeWcNwc;l3ooWq; z7e^$@_7)2f{^QORWb3donI?=Me^ZcL zEJIKgOA8cvYYCY?Z5k~lEU}=VAeu)FUai)x_X_tmE*dTQ#aFG45inmGf$?G~a9u(~ zu10VWVSZt&>+LOV?HzQ&twmY?6lJ}=<@kvcv6hDZ0uxxRV2g#l1)A@>&Mi#XTR_n8 z*NGD+Y;0`a{mb0koZQ+$F8-_QcA|6M0x!_ae1AsQ8!DtEYB^L18Z@+`AsF(g7-m;YC%@H2+r5ix)5Oa#G@(%*@P$gak0q$jC^_8Vo(1Oe?q!dfUEzdor-J zY15`lmoDk)>6Cc~H)4>%f@MbeMwnLQ$DuuF@Z-miABlq;Gr$2Ue>Yw#)I(84cpRmN zM&*=vIzB!=Q5<+E+DS=>Vq;@*1!G8Qj?uOjEm~l9a8dS_!D~P$xTJTZJ^vV|iH7am zxf7F%p5j4x;yQt=fRY6JO!#kPLd4OeNt4Bk7t_6XlQA-sa2cb4b9|E*5LPtk2}XwO zEf^V!SHfQ?M(L>V#**z6Ygx|tj3*MrD~?5r7R8DKUBg^ptf>9a$MKe;Fd7^k9UUAT zjCYm#g5^MFF+2$C6!VBFBfln;C#+*a&7E1FGw59plXt;vWn+jO5QLv(SdhI`^1f5WVZ)LRJ@wCjte4K!|T5HI()dbYRJn zC4?0Hrrw!~Rsq%&?PR?XZf0xNtdS82WiuL1-W73W=sc`wTD+S!ZIZJB&8SalJ>#9E z%=k%mSMVLk6|%8~yk9Wy6p{v7LAW=6v$uelfY6b%3J!FW))D3xOO>)Pkn1w2963I1 z*|G&cX_t+}b#8JL{w*!Jf4*jt$Yng3X`rU%FikKAYU65q3{NYp>d91JYBkqqM!b5s z>E42lyL@!BnP#=6cAr6gwvE}50M#KE)wRiP6~UU_M&;*Bw?VHj^HYDJRhPMI4~739 z?%o3`s;gZC-qUA*nRABTyEN&F2q;!i5D*nX1jJrZ>>?m7#`J1pVv3sBqe<*F_HOJ2 zdrK@a8Z{=GL{0pkyc9(j}D@}&ocG}$zhE;0)4!!Q-;rH*(FT6I) z{=vSNe!u<2w-aOb`Iv4@2zq>J`u#N{&v!EI4b^WkYB$*!-%#sjtMrEh3@4hIuJ&^} z8fmI;W8NfbH)^#@t-6nG^k=%*-6)GV=p~(LrTy(9hb4c$aA_VhY;iKK4Y7eFv&UP% zRj1zFw}57N(WJ>k85<$8A|^7mNVu#oVd^0fQIEv-*zmze@f;7~d5C(66wf_)gkc_o zE?Pt$&Sz|{z+3L#yg9gOlt|6YgR4Y|yo#L|+ad6lTbC|(w6hErh!&*G6~eyd@h`uW zwrTIrty6R{Ox;-|Gdi||J0RTs;fGp#m)DpM!Vj(wVCudm>D+P7ur@7U-a)HR6RD=6 zxYEkP8EeOwucfP}rL(6s)k98gCJ%{{`}$fl{j8Z0a&f4fFWH5ANF7_trRj2cygbxX z?i_0E9cbw)S!{CrLIDb~YidmR7!U=V*Cgf>hd7u8NZf zI$Gkrfo?Se+^X?2 z(NgMb&9buzaLmGyP4$*@+mmd8a%{ru#v zVb;PvuBfm!MCzSl9ak0b+<2eb?)Fm>gX`5P) z&nn?qG@lu_=(Q&!v=_20SBjl3cDL5Y>mkN(*XouDBN-(t^lE0>%#6F#JW}pXwQe1B zD`s2-X-sW;; z_QES~Y*n!%Zag>BAs4pa$o8n!UoqWg-{9YFb1%`WU#?imG>5%4=TdB;>>LkP9dD{W z>aRj4k9e~a&?EfV)$+ld%>H!$c-uDX7{lCVe2;^rPww2hm=|;*%Y1c+ z>v!Xvf8HwmiT-$ce^K_&bHg95NPM`e-NTjbeqI>$=scG?e!u_2sYH)WYRzh9*s0U* zP;1v|%|C4B0b?H=-hC;>`H&a)CwgAkw?OcNe5;s=f?YO_az;!9w4KCYk|M^98;1g9 z$PIytoN*wB)5}CkxJ&`r$jXIMdD81kut>_lC4Pz<>eNVX^}OYbKuc>#x5; z)*`VSayvy@Bf%AX7XK>Aqj7Te>Q%^HWMe~C+~iC|-bNH+1p`OV75f(S5XC6G+s>Uk zAx%NrCy{`dxRN9qlRz%ylro9r4UD?7vQmMDE?&GyabRc(pmFRF6#L%2dz5CB0`E{v z03`?y#XTnbmj8Qr3mpW5kkC%bR@I$!K29}nSP(lZ6XvPEUw zsnLNrZ{Wx__m`Z&^y7uCOPv6#YnYx>Y?c0NGk1zS5vDm2syUs&aho%3O_%blr;=>; zSyd-O)yD(WyG)w-f_9DKp}KTA!Y;SF@OK_4SVk2`R}91#Kuecv^!w~IpicX(n*A>7 z)h69*obYlYj2Z^>WOmvYnQoPxcCWi;uf)#eW#7B^3~yn=#3c-~t=H%G?tMf=(8clX z)vLpX4%KKhjaRBuYU)&xLe$1&VhxdA$n?d>#pRf=aC`XEPh(oNOlPb)Q&UV`gRpOD zE3y|+yf19Hj~o1n(p=mc`=zq~j<>Y5v-DzWD`OUw>WjCz%iEbVXUG#uIrMrxX^7CB zb-Mo+KUl$AKra4y6aN2nKz#!t1r>#gos^UW5(Jt#fBt;XI?zmzQ|OrF!U>%YglPNr 
z?NBE{*(jVBC>8WmsL~YG1T+mgEUtiBQe@YeGiOqSE_(Y6E{vZbZlgwxLYUOGQ95k!3-l0EM2-@aZfV)FWyFz@ZdqPFmgq~305R8FOM9NFjp}# zG0nt5(P#-^fg%r)AX^-iI)@TZ6HI}L#Xyc8J$n86buOs_SSNR5zk3(0+`4s( zY&Cc9-i^Z858&O{NLUvN8w>W0I@Y;jKsu5@*!%`q6NB* zHO1!a)~y@W(7btb+@+WSUC&Z2mTEj`7y>0Z+O%mC@r6Ep`k-m-b|s1n z8Hdc7Gl%ZN%A#p>65T+@$R-E-BtAZ#{IGDH{E3boJBDr$q5`(tuU|jN3DE1wB?N1P zVPXjQ7ZpIt`fFIiE~2cD~$u>D&Pr~Y3ypG#CvIM zSJT)9iB3q!Y;5kLB&{aAg~D`GN`iO9fx@^B@`C4rj7**ia1swAo@?y7&p!LCzP_G7 z7(7;>S|kyHqLRD^$$_vSLXed1frcybmnc#Vfk@<4M17;X6#Eve7nuSQz)tvenN5Fh1z8D(^$jfyRyxWH3A7&vXAy?gg6VfqyNmV95|mv~NL^>Chjn8zMT z7Wz(0p|dv=MiB!M47I z&FR*LJ8y?y>F=}O3hm7Hcn8ZDw%RLs-rv<95pFn|>AbC5jJa9bds@=GuViY8UNp*QUMW(3-&N zH$tl?`BrtXj*gQC4RwF6!oO~!UtMqKDQzrc`@2<*_pQrws!WtiBjp0KDa6OpDOm35 zC8aizyQRvL+Dc-K@g3#yg)WujeQVoW$A-&&JmeIA zIXg)1p5;7tu=m8;pb4`gDn5*;t_Y}|!>ZpV|VKau=z68|#SPj%{`faeE0({%E7fL9p=-r!p*1s`)^&Cs zXw|hT>{9jk7p@_GTKdIEr-}h?)lsH_d2MRRpJ?{PRoPNmjI|(IDjMujHPX5A-J*Gq zMSR_PLl%v2su<~2Gd;ZK<>2ac)A*0a@Q@deetTS=`B9>=1Othb2gFK)XT9)DsfaPuD`(v8&!VSc)B9jaye5!`x>XD9C^l0J_Vd7obpFHdI+6y+oFS-_0j z4Z4$&+Vkl=^Tma((&<*l6XEK^Zfv(f^A$6!5z$~aGShaIVV_QYz|0|({WhwdYW-%F zVY^Ou#G8XHmwMS>>En1V-Fz}uchr}ga2#}Ghn)EL*LEClS;kBs3KRDU!XvM>AUKVa zJ*b34UDPGx-i;emB4Y9w>(7OJp8b3*2Z!0o2K$z`tS;{|-N($Zni+ShbeouVe`32o z1R8ec-I*I0J1opv(9eB(^;aBI(k%)K|5Z@P8}k!rhTeyy~(}cI&y>%%+_(GQ5wGnQ&r+Plz$z}Mb zzdyNI+w9I;O@5vmd2dGOt>^r1jIn?4wMch+?eczKx5bS6RjuXGZwwo?=DPxKd3bF1 zr4$!*cqL;miuf%M5kcF*Stu+CMS@ifDWU#@FF@*mJdBc%2Ovd2#3iFs{KQ?5Hy}>n zE|kPYh@D`O5DvlnAXTEyFTVI(FoVxRDnrL0&_a3#Zv`)= z>kzrYiy?@9{`uz+&J`gI>=N?;c27|XaiE_NS;6ZuA~1c70unWT5}Q7B=n&2!=3(q8 zfD@E~pici{mS~>I(U^J#HVT$X?}Nys2i>FASFBh;eqQ*Ck{93#$<>%5dgn{33at($ z^`_(?82JBTyoC-d775s)p`m|!JEoe%8}h5dq&*ccvI&0q>w!5WIlO1jp8wo2J3Bk5 znJBzt#}2dE-1sj8GbGq2T)V){$hXejTF43qv%#4xvoq^_^$a(9*|lz0EQ?KM$&~yk zkOd~XCsKUf`7`DRumD_X!kSPXqN4Ui&+wMASDUh?Em#YD2eV)lKuN{E<-y~pKh7C) zI#IVl(l1o;6mL_7hvq9Fs(Rg-MANkq?%xh{KIqSF=O*xYS-LqQu=#e4ey3hLj~U-& zhVe`{jOjo#$z2rpF4O7{d#H~!;S}slTbm167SP^vsdlH6P4&%m+_W!LeIkP6#z#Ea zcC&7oM!(4n0ZF%1qg$rceyQPvnMC8i%O(gDPA zFC?Ba$xB2~v5LIXU6`hnn@O|PSv~Ik_+NMnj?9?Y6$=;9lY&PHLNyfRe_l0JL%0=H z16neC@L;N8>fj;iTs%`t0+2BX;~YN`$VJ^?82N4k-qPL0&Ay>RIvV3G0e0pLo&t}V z8sfK{IB~+!(Xnw)6ubp0vsSBZ{MY|F-a_^*P_98yaSnPy>KGJ%C_B)U2Mid1pWp(1 zetu9EzWnmbl9CdNpMWO_Dllj!!3!Wq(6XUl<06PN)(FIqVvnPKqE`g?6PFn@Xb@;D zDghTj(**?uxJWPPL08cVNIE(I(ucc9i$o`JPS%nr4DBAY_u<2bpz0epZd42jlmjdP zeFGPxcw(UKgr6Kaas*0iR8$lc)$s6eD7nz4p;LoJl2ysJZQG)wqrr4;+_*t{7A6EN z1JoN+0XmO9kk(9E)wOHa!1(BW1SJVtw{D%1(-y1~Bcix5TtvKn9RsI4I6cHcE+^0# z$!g^TaVU%lCuj~+O0kQ{>4^?(HZ%$bhqf>$6xIx!4rMUO*rnw6MD{HNWl)Y|a&;l+ z6bwHnCkLFAFc88?C<{;DzI`!o7&Te}^8$TF=OF`Z-nZ+rG=cB-?b}o67$q$=!58F7L?@}KsTcy9z-GWc!G-~kL$z3F(Cw2a zPvQz$n4li~g()JhVI0_y=s4u%Z*0b&D2haM7Sq2MiGyA*7PvKkTWgUT^= z5fKsC59lgX`=`PyIXO8QB{5ehhHqm1#O1*(Flqh!_s8vMh_)qWj|Pa*5%7h%r6mSy zLrF@~gi&CzMvop%FM}XNQS2Q;T4t6(W)h|(BuXiA|wTG!9zjuZ4ToyhRv-pN!eC5e&x?0Y6u{+;dbvV*ym%ZV9SFZmX6KhE&P!RdIi-EN*|o zgA8|L+IHefB2W3mJ`{M%k2i0YG>yz+3>xCwDt&=Ulg6}>jBON#>)&0voM>krDQ37C zgi?XG+`oN0-)Nh`)GY+sQ@|I39T7=b&Ky13OmD~%vQ0Qs;g$l9w{XXV!)w-d(;8#> zyCn4}9>`Z!A(BVCSlT&S;=HBKo|XTkQ zD(AaOZQSJ!E>e=!933p>1j^l9q}D;!93N{JM{^4&bApeYW^WhkVovh3v=5Sd`dTw( zbCjo)5^nA5g=)<)t*t{}4x0LLWc9mEtEL525A~>tvJ?l)St$;ssSaaXNW;?|CJghe z$#tqowvLRp_7Arf_{-Tnoy&)~J(uDzu7h<#hC`XZ)YVOD?;~e;%AKO6ex2m9Y{#lK$)=u!{sfa2AYO@{6HjL3N)*Rn3j6 z9^_FGV99cn+W5)Y{?=>{xl@7LgmFGqJ>-c)TfVY+-GOIv;IF^VpSQAA^CBNZR=A~4 zh_!c^H9uI+ZDU_Dz_qrk!{j8(=%F6fqr9uzSVtzyrNL77SWEH5*w>#t;#*?b`@34{ zMyJVTqrA99O18_`4mP9fvOj+EE7$kG8280Em&#n{axX*o-filB`hmYLe__OeOuMo^ 
z&b57A_=qOCSI+J?|Mw@H?0htE(GbT9N^*@xyW38hHJ0<@-yc1BHD^wWaeSu3yU7svas7aq9gx`(51x+suq*r8R<)If z(44MJ8!8<0=}?K}p%Bd{b;-PU}0eS z9owPO@6e*ds;vec1sUF|Htb-k3&Vz@TU0D9si%C(5MogVLK%GwMe9PL7~CH4pA3$Be$Qo?8o#RjpB z9D*n_5pIEWL~I(`H{~iKKPgm*ksd#Ooc@J|1(r=oB_N8^n@2KjRro#zs0hz!g1{5P zyDj|Wwle-lu}lD{R}sH+4}j` z)~t0pOK;EG2eBYu=9}Q2aPt}$vp=2wDSi^(LKZBA?F)ar$Ca|z%3h0L5zQDkj0tDq zD1eea%!lH)JoxSDuiMAnp6c;UAIBs9`uT$7jufUQ&oTXWC*76qR#1=QQN}q;-Xjv$y5nC=~rP_v&heImPCQ}UnPi*#IMcbdrc=%I$9y$ML7eT>>jeo~#f-~T+)=b# zr5mB*?oQwuCBhD;lkg0(Z4~yl@gXtROxW(en$_+8k3V`g;4LFcN}i=x{^O6YHg1IK zp;G;SNItFverc1KNHRHyT|Bq`)w=OOJ)A+91n{D5D9;-37Hp3;F0THKa2WRA@fJrr zyD-sn{QZ^)EAgL&w+t2s2naM~i1bkUpw>Zu#7`*Q(CL5fiT`Y#ku==&hfP*p(W zXV0Du#RON1i;KxnAv81;N+?;#LrDQm1K|Pz1dWB#gmcg?3fNUtRD^Hnq*OmtV5py@ zuv3f_(0R}_)Q@iT?%kVcCa53&g8oj-f>blI1EB+y96Bu2A|)Q__uqd{8I(YT!37BP z*t2I3b`eGw5D?I^Wy_F|5O;TX=+sbPX*r-B$HvB@3GxuRc<~~JGk5M>bQ{COH-$!l zR!-XVwQJYF1u#ZXX)s5qNZ<%$CWMVdC_Ql*=+W3bm;|W*si~>Z$I$@=!{4-N6S+}< z<|_u7;4=3ez!4=#{G5;}j_?eWHlqU{U zf|-O|K^kj)eSP=t-O&&|dQiKtuh7G8;viQh)PNS3ELlQZ10N4o-w9o-1UM^OI&@IYpgl=Q=(-LDoF?@JUehQBCxj{}b|wmtqM#fUU{*0Z zQL_9H7NNKu{VTjh$%UlgIrM}ow|1g~ zK4_7NRnv~CyNXb*lyESLspCZu-E5|Ljz`5bfRnz)^vj^j$l7gMcCNR}kF#5SSKAR9 z88nxzj)v=_y}xQ={#>owBx`R>3Vw9%m0PcOIGt>Jx!8H1E!%5gI~+@V!Gu`O~!}U_vM+XL+Nb~uev1LrR$ws}YneEAFe|M(rm|_pw2{-qHBo zSg(_jhMSd5PPTA98mPVZar`f9Q=fc)lJJ)L7N*;8C0(1?@8= z<=)MkLz_mn5|Q&qFJYF$dBJh@5H*couO-*^4MRf(! z_uv{RJ1p#5&K^D5QqMt^R!rTBzfRJW+DO+=@DXiUvnE|@$Y+`azJls@jP+!!FJq3D zL>EhHm^D9I?(blZbC=rtS~I=nG#5*v)h^bHr-9CNloDO!HZGRd&XxpEImJ~CmrC{XH(*70$Z+WJ^C0_1LOtRn}z)s6J39p+gxC7`xCuy(j-b(%am zLh9`*w~3Pb;}dM{)x_G}>3tV; zvn|WAmUodScaSEw(2RV&WGSaVkDg4=o734ev5Wnr&en={(u5CRcot%JX6fj7fwv?J zEJe5!@xX@9agQOMrDzP(VN16cZzp3#9;+1Lq2EJ9#_lOl*1%Q#FHKK=bxhjW5I`K{E5-k$LZ>u=}yLK>znJ&wzs*EC7nw% z*T?C3;3lNl z_io%69TA-+aLybtx}iM3#xwhtM|bY5wz{rl`gsj0mc}#fWUKQ-;cIzo{a0Ty)u+PI zX(cm#D&7%KV{Aiq&p(9W#I@QHC&JkgU(GhH?kkaHX}3zZTDVqyCQ>hb$<&7ji})?~ z@6T=3u8&CThG(!d)07B*qI$wxe)#@eX27Wg-MJLID+T6zU-Gy1zyI>^bQ{mhdGfUq zcc?-q!Zdrd?ECo~Z~67orO%x_XENOa;hD9X8Fmuj|BUK?)T@{))O z2ys_Qbw@k}RGqYSNKFu9AUF*lJ{)2#P9SShP9n%WWT29tpAYs&kUN+fWKO936iOW& z0fliFc*L7;zBy;k9Q=!WDOe&nDRDaRGTaUcAEFxM&wcy$k zqZs$9+<%}WU=)eI=L$pGJ#Tt>73;wMX2TGFl5sv?@ zl(PR*$3NpO5QGRnf!^`Ax6`{_=u=j!6+aY1vY2RlUZ^Uiwa-?N$yFF@fLCv@?k#sDE2MW z%cfI)BHR+j!l;ZF^Ll3A^62T$%S*l+?{syT>-o;+-LAU%YMzw2H;;(RJ%!gg=uRcu z)<@_z>$NX41BHgG;nJ6BuSt8@S9>_faL`q|R-=1QWS84z)EstZ-1oDqbUD}lbiAoP zR)3+3`ARWw;%0@(F zlo_Wp!$c9mkBnGC_-h@`Q`$0(8~ZmviNwpZx~yykZvjc6M;n^&*nxw5b^3p-;Gd49 z_-E1c#*P~&Gd7XCz;nwN-G2}EMOYARkKZ0WigIy@YskB##BU)T4LY4`CS*ltu@SC%6nqI%@9SyEncGekmv@ptL?@MlxL-^d}BT-APlO)WP-&|_!sDF zG%heuLOC$8^cNT(rW9kuB9@hv5!eF79K*ypbautI1Y@MYDY!_cCZNN;diBCoP=X<> z5x%iTxw*NhkUYQelcK0mL^TT4g4-!53VMP;WoBlQ`wRu4#lN^h@C7=9>q$vTgwLQ+ zY%R)eOtDky9#R-~A6$e2Ud-m|$tdAZ&!d)myd;t$W zrWF68tK`8nNF3O@N{D;ZPr-!oFZOvxMh0evV%5gR#-b;<{rGVn$P3yxj7%zpC!4@LC1=0vQg8|}R43@G# zV*)8%5pF@pX!la;AI0nEuXsyi0EFC*2-Emef*}$M70)20yaJn0N%BZitU>6_onqfY zHZ63ol8s1_HWlL*r7!_0RG|{xh42>Y4<2gBnM9gMcKVYXi=9uwtDlOzPLLawNP2i) zNa7-tM!`uaz{^u%U!<7ypha{*@$sSZ=osaex_$dL$PxM7*ESpq-g4#274i?lzvvI) zEg(K*fvf}={1e`yc=0#3Zvp)ww1*NR5Z=HglUn^RgCdnr^@_qv)6hw8WMHD7md+U#z;&|NwfV|%QrO?|Xskw(8eO1hYB zf3wo(R<+-E6-{^o*%J4YQTp8u`cE~6ukCe*Lu{`USs!c}`P0h2hnnbj8?<}eRiBx( z`+W>2Qyjj~>JPc7mq^C-PWq2cx+P3=Ia4|mY`Qw3$&VkmIO@Z`o0@cYPS&GuxVz;a zKVAK9qSw`7-VgUpyECKZ`JRq@?bzzrz=z`f)Ul;s6!2iPJa|it7R$Xtwo2eFcW&P7pf|=dbu_n=;TfZn89Um5w-o4Yl6n0+UR*9ycd;=a 
zKf!6pt~F~WYfY_$=}R%w#qkKeERQkSwuRLa=W0pz=V46pTgb!xqwjJwp%$KreR2$A!=c;uh^m3}~AWuk=NBc{e=tC1ZCq&NolGEJecIcan)Vir$5G@VxvUEh#6hw~6ZhW71&)>SuAC3DzJXkPkWnIW~gPf*hJ5Khrq&iyS z{jJ@6q;zk&bGWq#jSly$8SPy&)V*e)TlGk<+JTCPT(8n_eW1&&zX~LJ2^@& zZepJoBIV6^;Tc_ezjQPnXGq0q0zxSgnX@Jf2Y-CVilk$0nL1pALkwi9C;{1Z6yQt; z@#!KAx`yz(bgwe~N8C4%qYd+U2wWp&eBP|mACS0}%ZUihiAc?{ARd19h!+nI}BQqUv;ICFsw=$k;X+RBojhgk$v|K(;I0(hlJn3?wy41h(1Y zfEW+?2dWlH`1|+o$0WQd4om{QwN#?wuU)&AqVAJB6}@)Ezvwnao1u^_3f@A24k$<( z*&2~N@XIg1pp#fT$_9m#a&cfGNF=~qk&85Bx&IZF?SJa{XS@YE$W!sQ7ozN6|AiV$ z5$*Qv+h;KF=uMDLX<{KS{cUp;X$LYAbl!jNn1Z+L+_}?YvHa;@XD4TRgE?>RJT23j znc0y!+A`a-XSg}>(j`loi1gBgHHl~OVJxh*duzg5=1!j*$O3~|Fr|COdEfTW;)MJdhm=RPS@uF4S-v#Tg7F)Bp+RY>Lh0e0z}4!>+2mPTH-K?y#5Uu$v0xYmY^{ z-k|$TtzW6v?RV9jO5pj4ZkBsrAL~)y!n8xD-=NnnQ0W)xbSos?Rv$xsoasa}n`02k zY<2I7Ai6K}w9&eF5kgiLaJL;};1`fL{FzGochmc)19w#lyv5nY1$qSOj}Uh$2(6{T z3gxc{6p&S`R)HY=s|TT|uuuqJe^5&CHbON0TT2_(fVW`3C5MHDi-5YyUmA@DJAu@8 z?2TuM@cuLLmK7^jV8%e!D4j0I3n(XOAhZ?y1S*Y8G1SjL%#sQCvOqZLqb|W2PkY8C>~`a zLQg<@LAt58pv-u>(HyjXkXFnpR4vd?&_n1)XbYVO!GG_)_Xx7UfxFNa$S=Vfcwi|Y zJa`<1E}@(__$DoB+qP|l2@sqD-bWdJp@5Uk4@M1gjj!49w+LB>hHMlmug>esIy<*$K)j~Xy=EDCrKtqa*vpjVhWGR6o9 z2!Q?$T1W8;z_Td%GI=4P=ahtqLe@~w4~mhCMWm1>l(L6#7_ehZ3%S)`$fzMNFOLjD zu=+U9&(BX62W6z9gg#`>K}Jewh%Qq2SyYZ{aXYRnQC`Vl19zbYY%|J3M4$vZNgxs$ zMHw=->EFLU>ZCI58xGWl;h-u?*o0N1_Y^pA3#!FVpr;;sH2DV+tbub(5h|qIVbF6i z2^6iDEOYP|p+ictBy^sFGaor}B&p#*F-#Yx8jOk1IjlYoCLapk(l~0aVthhB6+lL@d!b6EHALv8Rw(4G694vvh9uOAKM`3kEB-`{@fLF4 zAt;{e+yRNJ>g*8eG}EdpB(i&=Yc= zqHiimu1_=s927E0!CRC(L?jzGj^9FpnPT5^_3BlU*2sv3WN!s;QBqgXh(J0hlrCr* z$$?5H2yBd&zP>5Kz9oyNvr#u?s*b|ZpofU<0CmQd=M?6iGSJSlxK)PkEsgeoNw>gi zJeuM7U3JXyIH#$0>RooKy`ILsZrTl!?Gg{;nF!6vR(2-+7)KQE-%wwH~pQro85V*^~q#;wZ*VqZ8#RBnJ?>iOZu-JbX(P?je70# z8r`R%c5@`dJd9R>C!Me zskw-*-+{5sB0td&H*d!4O+_M^Q3lftXF4p?L4mhiIC`{`-h_+UB7b)pQ{~uLE(sb^ zzh>v$*C@uL>H-zy}6myHpB=qqc-Q(bT;XJT=F;JW?KzZa=Y?V^vRws&fC@y1=?@Yel|O)db&~iN3WR}*MLm)bS4_KdLhi;)M(wvn!u*2!|IyVO>)jr5YcIGa0o z7~9qa)~#$&xj44!`6hLl4rRek`J31A2k`E_dqZ?=D+69&20ydP1sqWP4Xz^Qr8W$7Um6u)K9+LKA95kZ!o zrQS7_0dPEX%&MSQehExoi6|NpUz@z-ru<|eRWX;hxpm z*2;eNxE|?4KqtecNDfC6j7u^7`IRoqDf3&EHaLT@l+Jtss~ZPc#kE`G+Z)cT|T>a*=^&SzQ9cC-#cOk1PYUtG)c)BXDWtvx;g`;6>(km^)(cI(4wgtu&U^EqH; z#{*OcT-38wJgw4cfww%kba|np*9S~LTV#5g%k&>>?XC&D<<8FCnbD5 ztOs2M+aeneva}>g>8UV#09Qr#DA*w7&VZB)X@BnAxj2FJi~d7oMVIiCUKBzSgBXwg zl&uV25OV&PI&4va`Ck8Ts3LTC(zNjloYhY#@=<$%W2kP`|A6htx%HXB_D3=AX%2P>}mi&pLF=}C?Nixw^Vx6UaF-U1EC$;s(Y z|AvN!QW{V!G;4_Ni-YoE5#GW$P+&=8Y3VGzlY1w^TNX@TK$(mPPQj;t`~HNtyi@j0 zE7mHG#Wi6~LRlzTxuE3qY91j3v#=(o*T+znHM;dC%~}XZY6vizgC45A&YE40+TE_k zW1)sqEe$+YdA`G`mNrL%^?PK^UL!XPJQ1or9dA4tp{bA4oliGkNVl9zu{{yN4H4I= z3?FdQK0_7LFV*mPS@m&-3mNjaMNX&UY`;`~fhljH%7SXoLU28t=!cF&A(v&Q0<&}fb0WKE2jML^sz#2IvB8*H*+ag*zT}otUte!4vLo?z|Ifr* z@H8DbZ~&AX1QrAtB#2OM3?HNqGYNtWa!olkRz^94iu%s(ty+|#h z2qYkGp#S9cIcU%zijD=62qK2(6qQpT7lPu^D4DxZ0z1N-De5d`Ne1y;x^yXOpy0RA z`oTDe459|K1qx3oanT|)zL_&;qC71kW*#F&4FqePK7ATvp+GSNO626^fWqYF=0ZPu zDjbe{e0(rEdUK8P1crkTq6rKKT?IA9SSaKvC;*}EXmQ!HWtdNTa{+b<`b^+I5x0{k zPvRnmk9xp%3knK4cI=1|QO9r*b4%%&h@fFRf$v~qDML5TL5pbnL9c`gpPZabvFj<7 z9x)m)FLWS3Kfgv|xI#IAl(;k`&2HJUg%%pm8m{1Zq{rmq#fysB33(n7 zqC)F(=gu8euGqH_tfUNz0^KN?n(y7a2Z>in#zc4vPDlWt0Du%tSHWBG7dg>4Mrjnh zh313~3Y0{63urRE32mI8DAw24QRFA;!1Z?xxM~{Hv=lZiDZ*f*HBYI-%}RTTgn;jA z^c(CodrjJd0X7#it*27$4|wXfxES8o>GlN~E@oP;4|hG;Nm^*u9f>n;(5bErc01jI z3li%j!^=!}I8=9apz|&-({4$#+n_$`t=ZPxT;EDM%L${h!zW+b#t})ebJVm=Ox{Kzu;>>-3Ls&%BcCRUPNU2_12G{wz}L) zI+JX3@7=KPs{^hM^SnCP>Bg(o9F6(qmuYETrt-*~JQzzFchX_$t{zv_rE+@5Sr08<#F+ z*jbu$unVoQy#jB!fAeOc-lnq%#E~X!Y??OUE#DqJman%FRyP`SAe*VuZOm5$-m-Vi 
zn$a9@Q8!`g_B_%ri)Ad4F$YtE#T0FC*UZ_H5G3~qlCoVaNv={GPdUZGu9;*L9&PQH zB2DTjO^lHH1X!~JE$*3lgAWNGOvCAmxOL*!l|+;gW#l(m19 z)Zb6)+S_gH%fWT!{&l0h>bkp*36?T~q}(iPc`t|RUXE3R-Kz^-$~(yuM|st}99%Qr zx4OABtb;tRhqbD?b&wyLvvv=XbE4(`p)z+b@|Ut)EUm(%UI}tZQ)@whwOg3nJ4)&s zD-R5jd-rm#crm#4`JkF{KGn(c=qP!Br;ekKg~OE`Qrm+cwhNyR$rIkW2L- zm#Wt0QO)dzCE1M%x9byPm*3htBF~|!lf$@H4nyMPL716Thf!0T)P5E_rLIYBkxO}2 zNYx{Ox7@k;Q%TZW-ulc``;mQ|rwnwN($Ayj$cEFj2c921FF`Z1pWBq_O)3}VJi}XN zmVPLq3M`WsOn8R3yq){WXqT!u z`~Lp+IRR4k`!Dc7lz)hLxjjVgp7A33;&V*@qDWOto^F+6s&% zMW8Op!qz2I1S6u5O(O(oGe{&Y9U$_SKF{=@FvC`j{*V(p7Ql}9u>H2`ts<+@A$RrJ z6k8rNpw#1Xq2s9}!y$LoS8DxI5pHoQGi^6$>!Wo%ies+*`84zK5RSL3Wv0)C3C&XR z`AGB@t+x{4pq)f-J-8_jtLl<@=f;h)i0DBgao8wf!BWRZ%FYV=miu?^bg?>gW9lIS zZ#3TlGgLh3#=F9lWsZQA z(6M)fB}|=g$eO^|;=w~{HFqvtoUS)^<;ahE1XH7xw_;m7{DGTG{c`K}N{@gYT2>#e zJsQX^z4ScCTYmXvgRAc@wfcaqYPp)zf#HmuY`|N#x_fNYX_l+F;Y}sejM19TZsx4# zyPdmN$u6G@`<6=KPXq!^90z2alzOroq4=w0GL3V{u{eQvGeR6B)1oj0gL(7jL68H7 z0tca_oM1`h&kku5atJsGlndO2CLrNJ>V}9&VlYMArKA$598G`m#TRH7EDVKF1EdjR zN$3DIK}MAmCQMKg@qjBSp>b(4&!0b!j)5nVg$yNkm@#9izUc7kMu3d)? z9ooEkGx#;R6rm?5j1zpL7`!G55h8ZHAQl(DjL?&H3{#(|(E)0afD z$N(1L&Afe>k3aKojJGJsiK19k9E*!*@zE@LMm3L7^Z4o0H*)d@GB#4=f*dB2#C@VN z?03+ahjw_gmrRpWR2<-i|a47gU}cBYl-s4x54QN329U#HjavsZ({ zT+EiP6g!Y?(&C&qlpkt6yQzFSpe%(ec2%H5_l@A-wWM zR<)5L!_ns|{Z_4Zi3p$7nrAlTwuK6c5iH(1bzvHZe}QcL^(fBDx^wF`)H4!U@!UB( zJG=jj!^6XaoV;voY=|ac7yZeJOrz02Y^KLnL1R=r9RL?r|JKAs!CS6hxza8sMgdS{ zS*FmQ4XP<}!5Kqo=5Da0k{7=A(r0Z9aH1Z_t>l)4B@fJYeRL3Qz5fWQ)H4;l*+3W`nf z6Hp#2j*g)xU`aSZ0ZbsMD99=L35vRY{d!Od%I%92P> ze)}zG?~WZiDDnxkX4H(TNTCDq#3Vq0#khzBEn2iFI5=2wFFJMV6zW7nXyVH+zr@}k zr3}rHO9)ggO65%N4k)fKss&TP7;zWYmyA-59zBYB$Z8b*M-!>3sko772!@8r!KXHC z*r3EA0RzP>q5qgh^1L7<8Qc>blXwBeTSJ`$N`n@~#KeHVU=pxu6fcGH2$PKj&dIW5t|bxpIXJfhd;{I!vKa@RODd z-f;XRyamhMzkmOJ z{rX`SVLyN+;R@E577;Lkz7Z{7z${hfFuMCG{z_tfJFgk2uLAA6a@?+KxSH_ z>_v30xc?}yi~{CR%v;*1R3ZIEPy;peCqzhbFCuK=d69~}ao!@F;E4bMfBg9IXP#r0zobPPjt}tMOoa>B^{>lMxR4n%k`kF`o)E?DNuYi7*`ww>=tWvp{Fu=VCk&q&*a2 z_`;xC=w-Ut-n1(~|B6<(FVgzc{8rx#aQ)%E^b0*44+Wae#OwDqGk(DIn@oo7p88KU zx_4EEO&0ZPv;IV=cBh|ql~%t_t(ndAD~*Ot{`#Fp4V0!$%w`cYF1OQdQR{Yw%U6q? 
zPL7CLVbp!5(Qjj#pO13yneJt$4gc|akK8um?RImf?#8s~eA0AT0&!~YOC_Vue*+v$vHjI|JW%SPc(^x)?AWqP{-LhU8+ zB%uLsxpeecj^5N<*d3wuGN$QfV>vJImhEfS_-b{5Ocg7TpRQcFW+9BZ+ok$ja+>g< zvMElMmXd9_vpFHmTHqt6TWrGIE$t%ZzEN`j7@pj@Bw8NeFJ(nZ{UYUlp>n>5)ILTY z6fETi%GusBcQW#qvwf@??s5k|IU~(Fsf%^;5cld*-|{SnG0}1_XG>dmsY45CXh(Tm zXX}LS4wYHfiguRq`7Y%jMpQ41sVecVjJK4ul1iJ&g95GH0_7e}xiqHr&g zY$NQ=v5w~E_I7dpQrD)|B6mw0FH1)cOM7>7yB<#E&jr;jikY%9uClL7O*3mBSE*g1 zJR-(gY_V&QrJQA?>L#eQ6=cg+y@n(o%}{LtsgzJ)LJj*B_T6*2xrIeyinVqX9J?d?w+ zj!iRmP7HlSiM`oYfvZ&^8c`THT0Ukt9!w4a=AEz7Z+{7SF6e5b7fK*{pMOn@Zn3h5ZbQ3>^ ziBK@Jg<1Q48`VJv_O+dA6XSU+mNVlXGfzKyDbN0!{?6yq&By%Jn{_+}><1#kz-ngv z+Nwr@i{0dlJ*{U`?2ZMf_UJXMnQ0+2&K6yLU!<$9;EAERQ(h!fdoz^-WA+>!Vne!Q z+`Vz5TI7Q+5qQf8@xcRiXgxP9`SH%39#+TROx;h|=M3ecbhZ7NW{l2wV#P`hqCS4S zxAytBo!u*RHdF1Swe3=WKFw9CU%$J#uxWG|V!xdB=V>}!&@F+ymTzo`KSZ?O3lM$4P}};O9&p|p6D~Zj-l&iWn~b^$rB$u z4&n-Bgrrmy?}-DQp~!w{0w)BgfWtyEhfMq7haXZ&e1m~1cnG8^G`C>E0@7K_f93^FT(bWN$xm6>l0vUJXd$pJD_8Per^L%81Z>!_VOSB$3Qa*>2#oqakGJ4}Qb2QyC9t!zQ?5hi zBtz~$`TO+g(}&*nVhJ1^9F*%2R_V1hmfO|U_1|_(!CRh+cgijW~Wn5L|0 zG>eX9vG|Ga_p09`yyeB5+)VBl$sz7`;vG#5(=SmQzV^~x?jhfKG2p@Cn4dn4y!~R} z?Uw?7UKDfp?U1X(Tu&z%_S&fzF~fB3-D%ip)bDZC)yEo6B^l3jusPG-=4?ls3tcS7 zLiGoHw1>Pk`_1Zg%&=a?^Bld%3?DPY0%n}WjIWD8yS+u=KBDs_!XOY7K26x`dNCC( zgO0JLJOnO(4fkh9tbctJWMzH--FF}|^i)DCfh>LHUms9%=|(6-q)p*;5>GadA*u z!^6WVY$k=(qV#hhP@s^|H&F=)Ca#bVCg?2)9q1>NWl%Gm;0hK6G#2Gi4=5PL(WTmm zDWFTxXfWrL_6|xDmP8lzqqG(=XKP{+_o zum;+GC``+QcCjw~`}fCmQGakdmWg7jU}#_s(8@vgu$pKQ3*EPGU(7tZiaw;KrV?xd zO`r1M5TZinD3lO~V(Ow5%TTnP$imyVMbSNz{`hzK@Y}AzZ znf!6c<^+9c*|H@HQ#cgdg4SC#9Qc`=n~P@|bz;tGjj)j@Su+{8kO>y%ik@^#7lqzM z&5ETc*ejL^^`xh#qyIP|>=fJ?yAA^+GZM6tkdT1R6Ffy3g(%hy8x3z$}DakTVX|uUOG29zBZOb?)3bf*{BqhWWL>Fee;+Z@$ba;@jm?N2Az9EviZ&3D}r=XfN} zbajyPty;f(%ge7%X}8UuU9U~KIW75{Jo#Fo`!<~|RIs%g?H*T6eXPy47}Lj0zsXL2 zB3M1ss(IT+eI~>0!Ro^6;{)%n9&vMO^V`q+-kR28Z;uvQ5?6eSIHds{k;l`tW+Dl#K`be8M zcoGop!=s$xKE8G7rzKq;?5w-_e6ziQwpT}aepBRlC`!Lvr~h{0GlH;b(cq?g*wDYGb)6;^o$_SsSm__h6bKLL1Fs>QKfK81u1Y_*k-hty#`e zD`!iBk2O8e+P%3vG*T{hwY0`hKRMIU($dM2;A%;ZmIv|(Dwf3N(y%bOz#E0-?y>Tq z1bKLv+}lIyhnbH%?P9~E-YujNXu`o9$Ak2m z<1MyLy`@fJav|z;wj>2g*-ho50_Q0&h15!lNR_yIRIVeBOVe5&?Jsu^mkJW3k~q8Jy+Wr_{FeLI@BRMk<0p@Qzi{mOnSpEZS$G<(fu>Z>2>F<>|mcP*Xg9mrHvbl89@(s<#E{iKqwN8kX`Zm$` zD#>}{kKZ{_n%;L|hG|l^eMNy&RjxzTD?VixUi?gOiYGPMvj#a-O!Td*33#c@d+LVQ zw>%Yi%fiwz;~0CL>1Q#XeCKnKre`4!46Da}Naunye|HJJ#hs}lMBtIWJXn)9Ph>1= zBkbAQ2=^j^*KsJO2alM|{SEV(b~ukx!ts{X%(O$T->cQ^RO!|*6Ggk-s?{9}P@idK zJloOsc&KihK{rFT?g8MZLf91)IkEMrIKbG+r}%U8C!xqqeBVh-z=zKE&2XpM)raDSp3 zyY{S;U6(WMOvcKEeG7yoN+>*597+gQ#Y0C)22Fw-*_Pv=%yeL35H-OIAQgkNK)r<& zjVlzBgkqaeNHmC)_=!r0=R$JE&qa$CLE=RLGH8Je306Wc`@l@WSfLp~5QQ|3>tH<) zdMm^Meuu)~Q>c(QDFxpEhed^uNg%x|_9wVPa0&q?ki$_Az4HRU2A@R%bcSBbLK23= zMVyh6isB0XLK)n?aN$Cn;~VACZFG!;cH;Yx-_Qz$_eT>L2%00|>Z!1A!JJTk(0LLD z$g`M6jVZ-E&lf?DQ6cVC+@`Pu&>Bb-B>NUZNV?*U##x$TX=yYG~}XLhpFdx3P) zXh|r60D&YBQb>U`0wD=4bO$iA|K^Ou=K(2r8mzU)`@eL1d$Q8NeqI`%L z(oJ-W6LHl8t66(*o!9CkeAx8`r(mb2=ov5KSGKOaO?b=F;*vZevdOD(unYkmkU!LT9d7|HynSoy}iTwJWE?=+ca%E<~xgoYgaV{I&%&!T{i^Qq* zfucVdt3MHKIF)GpXn+d@>?cL)xq+^yyPHoY7!UjE4wwzE2I;G6wVx6va;T)TR8?!0+_&mxNr z50rLpRaF&(X}{e*_}~K=DgWk<61orx4e5T{F~(bd_~D0%jg4Q@YiMm_zXEpr#&#XUM`W@vERSv)RlrEbg4&;w>yOaD)AQbMxQ4g@=d3Sc9Pl6OY4!=FXi95ma7Y z4r3ONBy3XHiP*uzf+r5fJ50Rj=xCU?kn@mQuyin2FdQCz^ifDEOakO89t}uoEH@D8 zoaTP{^5q<71S5zY5g|OWF5r=@udl~9#|Lpt0faD9tV2AaLL5P_7|&;98}-17LHWan z4Mkv6z z4SI_s_{kKwxa~E{Tp0T}h#sOpXa)8=d%tiLPw(EnF((|_gByUs2ToaqS;C!wDaBYY zI&PW)nL&dF6%`d>I4FktHOvI3GmOAM0sF2nZ7&XDANLaWIa3ldv9OB^dvS0&o*q4V zaPSTWj!JM9*qs1U7qiU-zYOW%2IiT;ihyEFDw~#;R#Q{MD+?sU)TvWBs0X!aM*9B! 
z`#F0x>Sxf2IbJvhiYK9@qy#rVD-yRU?+h+C?g1?mmbT%BEGsKx$DYxnNAm-YI|TLP zI9f+5jLz{T#`VS6(IQ5T+341-o8~hKeFIa6iA8O=h`53n6h_2g6elxcoQlI$mUW`H z)iN?K{Tmx21{x%y#R~n(vxR^1DeR-tISJ9Sj*QRP7-LyZ8@sq@OuT$<=d?sx{p@qX z8{;jS-wWeE49lQqCM(9{cIM0(%sity5H)yw_$uI*fNsf8B3E^ZG;Ih0Tw=~}gvS#L z0fTb5nwry&hPN>I!y{!!m(F<0ZGu4Y_;Aim=#zYWs)^l-$NJ{>CLY?j0fGU&zoId z_cve2bopYE|CJd5r_g6UM8b)(MuzC-_#-n!PuvPU(YPIte#an8++ zP2WD6{?pm#udOKfIK%p?Vt7}@NB@X0ZEzKz3|CHMse8OF@0cwg`WlV|>JJ2)clj81 zdYFz5^FH6t?Nq3E-)3GR%v+q+k9t~mTTPn{=6j6N%vm2`{`pfG$n&o^u3Z`vaAkJL z=gor-c;nv<{A9Fe&fb;v)fvVtw<;Qq)3Q^q%mIn=%ZS*EA*cNx>&u0D0j7gO^#2e-gzh2 zXc-`{ogqrH`0DHMuf$t&1MOKMj$x6Gf2+7AT_(IT8tx#TfB!Y zHNsKQ)lm}VD2s87_OoXOD1*H0=}ucuPkT?*y_=_dcYk$&uf2b+=d23n;!KaJebgxf z)EQmWQC{|5zV^QO*UO$3qz(yljEHuWN2QSut!rLkNOg?=BD8GdjIz6!FNmv zZe0=G{z%;7O20Wl%7_DdPknde$M0|a_?Mf1`Pc7%{Q0{df4ce8cQv(Cr@12fgFT0}s`& z`TC1rZk1a<{v9$F0ftDEs&gRL1ZS?_dbNm+UU-;Tj-~IIc*FP*T zc)H55U~*8~qw#k>7~T5e!!n}^49M4-rnd<3IFa0L5Z1Q|id-kG50FE}V0l;Qd?bP) z8BzKYUN?(^ZmJ32NT<9-J!Re^sa@yEdrV>)Mf6=GSxQWIQ@G7*L~px|K3k}X*M;>h zVSUeH_|Q}Lp_l$0lj#}qt655pBAbNupj~$&Mt`Q4`AoWG&^_WS_M42`$T4QMOmAwT zY(X;GZU#XmL&y+_6goc&DRL8{I5+ppD_18ZBn>Ct>3)O`^_6ZgGPU}5*Kb_Cbg5AF z%oe%=^2wpB!A@Oq<5ks&TP0@jH${@^d0- zj;z5#Q(0LFWfM2Sz51DFomL=P*bre zLw#kq0~#+I%h0b-20efJ>8EiV_PA!9ee%gCSj0I45oe3$0-RHWsd1TQSIdP2?G9@_ zN}|Aq4I9|Yf;V&w$FYE7ax}ee-MV#HyV-SzU0Jkz(2TcWXq=22rUVo0bAF;plP2K^ z#}i=(^T3Wt>M+MsQ+&k`>8-zpOk5rJ!Z`!^dd1oZ#kmiO`Td81?X z?7ZE$aigiJ>F-P{tl=%t*`R=F`qgbR+;GWoyK1)ts^zm--NOCTxhkE|MfyixyCT)> z*B*YYKopdV@;DLaAv~@0NeZ{I!jvHclcnEd6a`1mB+R*v0*i1YPUv!kaf&dF6GkW% z=6}zVM9?(%QP^)z?-(`D9ILsXKADz#L<~4)$-s2I~w$6(*I{`fqAA z7z{A4@Yog?7wh%^2HG(ijp!Kuf(iV0Ow51(#>u>f4<8OWylK-W7!k~#3hNTK0Gk^y zGuZ@=j*fm z)F_nUbVWD_8LP2jVBz4fBlHc=(QMi{#&gS-Etq^PDC{%>YX~JFH<;=Qq5vJn5Z-$0 zEq*G|B^1VpP@6`j1Qz@_2meed4A7&RmZr;3^ z-{xb7BbBh0p$zUZbQOknUS6ISPlVCI28R{T*J8|=F<6dynZs(rDQ_6)KrzmC&Hf~q z9Ly4~^|*24`q9R`E^Nqi!XQiwYG#)eoW*m6p5uwbLJ2X2ONp~Eb)4~sgLIgj1ZQy# z%oW2-yzG^M*3mJBbQoa4hMF0lVXy_2U_=bvFlx~m_d^-nmhAok(;Fr;R#jXJOcV=Y zEFvhtu{NBv2&Z5P<~sow3wO)A@4m~H_-(R?aigL$XoA^vIj9FUXaE$BSyI`QD3Dm>lX5bg^0pTYbvLV|0`cnh8y4)x-u(Wu6ki#rWXqrVe6|{*qG2YT>2sHKY+CG0XIMiECeqGb(vc^cDNqy8lJgnl}`*e#P8 zkYx^?970{{2t(*YOu6?6%aak7k9t~9#2YS+_WU@{c4>_BlLF;*qWOzyfhSVkKS;J+ zEKrU{xLlbTczLq_7ft?W^PF1@E*pgDC1Lra)v(Fi_Cb{El{ryo>XP4cio@Y97jkU} zo#tKchW);lcl})77RL47mdk&vxcz8Y;ubur-p0d`(yvNIBUzFvebk~y+ zh9lkFj|S4+`@Zck3=RWY%raS@%1QXL?$Xgj)9)Oiu{w zvnKO0VR|jd?VERA{Nccko7-1kEb%y-?((6xX^U#!*~5L0x7(e_Rb40jtq_kKScM_gG8ow^ku8|*PV*gjNdmsWF8%)^!v zt&~JK3Y@l{Vd{urH5=bu)Z+e*$z@(GOS-hJPF}P;x~w;j2Wl4y5QKV_OfI%yx)%Q30ex3$=NPHn*a`C*G6iEn*8p{*`(UW#+{ z_~fN!sY~)w=hpRZeLVaA7jhnG?Ypun^{%n0cQ>c6oSn9^tb6;Y9t+D;+A_VS_Hj(f zQfKv7W~9q_tI83c^J;us3p^GUdNmJIXO+h-X1rzfAGe3;OA4bFJv8CfJEy)hVaW1y zk16qPV@Eh=J&^bKAE&Q-sP^S03Cn7Ho4fiH7bYzj*XypL;MO#IQ=&R9RcRRR(LBMg zwLWag;FL)td(0c|zbM&W7wf1T=+V?3*}kF2+${sz9~$!To&6u28?|gsXxp7#+YVQF@Qw{4T*YZwtr==^Ot?I%U2L+cAxzdMve2}$V^?UcmSxYbi-%%iSs%;<iI|+No1NJS!*PUf-%k%x*6{3 zA4|%%uYB;(D{FQ?{me^EGnRxz&K9~#dc{1ngSe)asKS|7VK&hnR7+PM1MH&*32(V| z^XA0f8GVG99M|pf+PVivS4;>FuMoOPLO0LFzCL&4#i|VO5}GO#reT9B&K@afg_CANGej9) z1bHRJLCy;CpKe&!py;A?DBDo6SV_}XUMNeNnT!fyDKJegbM1225wvC00P}&>Y;+i! 
z+y9~sOFzF=;h$X3sZ*zTF}+PmLNPRS8#aO#*uvq)46Q(|(()6bA^gNVb38t-5%cXs zf5trE2!k!`E#EmeH0B3&VygeYo}VZzEDXcP{R6f9KPUzZ5jW4yojakfK=VI%@F0wf z{{U-pb92MIz4OjHP?axUymYrOeKO5(L zag;+c<;Llc_=(*nnON;PVcBapo$v2!Lp%DUSH+cDWg!(#bS!!-cUoWm+sFP`!n6x7Ev@NTwGE^bmR@A~IL6CBg z_7J~#NFk=ijvdQnQ|wE_PKGECVT)A*HQ*-}4*ZLU3r`sq8i-}|4X2|Ll!tM>nl|(e zf}NQvVQL*Yas>52oT6zIgSdtaXWI%U4+IPQ=U{!|WFp^x|2?c5&cuf`1(omJyO+f{ z+QM;W?ZBw<)ZeDqF+ANcAu(zWfI{CGB;mZSSRFX5iHCpx{G&zw2K@E1f4x{6VtEjG$fXMB7-CmTXPF*+=uJY<-e zFv*$lHY+PDH8mC1Is+isaDDlm2>ykrK>>E;z{aF2I1%6M>A}P=oVth|GhqJX$~HAM zaq1kVtjWvEV-FV1L4|p7ApoB+$+tk zg<%b*En+em%mX@r%W<1LSa1@s7W6qoQ&KsnBT8o;`5`eD~dVxVxY`G5IjNvP4HmqdzEseT=bNw{Dn52FkSL z&Kf<7#*xA(M`ylV4U^IQOn&7Rqp_@Lek>X&!7edqO9O2*R76X4q(vKPB(xfO(>b1q zyL3m##_k^+t;2pG8j_<09O1#i!-DImX>QEhcJky&h%feOVTTYcNCyJp^y$-`Zxzh> zv17+j6{8u9v~8PEN!Jxnnqp`{aWUK42_`4?-t|)Vgrt0{+^uV+X%sWXBlxZ{d8ddF^F!RO z%?mn}YCak)PDC63yg2MkFSmCT@vv$>U^DKr>&^s;ZQiDBp4Lx>+n@<-u;|X`IIqkM zx{z!8qS5>0P{;Gaax}quAjWiVR_;$9zwyJ4DgSa{^1t5P{b7IqwN}G+FZ2Fbm&^4& z7lzr+_I5cDZa84m?KbI8=KK6gpX>c$hPuha<^53iOC_G4RXF#An|B5o9;1A$_fzhYp3;sJgM=6# z)@8!Ltfsu-qaduhM&(lqp>$%$&6M01hAtGBfl14t0OvuJ8YWpXuIMqb50ih(H`lI} z#U)^sAL_2m?-HFA5Y$BmZi`xBh!LXRFQ}qxOpZ;-q9~;P2GeAd>tdrT)U8a4Q5s3O zN{mi7-e8?=axF8;gt+WGsrEsB9ft0<-N683KvQ+nO z;p)hEwI<3@?yqF}s{_6584>Ep6tzCtF~RB9!^1t<%bupVcXM}*inJG?u&>fT%26u0 zc5SJ?>VO1$%?OVLcXn-iAyJ|(xsLhC>iBrID#VeCZultuyp;@JrJs+|2jvy__;jT) zMlDZK#`jPshbY5vFy2uetPDkk!RoMJM^100q1bcLXs@<3W$F-hZn9&nWZZSgeAbbU zqW;RvZfbRaV@QlrGFY8A+N&jAsZLcICI_}Z7}xf6V(a2AZ4>+!&j?(cZl4rrAMWKk zu+(e8ypXm6kGWayv-6ea3g@EH&Xz*Q!Xn4Q(auGcUhQR`GI>z8GIyxbobOmz;oUMh zsJ+y=sJq%2ZyTExJMCw}Tb_Azlb0o{n|hTaKBc?T80{$UrjEs+W`--}IKSwg$h}djHn;uIUH(GL86H=ExWpiEqTy9oS8#qLA!}WdyU-lD|hIIeWB70$xdP zayPw=eVl-ujg(qw10e~|3hN&!o6l0pXVfapE65q<12_GNuDVl6#^YVYAw{=MXIW3# zh;|5|D>RO_kp^orI{PLJM5D5&&kOqM=i(w8QU-06$R%Vh$AOQXo@VIRLT z^8e{0#C?Rf{CM-`tUmoRsh$!chEr|q9y3W8|0s03g?N+%=1^IueuB_15{5@9?a_V0 zQcH=s(MpqK?ls&i;UE*l?-*hU?oSWz6rsOY7+xTDV#ZtQiN6K99CPbK2Wz-Di5 zZf2>@Yn+zIPb1gG#@?G8FOTH_$FYO8X2XUJqeqX1o(XjqItDY0VU2^@2_+omp&LVc zM02PO`X|1jV7&I)Yb#c)c<;UUa2Bd!N)w!moHZbR)%(&xdr>^59R^KS&Kl^=pLW`{`>D^j4=7J$f6#!h-tyU(3!C=OWKF8zsBUS zmkZiOC(#72?+m$2nKA_m6$-;5K+Ukwp@DPW=ZO<1qH+uh+8TS3vnvblpa9fehFqA$ z0QLfOw%^Zg`@g+et=5o`5Lk;I9v**BdDz{-!NCy`5x?Tr_W)I2x$iVlY1;8Coo_m`tk-rkzgn$#~2AKE`vIZs#&xjt1$D zMd&}tx1UL~oQRRBOo z2@}}a0ETTsLPBY2DUQIdhY7&kSq%*hm6esS_hACp)YM=Hq8ru?L@T7$%P+tD+;h*N zFvoR5`a!ZmI`Kk)(;)!s>+3lT2f_-)AYC~d9V8)kP=8}%BP`RcTerfNVQ_xSmMt(* zu}o~-xRD7@AiYsw{rdIm)~$m;M!Va#Z98)02*2QlOopj~GOxVy%F|Cj4P%E9j30me z5z7{gTu4;dELf@_KN*z3Uy!kAZq=$)?5T2_2*C;q3t{<|m6gE+j*5z^s;c7h5c1o% zZ-)R!XS4_)oQTB*%LnSg&`=>34)(a1I&~_R4@?wnbq+!u4DJdB= zXb^VLI=fG#q@=)d$0@k7Xm`w*F&qtqYWw!>%Q=h~c;TGO4A(HZE;bxbPfyoC0}eGR zFE1}BC}6OK$#@eJ6Pfu0&7l}O3-#{Zo6BQ|<6^LrNE^FqFzX9WM2nd7ii!$!hL3aj z4^#Ex_CxFF-l$Qduqf}{yLaEdeYg`aEm%5n3Wp})EJj*z7N=XnhPxMc%ZpP6!@f}${_%ImMc4uy)# zFYdH4-oo{3E-pM+4R3)y28qYAjx)b6qfAd+9ybFnz2*y~VM^?@!!I$}^@S&m(Ik#G zVjY`baq?MQlHY!^e@lL7=o}%+DDQAjG7t)glO$c}lZYg3Fp-laN$y-7?4Z>~^Wg-; zpT>KC(G+;8*mJL{-{xg`-DEkDY&#Ke`KYhkp`P|LJuUC6#G1db?hFuoZ{#e&_E~Y&?aj`c2FYnddc&6v&>A~xKUH1kXKI-doDqeqWuJ6z1 z@4c~V#Ama6{L7){_i_W*xf;$7wIB2~o=!1;ToL;?ECK-?11EhgsGIxIE)xKGx0Ubeh}I z7}Gje>smJh?4A3CWf~>Vt)zTIn2pi$%|0wRQ7b8hMS^q$()TBfB}W*-g|45_*OE;$8Bygf$HF3H7D4S6Y3Zd zgQ5l0eeeF1s=BM<}RA*!;Q{&a@7{{0hr7+()FVACMFSQ{-t-^PdS{kSh z3UiF`QTqAV`-G}F`01}?^>EY=Q=3P4&i8ZAj8pUf7}xrCuX%5DUvO_s`@OMkkH)tZ z`pk@XlqYx;4fL2kP?;5}7Nx3n1C*KL{aR;*woeIatMzLucFs>z8q0jwQ`WhOq8kgY02@JJIrHl zwQoy_bJ4`W*2zKbdCu8!YE=)VF;$(Asm?5ME*zxJ>a9!}tj;cQHjng}AETDWIjW-6 
zk}gXAV0CV-|H5Yz7ah-deepwQ!mIa0p$WuskH#L%L#qx%5nM!zRKRVt>^ZYQ87|*AS zs|DmZG*g(Kr-Re!DP}T<974LLN>?KDQhY!|y#&5(l9zzxn2yP|~2!KmYvmP?9-ho-z9-9x)x6x-zAoBn-TQ3e`T+r*F7)+l&U1Oiaa5(2F4nimA3&9DS>gwt^ zJ@@3vlcA;KGO^mg{?YK3A0exYM?WA0o^JLM ze3??SEs<^{k}9f^M9?^4gb00I3Rq6@W_jP={P8f` zf^*SseSJNI9@Y_5f`1|BAv5>y-;cGXw6qkW6~)jr1ND%xkiBS&VU2g+efL*>4%&9@ z+O?T8XTpY!kB?_6SD3CaU?HD7M+IuzjT<+xoH71`=CF8iYGBCW-_oKo*fMV1I7}L> zV;JL1uoL=3-^k1!rLq z;V>oi7T1&k6VA@VejS{y2#dJpf`V@*yv5L%;RjXa(8iHIs7*V)ckkX9IL>0=h4*nB zI~=&g{5cG0Fb@oe$!M1e!{H>zWo2cYlzcR8=q-*6A3hv6E#?Y$1!j`HQxX#s87yKS z6`YPr7_ni$9d41`b(jkUbIX_#?n#cT!iktnP7KY2y&6Xi^S2y7ew=R)Y)pWIbI~h2 zM>v96#W(ZhptraI&|5Ukh1pexeQ!`6_2B-*>DZ~Qt;G>$55`&O1N-yf2}Q3khuCM* z8*~g4g&k%>MuBA=gtz>ftTP)egDqNs41*h5k|6Cwjd6uh4$V77+qCpOT6P`{Fwt-o z%`1kh(gJfdiWsdGjgpsZSlTIJwC44p?eM-9)5QI0>$LHULhiXnOUozRb)2ms6CUbC^zB#oCG>C+;Qk0YP_l{)iIo9WCD$Dwe` z0lWT?uL%a%;}-MLNYgG?myZUjdjehF@pIW5>Hf`q;Wt)Se6@1$$GP^sU93BN&HGdB zZ(EGd7@W#-E^*3K0`qSQ;uUGb3=VIDrGk%h1ySAj)tq<@2>y2GEULJ8g z-RIWk0Y`KFcG$&@wN-z9VAS~mmJ5UIt6i+?jK*Cyu`k-P&2B#Guix!rxX)nRs55Tz zG#?IgIiKZvag=iX@#HV227j|M`Nt0zp6cuQQHl4_7?&3;hRr6!Q9s?e!L|){mo09F zJ#L11l!atCInz$1x6j?lAT1Jx1j&f2Zz5O!VsaAb(P5Cno*E~^oTXWpOmkR7ucvd! zaLW|pS;AYszjkd@TtXJTe1~y&r`}j4v+hbe4n|)bHPoMiwumu9wwOWIUvEmzTTG0m z`LeKdbjcxZivC2>(t{FJ^K0?G1oh-n^5|qzfzpInB27XOYR`|dm&QA4x;e%MC|SNr z-&l2YKV?>uI?myqjGtW`#lH5wHn%v3ds3VtnMnfG!T$CEVKP)_WRzMOqUI#1HIZr& zc6@AoBGm#M_frOVxTnUeRo#?IFQspYV?-B6X^@&7uI9%&Dw5T@6tym1sZ3TUjPPhK z@S4-hqdHb8!s&iWe}8o#TJ*8^?X6DguTFE?x}#|v@l!H=mHxqwVNOXGljLRZZFh}L zR%`mGGeVRcr>!R%&2`SK_GwLUjB$61^Ro4dRV%{DV+VivD+5Cuxt_M}*okvg$E%gu z%DgDGWT5Z3T^V!V>ealgYfGMUc0)kRlCJIZ!rJGD-SJpl+sc^M`hb?cj>+AWsvb&1 zT|jGlm-Yn_?eoLiYy1{?EB(?vD(8kQ>#f!Yt9kuAC(R4JV?p@hI{&sJ&xP5_to+z! z_HTLVkK3cnRf$S%nmT!UNb9`t_I%HInU3jwm1#L@bCplays*X9K8wPXk#S0SxmQc6 zb5W_YwZeN*Pqnd&V^ox*93=-Tvj(ZNGnA&jYEy4@azAxiL!j*K=3etIWKZ1Ov-!c; z)=|!d#m)s4K8vOXEt}-my5s(LSa~?zG;6)kuO~F)DauTAA2FiTQEXs$nJ8QrCUnts zOP5Li3aQ&qkZ6qjx2udZlJ71HiDq^xQM)vebI5`Yu%(@Vlxg(W5_$*vim+~?s3I5+ zj}hLon4DYiK;0w#;jQ~zjVEICr@Na@#_K;&^qYjs)0B;94J9qwO1?4A(TSY2Izv7b zx^U`hXb0Y+5MS<}32*VIWY&I^L<40os3gKz5-FuxJl+4@DcUHHo{nM~emFF zNk=t#1dmAy3B5SVy*^MFMi8w{4n0bvexVDN&`o#weEovAe1G%i)IR+N36W352GMh! 
zL#Ja_A0-c;5%hTWqo*6fb3SE0T0nG^OhJs}^Mq-Yq!cz!pcYw%q!arS`|!8ZDT^r| z^lZXgphj{wB51!@df3qdQhiAWcRdH^-gn=9P#U4-Vh7qC#|JU?0==5u&N;C%w4m+V zw?q3qbm$OtW>keTD2X!ALZN@oojVu01Xgn>#?L?h{8Gx@xoz7vC`dTStce`7j-T8O zbb!C1PP0ckHZH(RPv^A~D?6uSh7t}{8QT7{&pyjUxah;GRjcsL;qy<>_Sj>O!BSYe zb}jmeQ&1iUQ8^R;a>7mgb??3RvTrT3)@aOWXc6P(cb(9LxmWm$Idz$o0F`jJnfn)$ zaOTVzoQMfwFI=X?!6leBZ5k$x0T$-c>CDr@EX24#m_yDg#nDDg@Pd6b$9fUJOZtz) zaQ@%+f4u#p;VnPhzJ02+{3Rh)Q7Gl}gluf4*w59%y2WJPa&~e6P+Axm4kKIN0<$v0yEb;H^Zht4QoL3)6kVvO!q3 z>C9VnmK{>?nqSaC4YRzZx9syUNiUaZ!yzyIVNcxwtMM&iy;E4GO8H}XRalOO7(OXb zE|xmZ^e`WC8V;a!yYYb2u+7!9t8)nvYOfD{Mw3%|R0{rAONVDR3i zD3X6}yoLE&U<^W@!)}Fuh1h4;4wkx*kdURY{$S}tYK<8)X8ic^kXp++QpBPRtS1Oq zNZK`P)xhFn*9Rlha~{328U-t@^da`G*ntziaH^9Q4;5}KMdq2+JZbp zqcA-2FC_fdty{5jLEfT8NK14KaurqW+O_M`Pd{ZZ$3Or1&zCP>-n(}%`Y>+XI0$p@ zB=@|kstOgN(PPJsq5PFASAP2Gr*FUg7E2g9KXKwjv<1r-1vnN4TS7ttP-3tJ-xv$W@}S!svxBqn4HKTpKrj%d1Yz)mYRqwikujr;7m=lxL3- z_VYo_m}fi%!-o&YDIB1L`R8~d+yXr^3(6_?Ph(;%Wc1#@kG4!Qf=T%di_2>^Vxnb-`o{;>%IB^ zdi{;7cV+DHu^bJzew3=;<7eJ%Gp;gWnKr>9dr@!tc)0WCGu^+rKl=SpX0xsFVH^7>E5%4(h#RsIBY4Zdm=u5 zN@lP+)W_Zjr-Z1(0@bWUr7A`p6Y9wGR|cY3s#;s<+1$&sCd0Ea#Zl|6^hr=_x~e58 z(A81eRT<@QP4u(%^|cRB-BVm$BHdl1De$Kl-=4PaUDZ(;>eNueQao+FJZ!1n_KZ-+ z@BlR{L6HtEPWK*u_RI(+AN>qcvm+fv>1yMYz$Hb_`Tp+x1MFE*j`A>NM3PdOp)_?> z3vq6sk`+R+MFHyIu8!gerFek%_*KcR3&Iu;_MRATA3w}7cVRael2S>iFJ{#ybDD*2wk;;@VzHZhbheeTe7e6lMHCbsA;>Lr!pvD)O4w&(YXZ ztDoR! zw5_}6m>!NX3&WQ*2FNg_;hyt--TDOB2Xu2(rz=x>E0g;x)AJnj3p^I4Dieb37+T$e zu+~2n)t$|0Yzk@_lp z7)rQD2EBo0KqH>Yumem_idqVnPn@izFM%f`2+z#>B_c^i=cN;kaGDSgOq_h1Sg)J< zW%ei9-XR?sUHj0dPMDq)*6HMUpVZL@eog>*L5IHQ(S;}@=azg5T$)X$&@3qsIDSfg zx)a_tTP8&^k0Y>aCFO%&O8hM|XU^n|HoO2W?UWplJty%K+UY|NJ@m*Uk38|j6VR5~ z*M%Vn<^zO6$k@Wpojb8^?%1*8^y$-YyzvIKAXcQIt3YSQUr>zE%Jk{eu}GmdcI-p} zW}{`lQ#6gLUVQOI=+ckUwv5oAt8WFG;-J zWxa5@mpHhdkWrK3ctPRK?|x^z1(EcMh;td1hx{BpdUQ=q4Xj#Dz{h@!IMUG2fIgr+j9Csg zfq|Tql$4N=fPZ-j!vcXt0c#>o)RN+1ezcswm_QUp=XnLe5`%SW)22=AqKUc1K(NFx z_|IH7=r9Uk8u7EfzMkV#YHMrD%gZq|tQ1&bF(>$0T3Q+&9*(}DE&Qykti%*yeIGh> zD6DdH6*f4t&T@bc40Fy;g#Iw;2Xlb1Lkq|3;9z`wJmV+Kj>}PIsE6G}_(Cv&EskKM z?0kYPKR=(nQ&50YJ~44Fy2O|UJG7vZ0Rsjw@`K7z2~RAiPr?zLi(>3*!c-~@{b5`T zR^S^)Dk>_lbaHqO=Dez^3LW5}Cr-e`SOmjF=mvYUpbXzd+%*2h)L|6pArtm8_QFIi zxT!GnxC9s%6U$&jVa^Br*=L{OdBN$-C(IBH8e(9FXBExiDs$p#25mTK2!AmzE@!4j zb6Sch+{5f9G;-uf?l8wU@dag+2A#xo;YzU|5i|N~cnbq2TG)=ZX}%>|01@LYjDl+E zn>$mrFrL6Lf=0U1nfpZxBhq%X<~ozcXwEPUk}T;U>Fr#Ml{&^-_$&<%VVHvZpalVG z2+yy88qK#v3FwWK<{l!$V{k{M6yOK z_q)yd7lZ}H?ll;<`uW$Y4vFtOw zttXRhXVT2ax>`PPGrg*pI>r+e7ksjd{$i=)WU~1#gYj{rX{V3%y=0f~*A-t~-2ZI4 z30lNA%Oif+(e#(A8-BjJ?&m+RztWm=B+hcWu)BpM}hreH6^w~u3wZinG z!MZKk^};}xQ>o^2L)?!{8u`WClrzITKFYFw`)uLa0k(5}t$RY;mJ=c88wT^MI+=N- zfpU@L6ZA2KY|rjwoiU3=4+?6CCG;bW%)m^-qq<7k7F{tREyKu;8!8=9B+u|LV!|nq zo-kt5LaAJR|IIgZSWhIJRW!XY{bp0r=WztQG*i^WKmvC9QZ|_!vWvs1 z10#uHV-h(epfd&JBr}z=b@!zA^0O&~A&Wv7(j_^nh!7%4h%WZxD0@k~THQ?@8>!^? 
zRwwz}GM)Aw{`UTTmFeMXevCT0yV{tdHY7S~qa0VyQfGEp5Hs^+1gI7dZ{ zqdY{-@mB_T*?N1~dk3glsmg?&$|Q6k+c9&f+MI%(s5t@Zpd_^>L!B1u811iSIc&+H zN^V!jr~t_ZJlM~kHO6oDnv_Ki{_W|W<73q-Oht+Jf(q}}e8+-p&lv-~CM}9+d9qvU z-CbMni)nc#ana)mt&1Ys8Uq$J2DD5JXdmspDBCeRS}F9k^^HhMg*mX&$_<)w?u=Z-7mQl`yQ-WI`NobuP-aa?9?arw7 z=CH+;-mNo2+v)>aOPvb`s584bN^og%)aDV6`GubIi=7LLJQp?ww#^A^X$)G>7})Yu z;-U+~C$36uF867v@?AJ5v~8T9OjJWn@@8ozxTc+GSni|Hq&A{b zoJKf6D|xUyL>Y@-CAyfG$SeLn83H5mnJ0wxU8DJAjPXLY`$q#@Pekgs871q>AIQX5 zN|?+-s%@R5rlz63w>w@Wo~6|-jyp?pfU+!K6Dj=h=28ci0L<)NNG|f zYNR74JqZ+ZN~UnaWMU}zh--_Yu&G2!z!5+$G+}fkg@X0^5x~OEW&;R`N+GwC3?U{B z8nSim+Re{DUmYEnPU%Gk(5z+{id~;Gi^aOq8m#6434np1K z$RPBP(?CPV#wa#!+=zXsve?HrR9yUvGVINQgG{{$^8rdYv|sjtK_A%B1sy;!D9q?8 zYbodt+z&>MdjlTG)}!)EdCZ|?5yoQHbn&Yk}Vc*{?>Z@*hy zzFdgK^2L*U_XK(PJOzP1CafC`=3`;{3q#${4{gNXT+=Qkw#0wlbu;Z#Oiu~xQgYjS zjKK4GG?10je@2`wEV%i5`?vV`_`s-%jg9>s`|y8Z<9H$_T!je$i3&LeQ4B*K)?7nF z0|X1KJXrL3v_aJ4Ux-=AZAkvjn>RzyL9)I2>Z=fa5UMzxQ?@}eGb|5#0jmT4g?xh~ z#}0%Tc9(xgdfi^Fgrz#)MlS8<#53E40o2AhqbE$5 zfPWz^(H2^SWM+C&2+#HF*YDc33(^?XvQ5hIo;U(&4+)GJh>3~8U~v$WfFYwBFmeL} z1EDbV>eUMtE^K_pc_4%tCuhDCjB(4BEf_Ku5lkccz?pO~sEHFNGBqyy3}B+L+~Av` zE%b_C4X{cnfTKSn;?nOl(qBRfX{~ zLV~ei#yBnvL#wN+Yiw+UVa`BDVqzk0Y=)9x#WV9N|BD@b;}nJZTdYC&GHKQ0#M&Obt`WRy@ z8XD6Xbd;Z8l5vcNjXH-BY0fR3;TrynBf%LUTHKK}NdtMfS6V!g#@5S~XrWJ9exjblms=+E>_VSL zZ*`|ocEUmQx|Uv)NIsApVjHNYw9)q%tomh9BQ=zO}RduRk2V{(Roocg22n zU%xNrr~LKmnxAj%`|CGbzgm*C(_L(LbN`~&`)H!&eRspzzHVBDYDE^eGZ287Yh@WHI%lY$i=9JzkkQeQQl!sfTwq*>R%@A!(Bqne>nu zL2*eicpHQ%nRs`HQ*clQ0XX^e$ZIFDJTdh4k)AU$zB`S666>akv-e`BC#7^r zlF$$=P?0D^ob+!gO0d_aI2uya3CWJ}1C-gmw!WUW?!n67zRI)+M`4mW4xc!+GD)qC zag_TgeWKLTFvkeuZyD-g>mH;GN^n$#sl&q@`C00W-s+?XM^U(=AV?h&U>{iNvuI`P z;-?c^SH>(J>}c||rw2KP^--n`Qk%ovhpDa!(T)2Ug$Ni%BLk)ou8uCg{!4gf)>2qebLLwt*@mnTHj;ALvd|2{tGhgGqWADrv$e) z1-F$tTl=b0qaCArsgrzceS3IRj`3U^@2KqJD9v}yoz>;etbm!r{pQRFUOXkJt(Sdj zrSF2fquQqhw~zB{n;+ixRATGopw_Xzt#iVpi^~|vxZ5^4sJ+gw4NY`&jEz&P`YSVv zJr`AYw^sVJjBqTN9^CeDeA~*n_N84Hqd#x=YGfvF&yKZIwQ)HNHzn zJ6l)ZwTOMd(^F5`}!=6sQF}(<)SwhY~DFpBIAk20kMFp0TPfG^*$@C#) zCYPcSA#6vIOHVx2pG=N7)x zS%RL-v~oDYOm|pQ&_gKRr~!ZBCqp`D9aXVkC2GdO<;#~d{=>^O)-vd>ILL4clL=sd z!-fq|Yu|qRZKioawTvA>0p{qTM`&a37j}S0-z<>Z?9BzY=gzm?n|7j+=>F9>mJ zWZ{1mZ-MDlUteGM-?xn&JGP28JhT4?r^8qqGiJ>1i_ZN$8}2A{mE8lXtE(X=Vb??X zXNwPV2?iZ@U^8MKU{`Vy9Hz!yvt|uMC1fCkFr@A)ue`zxS1-T(GUsK+akl0lS8*2B z2*@i4Tar5TxiyWo0E!T(@o= zdn!V(qW}z5n8|1kRxr-RwS&Oqa8Z`1ufP5}TEP+ni4AFw)0x*Dok2;A6k-<5p-~py zu>E0)v&qk{HMqx6IR=6ifk|3W6~>EJa28wHkn@n$m{pFb#hQ!m;Rxq@<+m984g%AN z@uE?VDn*NoCc%7WMidOErly8Nu4c@bf%}SAfzr}aG>uEvty?!v--Dl8b{n?J@eO;t zOP4Mw7wgCeMR8&-8q?xoaJ1g_9a9mIS{{7J? 
zt{Uc?<8jzI1odzf5+g7iJ;Z6C+2;gDxF;O@!x#=nDslQGc64Dc7feDyL4lSh8Z(3I z&Ut*$Jv?8yEm43m6HcXs9S$JEH~P;26z4hOu`od{3b4Zub}(uTjpu}ihAv^!Fz2Wr zLuP7Uj#t{edGq=6=XdYkjVsF+j@g45B*Cr1?6vGl#MHRBnc0bmX{-Oq^((xEkrw{uNIDIH>Fn8}VI!TLTeLVq z4YFXbGp!QFRT#R_9MVv%Ggpj;Mf|%AFsy27KRaV18bqV%rN3gbW$>Xhg9`Uh!#6tn z-e?{sT0GIO@D|P6gf|8~Sk=^U8?F|j*w*p=E2f95$??ko8DpC;XHvM(K*@4r$QHV4N;{G#-A+V~jLy&v7P{d=KSdf} zu(Nb&LWh!f2>oQ@+Fd456wyzT;_Al;!(yxX5r1?8|6p&0g)hsL8RRmFFPQ6l}LG8!tI4V_N+i< zaG*M*t6JP!o#LltI&3L1YFV;c>!)U>tCPDbh2Ba=kdhswW;<;?V${)b5?~n-pk{?Q z^17-;J&BLSU(E_ra+B0?v5tx^j!|9J(kQjGm(p0^J!ej6`yZm(rv$e~+0fj8F6yWR zr6yV}Pj^fn<7|&sihPuQPJ3^st!JcK6rmP6Y{|h&c9L2fuT}@CLxPo|f%ZZ1YDHJ2 zDAG}g6C)j?q8(-Nj>-f_4N>4``YTz9>ewWu8a2eK73gH5TH8}?h*v9P)Gk7R>b$|~>^#r8gB&w6)mekpIRlhgm;`To z-)zsOmWcLA!3(AYFPs##u#ZPWgi`El9~fmTtM`{_tQ-7Wa+T&R+w5%n+#$-GDzDaD zj~OAhyhwXdPkZBF`;2V&Ii(&8$NRPxdCbpHCXex6Fejv~&ab7$r{#{w)<@$|es&+* z^kK@}{`Tp8>`jvcWTv4p&PC{O7kf#dGBiV-i1ITC-YIb|Y7A_9AWri7P7i4r;Fy~2 zHMu#g?Um$3uca=$FQ%==w=LhXppQBuMQMEc&Ml0$T%Of**eQ;Mii2+YE%f5Dh2nHL zqwyF554HTxW9d^tc*|hA*#^-~1!=HED%ESVWZ;sBl`c1OKa{Am`58~8PohL=etG0ug(*j3 zL?|z7=tai|Py)D2!d1G_xcYREy|64|&KY(w8rlK#<`6tPS6V|5!eT~;e+x6OvcT6? zPkeJ|40O>)9(e>Bua?V_!*JN~gEJ#zgO18nhfo@!wW2&$gsoe*9z1vufAM;Xg^5ub zEE!Ni(HsY_KVZ8TtZuhLIqtKiZs+S zlwsYTlYT;-X8oC=FveS$l?C%TpEhh_+ zBrlq`^yVYp;()thqp&_8EcNuNXgNXgcS|8}xr2Nl3Hg!8{DNPiki?1n|1#bJ$@A|r zrv8rGwQJWRBP0KZ?u7rY4I=%EFTQ|mtg5Plp$DM^VF-B(!6%N3G1cPY84t}C$h$g1V!Ukar`OIF05NK!x(ioB!;u^LL z?pC4Mv1{!*N*6SdTDW_zMPmQ&ST&jG=FAS7P~KzZwh~%LMju98--};^N|l4jqb_ zWXB0e63joQ1*c=uwr$&nIY&v10^Pt2U@GwM=+UELt#j}ZN9$k*lY`T7rO;;_#2SHx zDV;VROJZUoY<3Rhf&65z6?SgnO4#Lz-HMopR%30|NMAXf5WB}P&n?QNrKPbW3RXRo zkB^UMXAcH>7`Q;ST6hu15aBpy6k=F|-C!6@V80P&gTY{#K^ILhh=F^O5imx9IG$+O zuwnRSQ4EPaf;L=AcDBJjhxssl4F^x5ewOf@W(ZYbs&UWYF2X_fMZt+^2(vnQ@?-`d z*m%bk;f9#hcQ|eA-h(3?0EGganVM@r6YTne$zeYnObA0jj0m9^28hmZ#1?xWX^;yh z3g3*9F^YxLwFE<29!br`MT1I~c2MU2HqJ;3@LAf4Csx}ucNcbRVNVRMP=iObq($sA zqroYiom;eG8qA_mzHlX4KRZX|Ebho*q~R^xB@I#G`ne=_@ILMzMT^lG2X%v zms&hgDB&$lgsxPRh7XH=xv*3drNaPevq($78@bc>AtFZ{TuCpoAJAErIjpZ6bZ5KS zt}aSA(c67*wEfHwzw?7_ANW`fXR4o$@%%W~_49F_pA|U{WvWjQDcm-@?ubM8s$RCT zRk$4VHNzY`>ZiX{<~SayyIAD>bhP7AmEX4yL|vT`bhfYK+h=pGEsQ)CtUKG=@Z-D1 zH&+e1Sn9Yu$@hnixwm%J|M1SlD{X^z1ejpoZM3>RlkB$GWH=P$ayrBHu&?f*&9L8H z|AF0bvYTa(S+~h%xmR!4;;Cyf$q{&NNR%v4Dh812NG;`)9U|TP zWqgp#qH8o4dwNwmee#{&Go8MrPM;E|&se8VhSNL4>6PX59_{q$=k)IF^h$9$tDHV9 zPQOV`-w{-%+UYaE>0RLT9`5vB5a2&KCZNdao#*uF@AU3tap@{_utOgZX6QNn=tVg! 
z&~}qD3BN|t z>1pbe1hvXX>8H3S_}DT+)!~uK$S^g(i=#M99f2BR9HRr2LBUE63g>yw9q-rL7`V75 zU|zBBjNx80W0lciN^bi9X74?~t2nOy;kjMXmG17UcL{X~Aqfx&(IrHagnE}yL6s^H z5>*{=3AW?djve;~#-?{;TrkC8V{DM<2IG$77Iz!${NMb4dyci=jq@hYoA1k;dLM|FCSGe1f(4SnlmD62Y z(Fa$jp@A-whAOkdXE}j`Ec*G*;+s1f)2sKnlk#6^<6Oi?g%0)#_`tO7wBIstHF=tJTeF^|M;ztJn>p9%*$RP32>(E-6-*R7dBbIs-H0j2y6X$sjiD$z&rpasYASAV=v0e-)6e z-8?m9lBiEiguuX#>d@1%jI&J(Zw;#{m>m+^w;(}=MBd5jT4 zI4VQ@#*v918EE2oN!S^AS6xlfsBsqELpRW?nwlDhaUjs*MlZee5}Ja7@XRyMKxk(t zDc*L`X9)SI$-cUpu_&9n@OatEg;`lNs(g*x4b>QX(a7yJJ6lXUx&bNiU#PhKs}dgi zzgn^Wd4f9gzcg44Z`oK-QXs@6vXY(lQ*a+%7F2t=DTKAw((2tpCWZG4hl6JG37a@# zFzpeJ`-J052Fr0r-Rps-3uB!6B8*oGlrQcX{O!i1Kl~>0!#4kSXS%*Q-TKC4>)RFX zAGHU6d2jT0n}>b7dGyztQoet2=GPljuP*fOAL;m6HjDD|a>)5uk+94{-NSNs#~pWI zeZ$HOtrI#X(~4r%!s3ag6~9lkgA7))rAwDGq!0ZCs}q|eGDeLBAC0qB43;w75&Agv zQ)uYuLrY5w%5l}>k3Y@QA#bkVfbsL>w26eQLJ(Qs#Oa=1j1Fpr0YHMp5?!bhgWrj_d ztCj&1%;ACs3(&XcpMM_P6D9{68HZ1T#tlV&>eQ*|8UadB}BeN37(2^E-DB{ek_)li2?OE4$~op3qV$0__Om<0t#*7`Zt z4|^UlAQMRmiy^}WJT4~4WkiLc2DWD5BL_3$YHX~6Cyr-_J}~nw<0Kd?3h|^eGBWU+ z#H68+E8y1X04C?mnKRd~U;lxUeMZaMmvG^aKmMpywy$QNzeb7kf25>c^|O|LmahMK zf|$Y6j)}rxIb0D>K6`SpV^K~{4#tQ%VMvN+l8JQK=!hQ&l6^pD&A!DxFc1eG(!74` zgF$PgD-3IB=m(pN*hezbsxfk6AMl93YM6<=`3i$48V#?8v1pVs8l0kGH5!SE7B+~{ zPX78c^AatOgum72pMQ=SzV`2z(5t)dx{K|WZ=g7pEXdi5a~f?g+qY0HTLuYTt}v_-mU4RAGL+uN4i^SLp-Uw#W<0%qT}%4ZUF3$d&q;SG%;n=76E5c| zuO~}~!(5Jgibc7X`kaqP#lT_5E7!59a%vu<6bYHGlN0WYxH( z-s9t*sPA7azJ7M~m+RB7RE*kh(PQ6#%)#+jL!2R{YFdS0!-6l0kNEO+NWU8vDsZbb4s!mlZD+S~VW^bUtOb~ty9IRxh5KSYK zzxw@0{|D24?b?s{&tALsNBUf+&kuC`?%K8Q>7$i=PfzN{fveQ=@2`FP-S=GW+V|hD zC@BjRVzzo~(<8zmk!V|>eBEx;&(RqZR1OMNO`AbV@giYn$B<02HEK{fTl7uBG=h96 zW(wm%VNND#ZH4L~B}#>ewpEO>EgUVk#LLy8a`tF>?l8G-keuyl8|~s0?dF^qCg(*; z1rfIV0BNePJSjxUO;HwSxGqnU8b-_WGUXLR%HCYrVQi3{Ra_&fZZic*Ky1YDEE^>ECa&;c&X-f{2r$;Eo zl2dGel$M~>2im3tNYkb$iz__3b3j*}Pe-oXid&*OPNz0+ z9Jcb_A>A8?cXWhyCdqRXU2DfH&12;G8Orhkw~l(>?i!!2V)u?J@6P6co*MV|+p0HQ z|Mtf}UjO5d*Z=s#HTC#^usrkE>pxz*{sTVO)z6PVXrF892NnKEb*@vM>Z>1QQ2EE} zKedUo+=5zDEmzzBuOEIqx9#K&K`Yiqb>AA@-4fJO?NC4k-m~gtxE| zewQkEvk-3CEgTQ&&4-M}%|xYpBhj|3rPrzL!aRpy&vbgRn>ui06R#992pLbB$Y_cU z)cWUmiz6ARSm>jugt}@Nqfw-)VAXOJIz3g>Q#CdU$I^G{EZM-2ULeAtDf>3Xtc;X4<*(HaiR$PmAm|d1zLpvO70;jN1;)qj_ zlUWSF-64LXHNGiCR(>goZHPe$<|l!iiJfT8Kxic1h|w!1nL%Za77gJa`w@4EaTN|2 zs!2qDZ-NB%mnEE7Dnr6w2IcC?OqG#g5?%hvdN1VKpqr>krK`UK_Wn{U4P^4}(X_St9ud!!$lKhevD zrSpl>Z44Q_#i-%oR9DI<;@K`CbK+cKOd=Aw0`e<-)@nN8s((Q^JZ~@^(VJh=nP2nN zU7Y9$5$kfg%X@X6pWPDv#c!iNTO0oQZ(}~ccjzZKL|$Fwd$q;yYK#BJJ;7gZ7%6L8#hAV!5Yqb5Vjjs<|PY@6~1C^XTMZ!1uHKvhqlO|2bOx~IfvSauY}l}Y!FII9hzduRfx?1K2d!bL z#9*sM@EKeb)lr=3331?^c=S+EG)!X=0)M(BwM76pfrjSMmJ^36pn*4 zc<^9+W!D?FHepDIId9|Rzql(3-o@ znt{kW8gilmFKY(k`|v47IW$`r`;bH0ic_A?@qY=euJSx3*Nds`~+u_Dn+p)*= z4g`tTPT5o7Y6K~o>C5T?##;Q5A&mX2AMamH=pMTZ@vO4Lk>rSYDasTE3KFeK{v$~dm`-n9%L%N&&fBZk-)k|R8}9P)s<2Db zoX?Li9Uf$S&c$>p%J{mgI6K_(cDeh72`7HL2IE( zzPHObSC`=-wyY$1Ua~v~JyEG~ZK=bRnqkT;)R(RCHfyZSDF&HPCC}HEf(}F}rODEQ zscx+^y;PT+Xt_98$@G)cg5`|iQeCO%isd2Qt-(FDzFj@xU7Hh^uZ`-Q8_-qj)0wE$ zL`uc8e7YNay9?bq^IY3AmE{@AvJ83oRB36dtu;|@%vIVRoW1EE{-FOS>E-RG%cT`d zf_pH56I~Y!ml`KZi$}-}F-j>$v?fxGbF?6!r`&7hbhoxj&ove9U7K!J<3TgtqVnas z=$_W8*5Dlj%q>M^yH!Y@M6<{~YBqr+3n>l}GcY%iBmWH~PD93il5AQwsiBS>chiUI zaG#>W$H;TPhkSyXsD%~ePsFjI@!29Ao}kLx)sG{Z`YqAq-b6Ue@5uA$M%6qiZhnTXTns1UlnYLCu9}N(L};{^T{x&Nc}3xiN6JFxVF!- zI}bx8>^0BQ<0F(9U}5_f_V3Sqn)pNQRf*}jzDE@7CM z2d0|D{QPI)Euo>I{HpZdBti6bbaeb*8thN;p#SSn?>3ii(2E|`$j~%_=(WRD-$RwI ze5o)!OfH#K^fD!dSlUpyQExfsU_9<>=nvK%wisW~n~o`lQ=z7_v8J;_&2JSdAGHU4 zeplqzkHvn!H}(3Np6jPNzS%hbvs=R7tMmL|uJ@Ps#a(-$;M%Lr*Wb9~$IlM@`1zq9 
zuH1k9#NuyvP5Zbj_)?nl>yeHJ+|0WjO-~ujdmK%#xSNkT8xNR_2c3;aT}(T4rdx<# z_deCPR9)c@8%(b{=}vm-PI>EI_tc#pr0eq&XNMTyoMHa_!J04sPQ2xp^#3kl6~&s# zUX56P*jNQCHuQIBf>3g?G(oe2GK-ZKI|7H)!AiSr+cq|6L1k$4SiZJy-HH;16i^Kf zVvRxxYj9ZMnS%wlW0y~~R$W~Ul@pgU6Z@`RyU^^89Xl2*SWs40#&(9-9dJ21#{VyV zO@IxEO;busOXts@56!f=xEPltBqX52Oe}_OpoI+^Hej1zdq~^`U$<=8!o0OuHnEUm zUqhF$4r6!2a*C0%mkryuFhL44eqgS!CNt>@JEG#qv6k2>1}(5@(-&WS@y$2i{P4pM z-+lMp*I$3lCJhYE(>v1daq6qDzWVI5&u|uXP#O6@{NWFufBreH{qoB%QI2+SWa|iS zmhCHWIr867LI&;NLL9MCVwNy=JWWhBhK$KapYeb=YH)09Y+_;}^L{YR3lc|aW|V^| zT-e%#O)m-x3NXXSPnj|Wv%xM%+O^Du!T=7#EJ*_ahImb8e1HJ~rpaZPg&Ag;?v+hD z&=l&kwG0Zm1Na4^Fh4(k=FFLSd3m@hCntyFF|jWd<1ie`M+*SN@t4=HUw`=U;pd)v z?wMzvfiUp&(@*c+yZ6~=pT$=c?xm!i+P7~XA5r1?=buOZz<~oVyzl~-Anm8bUvU-A zo;Y#h^y$;bjvagDl~?ff;K75)zd-557hgmRsELerIo}s~98n)XX=bp&xY$#V*vT)Lya@7O%ge6q0aE(!;xo-VC30Ri)}?Tn-hBr7R_YE-qyt)is4sas|Vsc^C>Md zGPl4o3B#mo1|lqQQ%v4+!$5RtehtJFD4KDL28^t+i%+d}>1PzR4AE#tFKY&(7ipl4 zMptW3rNTH2{^l>g{PM|@CtsuV`s=T28T+J_;5j1CM=rN7`5P_#Wzrh4ZwGnq43|TQ zWilDf8A|givNv>LD|D{OCF-9sIJjH2ZxPI&P)vb_(}W&s(MGZ$y+JjWP~R`lAf-N^ zd~l`{Wh9QCzcnryu~$G5M-zi+C1W`^6l z*9d^FIs z)6wxrq~&Cwe!qjmUcKdcgW$xO{qsgxCRk^=Y6Z}q<_sNmA6GNPSYck#@Ec1v9 zB9q{tbb3GDD$ILr(Df{f)d-0yJ0&?jsBhKtiTSpHUdkgMNxYZoRMJ2%lr!lSbDA(j zk-(Eq#4HtI5nJ!OZ{HJ7JhN_n|GFpkty{ly-MVMjtvj%8-D~TfIJ9p4zIE$fTDShd zy7ezmX4ks)=hr>)!n*a(ty_O=-TK3He37c{UbpV}x+k`;Tetb~$LEh8JBeOtH4zPI zAwesR6#Y4e)UI;sOdh@QPa(%54jNU8`BzCt%{e;5Z%n3avJjX?!dr&w39g^4k|7I! z+jKA6czGsFLGt8>gtzg{A~5(|zR>KPfdzDUDXjM#^&|$bRqD~1<=Qb`YE6{p#!5BmN?UVa&m2EB z$`8(tmllneTC!bN6u5O{D9a<{(y`L)JF_1+y6M!5oBEDy?mM*U)C(I=?%DYAu8qfb zY&`tTQ?I_Xx&Oe%llz`JvHz*pP~pW*C!cxh#P*E`H$Am)@5Wb-Zt3sa(SK;uNnCY& zYyTU&`%i4^KfJl`rOkcExAniewSV8n<9jzAQ@?KQ#}Uf21m&BAdTMU>tt9QF%` zBSzCvv*9I^=|zM2dA;S3(R|oo-b;|nTB2c@Pk$?{6^{sqgC_F{MfaM!?q#dtfZqHJ z#fg4WINVRPidal;`{~`N!y`ob*hzmz=JMrE<_%ykS%f+kQ@3ZTmO0|)jI0m;oWDg3 zKrWnd7rJ1fQwGqLzf9O4IkL=OrZ)O49mw3!+Im}STSsf_vewq+t*sq&R09LGw)VER zcDJ^+Q(+I}K&t#e>ymA)OCM}qvbwdklM2_+*&AD1?`UniwQb3YhQ z%&$8khof1PqdrU2ECyi07cX9nnviVS%0*)#L!JqEAz-heutyxXi2Dq&|Lj0IckbNJ zGDxQ>A#>rvh4bgnUl>RiFJ9!Y=P4nBI(>b8$Uw4sMVBQ>RX$0*=^!Pf@~G%q<%ov6U%P*7J*F`~8>AO4$pHMH=={ zX2~B9;y>2D?4(Wo0EY47OlLU_b^(tp8ZAuo`3OMjlHymUk>! zsEjp#<;s;zR8m-2SXWoacrw;8ws>5)aN(v+o9gT9v9M!_!v@jP(gHmi>NyncMT-`( ztq97|Aa;s}9(o9cvu4d=>kcTV=r9!6;^JbAi;)oMq^Qu=)`m9O83#QOLE=-2;3=>EwkpJYEQ zJOUgUN5bZdK`~(qBOZ9%m`P^JO-@c8Ja{llpxv`;4_BK!c{0=9Vse-ig-uX!mW?r( zdWG3#*|$gwKZJYX5wJ@UTfZ<0%}`7d1$$y+8@8E2H`uy^VFc!=MN-qgjqs&7M0WFmn#iGn3~q4-D#HjOaN}CVLVwkj0cQ%%7vN#cBp58gr}$S^O*tv*yub zPjagPH4LrTx26S6W+6mFUHBCk=Na(PLKiVm!;Etp?4!N%`%7yUzL7>6qecJe9f$<2 z;VK%zi@k3VkGE$aX(5$0idv0*mj{9)dvj~BhQ^x0Ce|9Qiq?p|*N?q*3iBdrVTL*f zk`@<<+tDIKYpzlnc?-9ub&msE@IV-EnHU@lmAp=`;w@u^K0xR)$+BgQ>M39VltF90 zE+P6>KcO2;kvN!UfshB4&6O!zmyr{}5hv3Nj;2SArcD;p+tZx)$61dAI9$oFy)o2u zD92+*lI!sZ^QAP~=iA#q{!Qs0ewTSB)AwwKAGYM3kAJ*k7%=J=qh44Pl@(47g_z2D>w;ouvq>!>9n7xqw?N>EI_ z>SQEF2oWd5SRsb1{@0>{j^l+$Qt9tRiV&n#sdZ9?SOi6hNNY2o?$NcQ=)#FqIgF~M z5pQ0~06k6`5%v}luf-~1NmBu?pLjOLky*}QvNb}l$fU=ZA&ixJ<64U`kuEP2CJv{Y zEp#P9Oi}rAbNp@7gOqf|Io?Ye<1I~yldC64ZA0WrKY5amG*J!xqf)6%gM#m28>Ki8 z^{^!)6D+5Xl;?!WdBI9%xKiXVP4SdQqkfQ*;Vq3vCRE7_mNUo7%|qoHSLfjY(v%@` zb$~o2S)Q9Bw@g$P&hcN_7TjIn-k$E(7AwyTk+LG>you7{0BN$XJjqv@gbswtIpK1? 
zw{5JaOR~S5hIV|U32x3K{N>3Ja)C2)`R7W@wP>^KAqLR z%OY(Bqvg4S>otNV{(W!Z&(}E<2 z*;y`&=XrJH*_KbYw&uAkt#VyiC@oKQTpVdB3$)CLv(%+q+nT*QO627Qwq=WbS3VTI zvLm2lp4ZBGUhT{L+L6z6S)5>LOt&s6mseKEE3&OiOQn@FoR{P{FRgNKpX=W<%e%A4 zy?u;4Z{G!Ep#L4)QgYmGz^sI1vMmWAGEXSR6 zr~E}9uSPWp(WCgV;u{*h|3vVv$5e=RJ#be2aYs9L6S{Glcj-ohr_%-jVrn5l6&W-%0Y zeATR3*v}o(H452K{Hdp&!ja=RGrb9f-Ulf`;zbElqp*a`XbCfkvFH9O3hu*kE6{=a z@4x@hp+oqJ`nz`Rg4oPM+?aBz)#( zL5A&NwD&t2@fynp{1q7nc{GMJjSP+70kJf~yaZ52AlTz8Bzg24^Y--9PeWFRpamVF zj?$btbC^?(qy8ZSxfe6f(hElvLL-p;(Q^)#&09A1R_4p)jhpvw-ne-O|Jf@lW?*AW zcnlW9hhY4V`0@Ud{(TeUE#Lg`r?0ood$-C(B@&M^yrdW(C(_zmg?Wq4(uY+v-uQZ; zVXsN0%<3XY{4QbHZ!(|q*1haye9}Sn%IqM%tb0_`LDiD*fYH3)!MNYU+?VKbdX&qD z%c8$~uJU?c=k@-cYe#B6xgp|Ws`bKH$9Kv+KIsYhdi|K|C)UWSZud!*@E&NJ9gj}RaI5cnsF>IFK16BHh0X; z%|%I5Qxnu!^bm_@eSJMLtfXKkf)36mE>K``3uw*Ek&6qPo10O>fp^(O2wE+wvCkCr zS?JnOcu|7O(QTBla|$}Z4p!(e2RXtWq3UAwMjbQ-O&ODbI_L~mZ;ayIci(;EjW_!H z`%j-fee&eVS6+GL)TvW_eSMcNUw;4n_b*(yaOKJs9FhOvgAe$&Mg zN9@t;l!^gD@?lySMm$(~A31Vl5>d5qlw{_`Wq1M4U9+jspb6B(jIsF&G7Ot&NCN7x zD-e4avHuGL1st+RgMHXMhM@xvrNjUZBVG8)!Ft&A1y6*nR+!8dKMy=XJZ`4FMNMpc zxR5bJcF1C&g9BAFD8UujLyAEsjhC6A}poZ}9RXh;Q zUx`IEW~tQ*?G0%(>l)3EY0W^mXKfT3QlyPSadKO4v&XF(g3WuTkki%#+E6M5S)fn>XtX^}_Tk zgXyTZ?m(2~^(oS+skRe?jK8)zb_nCM{>FzK9B#B(4|s^HRq`Px{cCZKC;ZF@Y=&M3 z(>k;H0MtE;?lIYPD%kOev#wGY=MhVPt1#V1df6tU`GAvYv#`A2XSy`ab|T1eyTxI> zlXe;o19jv`!TCcR9bOCv3z+vN*my!w7w z^(~53!3vdGG?R3|DZC+IKovXQv{+k;^r-+sJNr(?$ZWJ%86Sem4;JD7|VFVg{IIzGAWk9Qemzo z|Dy^Dt2Ca_p;R>_if*J3)1e013P;%H#3{2pr7@ni(Sh>x;c~+md10JfJxIwJsnm{@ z=f^5lQA%lulIt%|36Q6FOB38(M!4IO@D+7}lnftfg3U3;%_Y%Sn&@Xs^_5cmRT~x6 zy=a(RJ6djzl#9ZZf)FW7w#NBNDvwIEToEJB94^&Yd3E0u+1nD-HN&GVUaF5(Dm`q; zvU36-1LUbuasmUfvpJ}J-O%OFB(2EzUZmKP z2g%tHN^yjoRqxw9*Iy0FT;tPK=Gi_$YRQn7S9o?-d3Bb0bfijcnXW5ldUfWzt;};< zQS056?YeTRyd+&|%Wz#%;nB4yu&2~}S*GjKrGt8Iit23(?#^>tneW<>EYBMvSI5hB z!=#2n_x2{=-sK^^t0Q{QlScpcj?kV(f!$61oin{xPIp~0M4FW(&(4%r%<=145YSui zqryJPwgosAxphpDm&{O>m$-N4xpgGSHPKQ@lV8tmF)DeBI&k-ubw1t6N>jXCKSZi| z;Kp5yw|vyqe8MUY=uHm@hc3cfs)?`#Kk}7itM)6B(%FruTbLgbmgjYrS6%ddfx5G? 
z#&ZeAGco#8zT%k4cvx6oHtSCMiZcm@b4lj&qs-?LO=sc^r(<+|!Q!~1{uvVT?<2#N zwM6ImywP+zQh#xx)1_(77sflBj?usFp|U&f6poJ)@eAWE50l%`2I25K`d{-Ouy7zm zbpx5-%qK#aRpg^Y7VHkc7KV*~6K^pR4D#13TmIhUMNGW^ZEHqijNzjsgqsSYs`xGb z(Gr0z67|GPj!wQp7a??h6w*41Vq5!BOzKctNU>Hpky*|ls=%KBI!nHdD%hJK^nP^T zaRYb@cGBB!yNyX`*}jFr7IsU4#LM6b1hz*XeUyn@)~#CyaeCv%jS#1CWJXzz=7Y>5 zk390AUBVuXHnGbvM=4MEzdXAOlMhy;3GQ8NDH53977}dN9Xx0BR}}sMM0|} z1VZvh-ym^8LxFHLfBt;P_PB&+@{T+1;C8UFGCMABn*7>=qj;dhycct?m|K`PS&glQ z_j)aeA;VA_l^nZSv0>6Lc*}o6`pLfKhd+JsNaKgC9`Dt8zFp?_Zk6YiBF~dCjz|40 zFS{FGwdr2>*Po3w9J8s&`CY=YRp;=m#eB$U*`rez+hUR#>V)ZTm1V_YqfVta{|z~K zE>(RtO*aYSK6fKp{_@dL*N#+vyEE;>B>`vRjK>_ri)O>oVDq_D=ij$Se7z<6+g-We zAFR3F*YU%Z2Y-0`k!#0he|l5c?^guk_{r*!Pj3wU>cPRE-5T+6?;zx_F7$gdS3Z~I zaNO5)$jNxZ+i-S>@l>GhxQqUvi)p9HeAvU>A7wZjC$26p_yuqI=Oo5kuzX=D!;;e4 z+KQD8>sw`IB~~X6tHxx@0cZQHgD8#Z85VATw* z<>uyM?P9M_9C#~*+E$dMy^_wK#>?z?yF*nu@1{V6IcLIw1y zxw#paK(h`E41{u8TU*OUCUJ3bI3^?{paqVY1JxU0~Q;Mw?cgG}f zgd-&Eyx4PbA!Z%}M9VELEx2UCf(4jB%s(~=bdSSiV?H-;-pph*!zpRzCGqj`jFSwd z$inQu!=S{ZNt2k=g`IsEA3z4v9vvM$gc3(4!Ynav!y$5p5+5(eCgwIZ+y>ROxYD=< zTHyH6?4`x#E|_qpuVJ``Ar3rN4h+Q53LXsyA;NSqf`O->k&%HC{FYFS!$u>I5@znj zS5#md7tx5lO^+zli= z8fIu=4i;n>YeEI|is3=7iE3z#16$)OPXaqtaa3qF6XIvY(X=(=8J=@i=NWJL84<4L zZln1P{mhbt5eddaZnO(##IXQywY zN*2u^M`JiyH4vqUK@a=b(46>M`#?o39WjDr594SMkcRo#yC!LPh`mLP#?8xempxcy zZ&$-DYuBzGi2l5KAo{1~{iN~zYCUA*8OC`uLmD=)vA55`Zzdr)c#ImbSiek|b5;G_ zP(uoS3URoNR>PaBs?vs-C#i(4P~|KS>&=@@ zs*Ay|g}Hz*id)sU;i}&Eq^tRH+2KXm3`K0a&f!r<(~B8j&F-?d;&Y2;$tU=NY zKY6m7Es0H20_15ya^_IEZkn=mtkMu4TOOx+49Ue&N~wp- zC|BoUHm4XL+xSSObdZu2B4}0Y%+G6FZk<#qpa>FpWZi2EfR+%|gnKRk7rP`;X+PiD0Tt8ZwSMA-M>)JkD zS(c_Op5od%QE477HH?v(Gu+yyd$g2zv}d`lNRyVN%gd^~x@vv8irm{vJvwW=x<|;h zcy^^;D_2GIHu-fIdbDRM%ktgYF-x&ZWt3F>yBl{h-tx(f&98fi!$#9%YS?8p0MI-# z)>@^83Q|o<9#>-wnO72$d806I5DrJphQ45NKFNGJ-Q`M-bZN5n>`>!t?&7$kcuf(f z!*u799o|T_UYcaRFwWuZaMS4s9d7ZG!L(mE?iCK(gyUlbp**QN6a9p@Tu!%LOm#Xl z)Nm?DyrLpAj!%%*u=XeXMCFHBNA@lFo7d5wH;4Z0O+-_$N)0)x^6sk9G4$qxX5&G# zX}2&O$jJRme`%^^=##Wos|7|Ad4c;A(HRA z?>>(E3@P!EM;=*EiNo$dXk;Q6$e+ydf((u*d4LiM8NES+$UxF#XI>57!H&b=08a7y z2*x@XU14h$4qA;uWFT!a_<*Y*zM?*dNXGt-BX77I9vXEZKQr$xM9ts*?sw?GZzwT} z!ax(lENmyE8R4)si^es_BD#jp@M|61o#h#nLji$Oz$Q14iXi|)J%Q+}x{T2)3GKZP zZ^7&+gj4oTL#O}&hVO`5v&}7|F>D)SFW6{?vP^f(G6pWZeIWgUxBMq0&7bHGzpMLX zwcn>}LOxj&^7Xo5-|bBKq<7GJb)J{ftS8)c#~k#hqg2j1G7fZj)?n`U7q8e1$U_Dg zL!t|Wghz$th|&0>(YQufYUoYVRFW&EQyjzR6w^B;uAki+^T`dN@6~%!$rPdK05LGsqP;x|MuF;^S^pH;gi*aKA7+GR*~!5z%&2u+<&vYhf4XXA~RraEKqI32C__N2FXBgN@Tu6%B!cyB?@FL=v8 zClwYJLOX_nuko48nl-Dsx|-wXvik;Bs)rwb80tKSrQ5Y@*Nz=Kuxw)aLODvX{;*Xc zmN=||*a)y}V(I3E9s3U2NBwo{*45Y7mzI_?Fal*0E#PvT!ajm6X8ZQ-O-)T4wFiB` z(v1q(1<;M`>})iSaww`56%`zzh81Dxs*#bAXbsCd`h%SdH$qpj)MI19c81jz<=B1L z#fIs>&^X)M;MV9KTILAOC_zuKS*Vh*t*^OHgby~X^qnxCAU4EZN6E^g@1 zp=`pzQHmH4KncehX6yx(na+h#7Y;DQF{4qAYuUa+a}MHb8LnXjhTWIgoPz^8qgiAo zPoB(H8EAwJVAziaze7em*pLG?IoMNCQ4xMK46twoj{eLvx!Cpa^I-cNHUeR|gCP_) z*I|z*hFmlT8%AB2|Aehp7zJWnMYDTBLi_9l#9^{Ew;(o&K_3`S;UnAs@OY>v6{=jP&b-N0jrdHQoGM~7TEz-f8K8vx zUuFtpx6$;JgXuRGZH@^_= zw8`JptTQequFfpg!AJG|DJDyj!9rI~CLdM8G>UGTO2Iw~DRgoK^`VgPidyP?J)s%N zRBbH9B&wtlEm8SiOw-9qWG?wTj1YP>5+!t#h_n_`O$55iS+RNi_|=-OT~3a=cpHRoG`{wNJ<5 zpq^Uq?rv)cQy%D68C z*%c!d`$(w)wrOMJg`?#8gXO9w|MnXqdeGpqLA@Ozs}=?JF7WTobX_`1S`aQ5c}Xc_ z0cHo$KE{PFk2I zuSk)W-FM?|##=sJyQnWfyy~pmMV2Qwkpaog#H_MkI2_Vjo>2|IRiaup9oZ}#kC}~q zL4>ztxLnDXaXg=F=^G?o_fY+Y`XhDcMw%~-cf2^k@m!**KTP+ktUG8hKP?>h2*(#x zpCgBT!f~hSl;rr5+1MYd!{u*GvR)YPaCWG%KV0{kt2n4P<3{(AeamlzO60qNY-1jv zSfcokdy`_oQct&ki?nHoZo-=`@e>_{F4*&^m+&p 
zPfOn*p)*psg`gO%j=Snfq;nx77Ju>^3L5YmQU*MqoXMQVpTHKGe34|j3U%DKjpFEJtSaXe^tCPJR^7ACD_r(lLq zAXqZE!5ky_s@aV&)0(z7LFQz(7D&I?)A?N?HhKuIXbOFZii(0@$y+DtLmuYgK+f-` zu*(qs`kY3*;5)t+{jdz{^uz`lC#*`x{=d>JM5>FS{9!$fo{y$M@^KKL7RL zZ?}#AZfnx#w@1EH>2Y?b>2##w%wW^0LB^B*swvBx+0yTq1iV$^dMZeF+^Re2uX|&P z^P4&H#T2L0(T3N(#ihy4ZW{Aptepo_p>=2A81ynwpyO^77o= z+_JJVD3}!$6<8#3uid+Mvt=U|G)C8%9i0IXEW9~6IZ!m9Y_eYl6yX^&W}r0=U5E9S zm0W0`Sdh^sTV^udI@(0v?!EV3RKSvpu43DQ^4QeW#M&Y@EEHlDMaMYa4HSIrCXBJ#gRv?u+#t!$EHu*gtaQ2uFQFPk2`Gu&^WV-Me=+@p54D z(Iy%}$MB3$f;|W$#cbmU-JGL$X*?thbRdsHCL%@!wi-gxOiiHmvjYz6^}~h@V?>3? zMz}(Jd_42zGCIN0oluj_Ht_7&uLnIr9=*b}j2$}`S21G1jWEFr;~{K{gP#yP6XDU| z!lI%gwiUq-AR{9K$2oK6U^Y;V%W=e2?2g1p1rnE_Wo(W-A|`TSyhU?p;v<{)a3NFE zaAmeCVV5GV#*8%l73Js;CI@}MRXDQw3cEyc9{)0Qm|-8>f{k!E_&8gt;HD_VSTZv+ znPi5oW!R1=Jv|+xW9$oG8K+_>iOpT??OQaoL<{}Hr!-rOHFjZ!IK!X^zxP=)kl4x> z?P$c;_`#zo7GN|mM|=Cjc#C#ZZq44_Lo=kY|H>`cH=@~){LC3jv(nM<7R_=-J8DP> zo6u;MFdFo+W+3ME>H!LxRd()3_F#zCCRb+YWA#9+B@L+Q{qum-8eFq#z`uz_J02_- z+MCLtJ_~i)9e;+mFph-0$`%zI93Vt9Apon?z&)xzO9tUB$z+>RKu|`s&<`S@VCn#C zZZ2s_?1_^`ye`RPOp;B7*!`Py#t9TmXAIH2Bn!Q-&`lugj(7qJ<_ptQVVt943+Bx> z<4H-kN~aR=b_sK;>MWz)E0e&A6?%QHUTwL8%w6W_R7qn2fh}DFwzV?|C&{JoD-{C- zEacat8h#|x%b@u`dGr`psIZS}8InSmj3$EFP>Ok7PL4U_bo$#3#xXSf3Np?aP2r)_ z$Yy2+0YOnx!3yHCIu+dBBX){Dc@TfA1F_aR7%3+{6TVNuskDJQIDa- zXB#iqhRQk7N?C}K9Y6twLZlgya)Gau;v-E6Rq~>hiU4VfyUWN3r6^9W36LkF!C1N4 zTbdv_$N9L7PgNF=kY|sP=8Yj*+T|f#zZ=rMbWnS~SKDyc>Opc2nv$$>e)1$gDJ@1R zPjhV<=~@#Z<%LT*(Q-Tdr&6y*c-h9r z$Q486O7wiRG=H4bQsdp(5!(CP*sh1-J8p~W9p_#bE*A#N8NPDbNM%-oPwy`@m-PUzH+we=F@{Z<6IjDE2V?wnUPB2XnB5tdq=30JyfZx z_UcJjmQ9zJPLr30%DFz$#1v&=j@!x!ax?mjNx+1s%ga;cmPBcGt#A8{5#3FGT@^kn zvOQZf-P;xh_SE{Qo=OvxW^{YJ+}z;Xy?ju2b3k{ZQaeVPpR2UD26sOk-*szrSCv<1 zgKy6md5)JfAyAr@;jy^Rr#szs$ylW&Nm}sGjX&|Xe0obuf0&Asyks)&5)O|NQSCb6 zuwQi=(!D0Bb}3H^huerK;9gJM#Dj2d0sfa>|nrEXA_L)lgRlf zR^K14gPOo(FAtH43kMS2t)euREyCdrVOc~ZGo|G8v{qQQ>nyL>43Ou~#u?Ec6$Q$b zFHd(lGg$XlN#@`1C;FEpMTpT9%~=Z?s<$JM(NhH`d$Fi2F1i@v!i^+T7JF+PMsJ*i zaG;M>wVcsO10k(BG2X(v1|ta?xf$EK+;-b-%;mx_*C5M5u;d64>`4TXn^|-Dr6J^4 zh^F`7e?R;0;48{5ApoWE*&X z@4)YL*wP-igJ6%2;VbfxmUr&lxrXQ^nV5ycd@wO1TeC1L3!CmRjKi;f*tCXCcI}O9 zdx_>nd$(dAWdP-DKf^Krv&}F&@h^P2{|U+7zUA#A>nFW~zTGt9`@K`Xe|p;Y`=)&R z)bP)55C5<&;GLQ7Zx<`4qs+T#0Y733y5ov|3lWtsAfM3Ng@r=MsT^5rgrz|h zepD~%GdcGgkP%5P^lgFHm4> zxG>fMtMuhe>1=}O;zY;OgH11(#H(3(zu+zZoP=K#mRzhV_4V~wo$k5k9xO8Kk%5H{ z%NjJk1q&8n@yCM0%OF-@sESyMu@P`spY7YXvpedJ9XqfQVAaK%j72gxHF@4S=a`(UHN-hnLum*8F;=oyM9E@UoKB&g@uyP@QUhKAxRzvwS5Erre- z9UTol7wa$^XJY+?Mh~?ejbJ#qQCV3T2FNZvIASej(-d^_i6@>w&)I;1gA4PS|PAmn>bPPSA9kh&Xj%_<}2{u;D((Ku@u}2&}d>GfFS#+$nwib^7*WwaX+p}j6 zMueNP2`GlMdGlsWCiYM?i!R~eVm36s6xPiddtpLXyJ_yZ=NX=d4< zi|gY;6yg+9-Qp59fMH`8JakMc9y$YJOvqeSRfTdbdXdIz%O*5hbRNDuBO?Qk7-PY& z2)`vJa$$S}?O-Z!3J(`QEru`HCWKvnG|L%Wf~MFrh-r8^7!x~8@#q+0!V!&gubAA1 zjcwR;hwWQ1dkpchkqmoOp-r|CQLE@&g%9_oIhR-l|p(X7? 
z&7wxLiqY&(wDJDT+(jeQwZ~iRtzYuHDrJ>pFn_e!USq)_a80vEh=fJsh$j7CJRH83JVw;g=wDNaI>(a z5+CeJGKrZ_r-tGZy}p9*mT?3!EL9^4sm3oOQ4Lh)=}fr>T>^n2}hnTUDAoeM`e&mAMR z%<=DhJidEL(CT4wgReZv(>7Xh8RjFUMk*!Ya-NTr;$<5XtjzG2r>F)hN@k!uJwdJ; zCO0O?_2Ej9N}?i9A1TipE475k*?w|bkdldOgXI~xFiI(lm1nxSB)Ykb7^2kpQQ&ER zTUwl4H&m`4CC?cp%^xAnn;X#ectY3Ckt=VFT9xg&I98q&DrLJlj|f)^B9-DHa#f{Q zcZFx?1gSMmS{5niySNNVlB&{`rNK&8g4@iVu+^hIOFV32gOm)E_$yOAq)|3&tn55A zNST4oqmzm9>?CPkxo5|cL8~5)>pC=g>4t=^B=>R;X{@(24x^5f%4@xQoBVp}eY?v% zI?Fw~R!8)9hW4(B=)EVl=Z?WWb-vwI-d#A`=-XZAy=sEe93huP$%W~z%V+!c#>%D1 z^4tQqjv4Zbe7E*ox0U|V6c1bSP`P1@t!0!nFI36%kyD4ub;Yh-`L6BbY>OtzEzNVc-1P7nZ+IN(4oQato1|Q<48jgGViCv_9~A)`sn(B1R;}y5b=9GcH4pci40nRY{`b@ z*r3^M2km2zW}rZGamC&Z;gC5@*zg0w=d4+?*jF85KQbufH-@OmyHZ+t3$K zf^k9I#qea3a-)uFKX5-noSE61@o58 zOp^cM@u2^sl7_c@ddJKwIrp>s3cz!U??Sm%Q zODU!UPT~n6Ubc#hWA*1pilbKXun;#3u|$XkLM*419Lw@iy*OYJj|p*;5H}J_|4N~L z#p1t*KF4k9s(x{t{_PT%H?thikJR;th=W#frw}WssWu^gEyQLgadeP)HBg*z6GzQr zlMuHHahFhUx?YH#)Zh`lx*fc1632|1AMU1WW?%62AJz1?$DXZ(V< z{BsgEg4{aT5JJeXH`9(!VP;xm=3RZHg{5Z=nJ1<$-4)AsB)~&mD@5Wk( z1rS>X(>pLyF=x&kEauF=0d2InxVXBynqOO^GJeVI?D^n>4>BrVS62t!9F?I_Mn^}7 zg@tkSANEE;f1toZ(`3W|byz)T;Kk3+ue`jR0|#RR!Yz0e)qDZ4V4|z+KZ|7?r?B5~ zAUQ0aXq=<)K!-&iun40+XlL{0%~)eGMd({XLITuVoMIFOt?|nR<}qU)t!>-3J^Spl zSdh^O+QdeNa?Bg9TCiXN=5YW1{iuvh0qXPg>C+1f3o$Ji9rhFsmV`Z`he&Uj5QA-O z_Uze%3wQ3^iN?_s(}1SXEP**GB`>U!w#C+2Aa~m zdzjS(m$P9BV+bh6vu0}Cyu3U-)0~_f_CaIUA)LY!LHUdsGZ@OlSq%nZOB?=eFzGI) znvV>;;O>kIF~EYO7X62VS+hM4dlhk;4C5frL92NY{>p|o?61V= z4flZ^j~>>4O{AJJhvL^!eXBGevOlc ziZCp|3`-2NuuOssn2noE>cS#zn@ELiMac=!dPy={zr9d2jMKmcoPHjp%)J+=;F=tH}F%|J9ImM!fA zIjylHtI_O`e+F&wEi~^f%_vC2dF*dwZ+l~pL9HUTVfTQMh91J9XOiP4m_y7}c7FpC}6GsXfZ&XcRRM(gaGWkd+`ercrgQo?zXRj(iQ)+H3aIgw0I(&+_LCV6QE z(4^%IFuLT?%q&!=R0UW@kS$6jnfi<&Mq+X!QUR`sgnea_MGYQ7i!k9{7+^1%$K(^u z%RH5%L!J3NVF;%e==p>u5yB(JsVbfbaGdPzJSkYtPE{7y`?r^RFOQR}ptk!+6aA#L zXr(+rp6cQl>1YY!h|FPf{wR4th?3_kr3TAcBjh=Wa#MgZ-NTj~B&QFP>&7Sx!j-~7 zN>+e8-A79GlP9~oB;hJQX|iOEb$1yVsFtVrOOwMC)ei`x86jh##f%2=DKj0TtPN5E^`Y7X_|v@L4rYUY_f=vOQ!?g?D?Qhw4LgckJrcpw;sO zR@eKis`c)jDf^wKg12f4-YhLVUr=_gpzLx<5x%1EN@d~c zg0dG2N_Q8OK2uP7q@e6%LD`vtvi^edzI-+FMoH1z4F&JF6kT0W`eA$7)#at{FD_Cq zoLO+bpyZW;vghbpd|oIhdkrm@7rtFr@b)aV0zT@siwZwzD@F_N&R4%)sVcZ!TzIyi z>_kD?OZ0g?zwA;;@q2R$KWHubprz=YhQc#NB}WU&4x+)r(zj}he&1F8>D{$oJks#x z`lhd+n)CI>IbS_F`}2nyK3-e#)rPenfArD672e{a#w60&{tDir`}1H)CbC)yA`=(u zUujT3;F0u~Ee;ztjEQF%E!VVujvv9qxlHH>DVp&XNWx5w%i)UH#08=zpJkgC$mgif zCN2za;8bgCEB0sz0L)LqhBO*<&vALUCu~Cw*{PNi#7++NhzuG*HHeGos)nWTR?AJX zKMroaiXu%jega7q(kuj2jE;$1kl1~RcU2?~nZ(vF>_Wxj2k+)9R;*Y;iS0bG>oJhR z7B4vB6uONg%QH9&0e%(n;IIA-itL`Qw~wYwlVz5MtiNp z_zd@lr2>A7!~?-3U=)x!|G)nxe@Xw&iSd?efBe&TFW&oy`>Q^Gu>P~VW_^5f<;TCO z{NmBZZ+Fkfiud__btwPhk;cz{Q~mb5lK#Tdw`UjrzNh^CMJ2Bmmu$~3*;!DswV-5k zLCLm)lBWwwUo9wmYgWPg3kt8)7WEaD;49YW!&v-_OD>lcy+(`tn=@&xZY+FnVbOaF zim+(EJGb!a(vovk#V-_;ZYU_ZyP){a{Nji5ig)A{AI>j5m0ya@;#fi1^VHOVf->9) z3-zJ=vRAQL6qcSTD7{!z@@`YX8x=*^9o}0|{Mm13{o(#P^z%|#_FFgK^7YqW|AM#t z6B3pq)(6?H0D4?#XlQtNcwk_lqA0$;zEM$8tWh#GD0@jj>DDxEoMJNxoQ;l-#^qcB zbrmg`&1RIK9d~zk>_NC3<*ZT0#Khn#u8h2oj}K}}lEn8#LLX3w&UkuyTCG-`!Z9o? z3{9atC@2UOa0&7#$JgNC;JCOr?mQagp5w@73D9=Y0pv|4lZS@~Bms>!1noOJJEH}( zhQ`qW<`Q932VBK>K~p%3yYMh^c}7MCqd?3G!dxFvsL?q3iE6CxqXk@x3zl`ET(;C5&kcR^Da9eRsPP=bMQ%pl~^ z4s`QqO7ZdW+%YX_k%&2dF(0)^&lp}9A8UHO3KU2neZfaAaPH$QSe=mLC=xrA#)*D zVCrC;qbkii(W3T(2%9p>rcuCnn7%GoXmHJQGK%e6`4&hxE# z2Dm5OTTa@X^Hbybf5C5{mo&STxz5Q$HZ%l>#SpLSn*dP3_RrqWD{LB0P?NR*fj+|k! 
z5NmauEKd{yLS7ZcEKa3Q_P{4zrP5VD6X@((DjBBoho~jUmkY6)GXIyo?*NbLxb~mh zcK2?*yXt+_+X@x50)$XQqBny@M1iSDrI7Lrr~OW(-Z2Yk zg?gD!cVDsem_?&gPNX7*(@WvBcN^u9M(b>#ofc63Y4o$sRNCBw_M1rg7f?Rlv>baM zL798dV>r!}QBje929}Q#{195W{$zp%7A7q;II;|7e&WPjLtTiNFOt}Bsi4fykuQ~H zh)cW;`J!l(M`6eSkw6}hIn7X(Yp5S#r~?jJhDs>OH`GBdkkX_D92X0Hfg~W<2&ry? zx1-S80j!`}f#gUt6x+n&@ls`5xT7w_vBcX^Ak{-T2u4#g)Q7@cLnR0aDO?^1h*~#M za*Q|B=ZkevA8Kxnb8@x;07XP8S0?0P@WbC2nHn+4Gx%`SVJN74%MKSwb6~6LK`V!(TUww1u zb62{3=iB67_@yg-LDTI!y=s1a=kq_)^fgV;iZ=Yht?ietw%u%L>Tdh>w+$6BzHm$5 z_VRUig)bC_-^l&m{vbsPWctXc)8FV-c>+IIE98v zcowXQ-osi{5S5 za);?QU4*+AOqz#9@~gWOuAI1{!ut7z`*P}9dvD)yt2P~ z?U!yV{};V}HI!cmnbn_N!$H};`U>=jfAxI-@28O?N5;p;|Es0=Kl%rfpPwJb$VHot zq@*Oc)QN0&NwX1UEQFvljGlLF=O=N{Z_Z2IB>W z7?=e;j}*c#(2WR7(Boz-rQySeBM01;X!PjO zCLfBskg{(z2GmtHss@qJRSgI?u!d1!W!(H4qt>AR6bgwz2TF^kK4^A=rYVS}pe+qT zE9f)?Z82UA&Km-KIB7VIXmz6q4+M2^t0SC$bREKQyGlf23@n4K(80N}u@OJeAmIlf z2lXO%k9p!wSV!??Zg$(17{B(o3zTE_H*?9`beIhkSpw#ZWE&)zLp8M-E~h0c2?J@SY!V|&8wW}mGA?~!@>;XLUp5kwOJNznul(Xk0CF>Fgnyw5N?Z8l|*5&!?@*(-?vMt(F4{ds=_JLvM(q%z)+oKyDp|zvD3RCqn zhnnV3v(%3^S7z!<({-hz49g!dH;yyc4KkGU*A>TTr-!S@_-S(b8y+0)v1Ghdm7<>= ztQ~38+6~$ii*BHwAwN=Akf@uVuB*&8)DAPO%r`8{(p6>ZDyMtZt_W$UvDW5!PV*IW z3>v$aE;rgZHP>7^&9kAvygc7fnG3D-6+`spgY`=aJgdqARxA&46#F#Jv^37JG!}U} zioKnSyq$2I;jv=4RF-U*3$v7|3cc({JqSY!Pdef?Pd%E_ksGXKVk0G2^%zJSKb zzGZ2qh5d~Shns3fOSNgHq9|iQsx))5XXR2K$09Fa?pWgMDD`s`cr4E`R6rVLtjaT1 zrR$bvYAdp}l{vbaJVWgRmWHVwj*+Hi8M=xLZAGpYMpTxhn+rE6+fX{&d-=+shV|i% zZ(jPzo&TQ*bmV{0i_cxe(LgIY>ihE@Nubej_0)*vuO+M1I&IptqM{-+pTM7wCf6o? z=$o@?(@}q5?zXE?l^9?%cT`*g{J4=FKZCEX32&(o(1c+|W!N9dnkssL8ea9{%oQ zB)5{15?~I!;Eq2yD2&k3;@UA#@laNRz#s=SK|$A{^78Vksw(UVOMq~HQpi9FG=W~0 zELpN(!2-xQ=w}FY7s6jvWynxj3F?AoGgw)9L%^n;8+VWaK?}LyqHA zDpgom7%-eYdp4}4y_@_+f{x&)4HQy8KR*u-572@O3JSglpWXyx1i>OAB48>&-+_)` z@dq^@6lYMwhYuePGWQk+3a3w>j-iHU%$Na8VARk`)|jkTkT5njcGRd*7_1BwbKKTv z=FFM%>HL6(4n1nMT8qUJ5)y*3TY({P0OdL~G!)8xe0 zKtfRJVM2(NOqZrF@3(xX;Sb}xnkyE;cJd{8=iUb6wAR$Z(+9sj2vV@7Y zl-QJou?yQqwsnpb+O@S)7VMCVc2R;7 zeB*+7?kwnBK45uaaA8bLOmJ{8+;I%)k6~Cg-A(wCro62oq00}>33Bd1oqdtQM5*9j z()()f=J#Um-u3;N^cKe5J^cQp$f>kd3elk+jkkOBg5G&(Q?0Z@rz-SL`Yv7fru%ci z;C}Fq3M)x=cJ{yehvNU}pF#gQwD7assjm&5vm<)xNb_P()kID;Kosp}aY&FjCQ6)Y z6N?~)iv|ATNQ;>1DQ5bJnf~;Sk+In`(ZFI6qdL0iytsKu(Kj2RUWIIGafN zNO~^Oq_SJ&L1ogBjtm!P3^i8O`PUt{)!wj|-W*u+yuIXdO6j4*+AUFz_13nH*0x5=g@T{%;a$@OC`=Zwe&;QBb`8Vu~FDI5iVyn8ASbie~ z%1f@NlwM3MKV+-hWvlD7IcREwBb-8WsC+%K6w*ao`B}( z;gqck(%zWLjwI`ERYxS5`T1ta3iF)_Hlg%>N{CZgD=pq2-AUEjPBb{iLJ$t&Zjw);TY< zIuErvx3|g+k3t=%^P1E7LbLPbX6LPD=POR<(;z35@7nyzh5fG|+HhsT)GZvT<47$> zst8$6G0A2X+2zS~d2+jq+*Y&7VOEVXs}c#B=czg#uD>3xzi3o#B&61;+GNu|In;7C zRlnFrHQmT9Hga`F4$<229C7H#E;HE`#MOCnRYqV9jmNt~) zoFWC5hj3&7M}j!wPhsRJ8Z0;2YR(RqlaZ5yy#&!@r2mx~Xv>iTj?Cc580a_ET9;~_ zo^17(w=0s7QmibvZFq#CUBq5VPl$V#_qq3ojP&*`rg;8hdS|&_{QI0GH29oH!GB228r6-0>|dT* z|A*I)Tv=DLFjWpsufO{SVbZzcB>%0%Qsc`c`aNJ4{SGmYqV^-`hle0}*DJYu-fh;r z=r@DO6w_~{IQ|&=d1ErQq->(6KD}ZIA>o*dXn>EiG%(XK`GX2+p+=SouINR7-<{h3 zE+kRO3+N4AOs8lJoi~rZMCIs6@m~ucO?ZsLsL`s_Yi1Z%z-!U%5>L(G2FSg@`uN$Y zQ}6HI{e;u`kkh%>={!tV(GI6`qtm(B>0IS>!aDv`i}Qmv=WA`wOU-hHo%E6%^MU4% z(#7_IQ(mig(T2+BJ*cpSCRmdnYjr-~>U^QqdEV(fP8aGebmd=7o9v|tSUs`Y`RZ!t z3#*+6=p{_h98OO=orkD+S6ZDnTAkNgohNAdUMj{;$`b_GPBWKUoEKW02U@C$TMb+7IN zfZwZ0k2<*I`p`6hid09#j-8}&#>UffqswG^DjkK&)i#Qz@cTS<{}_^$ad?UsFFtVK zz!^%Q#Y0J8U?5~b&4-fLUw<96IkmbQ=1)pWI(qcz3zV*3zph00g8U0Ea?kgmM;>_u z)R{?>CV6>zAw$qTq4|%0{NsBy`HKYIb#!!~gk@!A-NOsz`3@z_R~*4{r&HM z4|3+`=fmW{%-l;D7$aPLgwk~vwYIi~g@xh70^1KM!L-9EOo-APn!tp>Kwy4g{>O|N z1G*Sa4h#({0~5Fe69_|Iv}h5?20VdJj|l`m+qZ9DxNzZwqZ0y)1L42oGP!)dokDjVpe 
zzyx_h9@y^RzaKg8?}S%T+sER^nU+k8zXe)hTO^CcqMTwzVXl;89e08fg#%WzxJZ$? zyQ7RMicC$?uunlBG3Gt37rywontJx*X zN2q`xjC&q4-M!c>o&KHYi_UNA+@vV`SFc{hj3Oi+VYY9IH{t&`I5;>WB4YXS(!3|3b9Qu6MFy-DHFXG!m?y_^5VOMj6`@2UCTVzmW7~&h4 z5QU)S-HDz;O*^dCaHNI9uuz6s2!bt`?3m!H!qA`MCz)bZm4DkNTN@mQiB)#7M7avw zTnvZx5C*SW?q=KOU-fZjQHg(*KPj?{MaW7KNfEVAumuyXlj%0nZt{cd>3eu~5Fm#@lg-^nb31RT!Oa+$5Yo9+4T zR&AZ#M@-mOu6Es4a@EDK^R6V%-X>ZfuOh28){4!6l^d;f-{Yysq^c6A1)ZX7`UUTh zoi@1yGMfn5?=S8bNhcv!GsWLj`G0Ja>sJ*GyR#|tJk4iPCPmb)%tBl3Co=kBr-o#m z^z^1bsS@Qn`)zWyVYI@23)yeCK((7T@ph(s6;g@6%*{f(9!6aP_fx_4(;&K72wi;K zIrU~|Q5)qCS2O9QO|*Uy^8AGZ=*>e z=)Fzs<~CI%w>yjIG{|i&R3t)m1b5QjL*KW!r5=TUI|lN1Ve;J*>8s=~QjaG17+3WY z9&f?@N90fM=5^X!ya)e2XQ_&IYIF5|)8BIYdUI?t0Sa{ZlhK5 zBn{7fnqn`lgj7(~R1v99xaMC-EJasl(NUMt%w#%m+I!U3(b+S*24bc+IPM-88BbBH z9+wPsQJ5jm%tjhre79@ata7atxE$g01`Eq?EabWSy|{}t1PX^Gn1A>yz%)q(>bT+uuzjxIs7RYM@-Q zQ2F=YEtt)<}ZKw%U}KKS08=!(W|e%`ph%W+`M@cnt$@iC%>fh;fEi7{PD-1 zP{PcwD8Z%o-+%w*mtQ`2?%a|kOT4_iSe6CmFTVKVuYdjPU%3byA}cKUbuap*i{L7B zw{hb}gTauJl5*wB6?aw&L!~kvu}<%rN?%y>r$7Cv!{Goq)6>)2+uNUf^2wt|k3ugX zJj_+^<~a6uD1Gq32S29-vjm0EcG03mVPRpQAWxq@9R!AH2OSwy=8G3Ee){RB$l({i z_{Fwu+hG1dy-H0@jgF3n8?<}(ZcxgG4I2ik`j|0eYHDiEoH+y2c;Ui@+}vCkINU4{ z6{iI!{JrS%6hUcY|*fddD2?%WAS$Zy!N!Rd5@ z{yuW_$cup&FMC}Mvxh}RMa9L%g-`++K%<9aLP7!@V`F1OLqlaljx_s$kOOP7))X!& ztf{R*OVq7fw;nuraL0}va9Y2-zQ$2gIjVB6d2eELVo*p>93@}^Y>^4%u>_7#g8b2L z1nNK`%vgMUJWL226_XOkLpjuh3MgApP*7lCAPS7a%k>j%W0S|e8ux0Avqt$`2&2Ay z`SPVpmrkBMiFONn_wL1gd+VL`FZ#b|Pp~H>CRnZ3P)cZlgBk1*nFB!v1cOV^DT<9w zTe$lYT!K@0TA&0Z2Z7Ob3yx3|9(xcQ$AWxf?Xm44?bnuG+qz?GS63I@nnQ;U!7T=* z8w5Ue>J&!L!+5r(C8f`KJ(tu!31^_8p#itn!l=10E(}hK1bKMaaPI(&5Q{b&Xy1a4 z;ZTCM%|K9yG5s(QbdUVT^&30ZcWm$2zIEf)_Vw*>+`2)|Y-``PecSeq4IPlSw{M3E zatUO%x-#433{-|oP$*Z}xD7LMosMlF0QQ0bctIpQRWc|EoGSk8XsLq1w{d9hgw{Uz zkYh%vkKy~2c>q>u+=5#|DC5QY7_1Mc3j-A*Lkxk4g-S2V%p+%aQp7+fcO`l-R#xH+ zD|LYFCc49Bc6K%_zvRE)*Qf7I-@{7({|CB|yyRv^#miacPYtfw7S}M=YiX=uT99E( zh-sA7G(OriIl)i_Db6rE%=DnuG&aN}UxE~57#V07=5NULF=TriviuEsA*RtGrm;}y zr625}w_CKSUV6Kiet?&L5R_no$`Pi5IMb|X)08Ng`P}iA<;`Kv_0i7C5a$R}?O=Un zqG4{FX?BEZa;V(nc<3wCG!a@QnC93GC8_!ndbI!w{Y`n$wU;3i*oGL!&h=k@D{E<2 z{PHwI$pC$6w!S7yUj=;u2VW3UpXH^`@X*=K+7w72o<%1MNl%j@50^kxU>In~3pI@g z@dg-53cVZ~W1UY8sJWF@`f}FN=Q65KrPQ~_I-4V$EfKQV2NE2Y?6r^DYag*Me<-mL zPT_bc!Lcviu{Yj%Fu`#=x$$5^mLYAzhT4 zH|&ge9oQxBsXvu14Axlhipu+fFjVSS3RBM?X-wBpip{=^VR%pgZA+R!;`7{B9>WZVlT&P zod$Ik&&;6Sf7{LKhb-z&5A{B8@$|^#)2ZI8{k1QQvaa`4#e)25{#1m{sp97nrbNZG zX!!yS1LXxKu|%g%RWS@z+6GZQHP7Gd&yXbvzF8X9hbmzq^in(_OJY5;L_Qn3)2n97 z+e4Emo~aoW3zHC`V#aG!cFF-Yarb4-Oh3ZAC$I02Jg915rpJ5LaYt{ipg zp?gjDqs8@oSrJ2FWpTTT1~~>-2J;5jXKQV*jC&q!MGP)6;$+pIg$FpuH98OafT7c6 zAT2St`a(I)E;At?`Yro7QLmQw=97IqL)@mNhvZ(aw*IoV$T$ff+ub{J%PRiv^x`jn zao~&Bx^||8gwUEkt|k~IOqnk8cSe@(M*?-kt)o_1(Avt$C4<&j|S~A@5Tt;YmV{5EANIYrZ?!!nuq? z{tm#?eaS=}ZdcREsi%<9bUNP1yQNi@QJy|$f~$n5Q)F;m zvKBu{UJ1EoFV$$-L71`JD!oB69av;>~syO zWmpGcM$o*M3oFu4yOH^Xg_1CaIJHa@;o+IwDen}?WYHzg?sCwsk+)MM5e(_{;h&2Z zm+N>By4rZTdKpFeLnZ`R=@?jbLE!TyeiGfPeuRujz^X2@jiZ$>`SXQ(dGj?J^$M@i z@g)|%fH0+mZD;u7zUpaeHb;h?nA!4nmhuvpK>182%p7^s0Va<2o+=w3F;J)&r}dz? 
zzCaj4{|C)|m8CIw{Zd85tSxyz|cA;&=AhXP*@m z6nJ}kA3JvJZ{gD?!OXO^wIwAbVR&BLIO&;Zp1DtxK8Flb@xcclfG#$0;6RJTB1w{7 zuUDy5kO%GW%P+rlul&j@uYk_3R;&B<>o;oDDBJ^R^5n^&o&D%XKl=Rh&#~H{{`99S zSFV6MZ!(#-Y}wL-3EUcxbJfGE$Q^Rca_#8oqczPn`}XYv75>JJ8)wd(0sS4f8a#gd zIB1tOwKZcVjoA{ir8v4cIzAe@MhpZu52SDz7guEG?`VU31*;czQ~@ElGw-Vq#(t#z9uVCptPB<`(jZ zS3pOQ!Pvi6t2I789x~7);w=csU^Qq972tM22JQ=%BSM9E2(XQbiy0L^>c=rZu3uZf zdGqGx=H|-EN>HD#UcCzQ2E$*qY88f<1NGMFbk?=j-3qvsmXHQLLSN|Y1JXiwAO>Wh zFJKOZ@b3T<0KGtuC?tptN8G*~^3XfnGK`j!l9GZAArB3K8;A-cg8JxQgnMcyWhU*6 z+WBbdqg!@t*}s4PsZ*z(c;bm?pM4fGFTC&q{A2FjyBA~z^}N2mzP7UV<-nJ7@^TQd zL2qtEDlps%x~PDvzKQy+AZ~zG6Y#u2P0-~P)S{Dm%_w*al;iG`aKs8qzH>)s!=Z-C zQ(gIFqWs&(77)H#-mHhW6Rz*gLcSaQEp7kX(t@DEsh+hZ zfsJQV>u+V(Tu-mt5ZyRGaOH51ib!c%nDk(*d8W-gJKj7i);uHDJU!A}5F(8Zl!gUL z`EU#|kFlD^hL}eOOQS-~viziMIQmPuq2_Us z=E;zv%v1ZDif8#a*G9Lj2x}?wb!MAt>{3aJv?y6xm?+Ipl;+yZbD&?FRG28u8z3zi zY^oSwTAE~@50c26P0PC=jR=s2`kFF*rEI%-;gQ6K=Y}ks?o*#^EXk3oCt4ce2z>=e z`F^HcA9=^;!QRF}UdBP*#&k$th5;T1yQgsgEg9rv8Vo`POXHxgk>(YP0-Dyvw4Ap$ zyp~(>`p~jl*;S9E)o+h)c0{x^gtxRswd_u4zG!c_HgLs-)P|EOj=lYwy81QkgJVL| z?u4d22~7u+ocsGZA@fA~^0$XA`H!hZpG;WrN>G)q+hOq}Z!E&4ITx-M}p)ghndCJ6jM!VYD) zVvfHUs(mU__pqPlkcPWe5PdNt7<9GmKKjRo1s{&mcLl1pXNEjiWIr=D@ujJuFJ^e1 zx2P|AYMOMyXu_osb~53Yv%+RcU(cwHNUCc=;&E^F3Qygha8rv~RZF;fmc5kZ`SXU{ zr~EZ-qG}}(Hd}NJTGb-LM-$eQVJ2z$Se_ljaT9srnOvVPt?+oFsUt`?SJW)vgj%)c zScLhgPBoL~!w6d@sdxEm3Rvy|x=;ORfo=2Boro}%du!KQO|Q>OdS+JKi3H=**&YXM zhP}y_bLpOk14K}E4kmcKFfHLksC3#_^TSmA%`EBJbf42!kDVd9eOBEeFLjq*-LB!D zpKU*w>~%51@Qbp6+d|Z!*Hoy5V?o+IdZ9H?nx$8d7r28l#=`^sT2cd2w5njb9C5Oe z%0v<-->mm%m`H)0KwjF&eWv(g=l(A#5xR+wcG z7xSti45y-Ml)FiSBwN=rkSx^99x}k$moPpw(<6D_oe(1Ml-2ycD|@(AR@fqEFPeHV zwDf_}baTV{9<)1)8^Ge(UD$oZPTf%#JW4c~siD4!9^I3{l~G_B$yH4OOokBloQiGa zai3tXP4P$e(1@XARsb17FVbA%yXxbK-UP@IAW+ z0=gbEgYYYe5KEX)!nCl$6DIyvhULSW_|G3-dHdYlec3^|0v9OTN-%*m1yG)lK**ST zsEU6zMNqSrCDe9`4!zZ-p92h5!Z5~P;br*oytV2vS!naugj0_AMumyD?=Fa{>QuM z-u9d6EUlou_l9~HH`p4X6}1b4(o?`Hmg0A!Yf0@ba1bG@2)RP7B{s@H2mDhHX4wP& zn%OKLPuNL>Zze(#Vbcg(O@u;L@M4%$!i;6Pe0hsQKA+(>@WNsuOe8#1&R|%0Zj)H{ z0fsZnh$0Km&j6VZZj#8S6AoUAU`5r+zy-gQ2$e)=B&q^N-Y{_v;fE7$AmM5`ej6`9 zCWbHvla22bSr@Y$Jd;}a4zTc#iER5({_xm47@?IB9#nI)2%kV$_=hScLNlXErIq0y zs>~>y^ijjZpGVj#B0LlzUWw3-U^pnrVcCrqRkcbONB3~_lc5!MvcRu3fK>b_!o?Dn zm;ZmEt-SGKCXprS6Y67RpoR-_dv9gv+LBK)kedgTnLnz(4@w>d}M!+gf7<>w} zvfmhs|EdD_}s*;it(2RcfyWhEYfAqa_3a?soE8_P~pUh6F>OD574U%*n&oX@Zdqv z;`i*?)7;!#RZ}&&aPl*r&m@EZDk2!yO_-QyE8ggiT{Q85w2M4DPPL%5=AwmKiA|fK- z>58NjLkUe>pcjNAK(#~=1}dbbrNLD+EP*=M1oxi`3JStgL|qUG0XbnR5)%`FL!2!x zKPvx6kw04Aw*0_>1E9mMUcLIzp+i@$Tse92B&e}1EiLeDH#RncioJfzdgl!1iJ%jy zDXD01f`&clHw1zq@`G4PN=gcLr&K^`;Z&J4C`2*|+ZgmYe{cmRFW`SVP7w)Z$ z7A#0;cYz=STt$cg6?xR_5nVy>0XV}clp~~oxCap8FbXWwzFGcDc{7Fkn$z9Z>Auk2 zb-X+2ztHzl(pODL>^E%c8>uqRvwa6fRzVO0GbpN3g2F5Ciy|Qo(0#{hxZN;z+>#RQ zf^duoci`|9TNh-j#4yAX1sK7rikXly$vDp#!w{zz@fIvc1O}yYH(!BryfuioxbN(s z)W;0sHn2*g@8|zq;$*{TZoj0u;-z^lSp9Oip`PLW>Cd#v^TH5y61+}+!(y-NWPlH4ul!7SplnC?0U}+>pWAX!}JYQ3muaqU@A*M`!DK|(O9$}so z)s44I8Dv^I-_KcVZJy!n7-?RfYAQ~W=A}pr2bz`+HkD_asxnQLgG^-uq$O!m$pBL+ zFvx((3{=~7vaR6Wz%F~zenS(+Ch z<@=j*{Y*KK0;F6D{tWRprh6LFJPfJc#z7#S3=NsG!=(v%9%U;-o!jCZhZ7y=?JHl* zta>AF$<55lr_yT=Bsx|{H#LSgw?;Luk7?T5&v`nf;dpZ6o&@K1o3lNxsUyB=YkX5j zTvMmbDW`b3a7$d%)j`YN9=`a~f;qn!yWrKF@@EFuUbL?~+rR!;a^nU2${QIq!2GcR zb!SuS&-9lW?oNQ_O@|U4Q0>vQ6(>>}c2RCn-qp_mJl94yua0hRiEOS9Z>|q(Zi;Ab zi)vmI-P{o`_uCZNTp6-viJ!APs9Cw!s2K>Zs23uAb>!Xf7S;vDU>eMFjUMVWfq@tJ zVGOs*ta`{tbHt+FZBjiJV|;It-^HB3m3sARy>KQ(b0tV@HSxQ%<4%sT9rjTlG^!67 zRqZOFRV~1)d#xyxaQu8$cxH4&vr)B#aC=m$lRlbeqk5}J-KrC|Sxs-Yj{Uc?)crs;dbMdm>zaCCPuEMQku=UYnbIEXr$-w`K{$ 
zH8K1=jvvo*i&3j4+;2mZPwLH@3a1z7s7-e*Ji(3KkSX^)>rcUE_tgjTE#O_rV~Nx zMJ%^N5ZXlk+5o>tvSWrY+ysVeR|`uxc|Uhpe9~DqiQ}3RJ$DTWoG0@0%zF4w@TN*M zs3h5wuWDInnP<$wylhZ}t1eQ&Tu!88=S#e>>Z`1IAp9dGe+D_4*fcgO`fTPV<>Sf?oR zC=D%lfh|3e9}kyEmb+mV_hh_xlH4E~$pyLKX#gRGEO$-AEhaqTEtYN^hViG`lb5`U zcQ@WbZB!V~Zh5*T_a#g$VM-b9rAYq$f$E)FE{xt&e;1AfHO(#=5NLu+WdeIpr%E9# zJitQ;JB{#rS#BFV9s%0d=f`~Y$h24Lv&V_DY8Fg)1`lzeE}*IPqkR4j#8A9qDm8Q| zp^il(X!Q1}H2VBR8lv6gsvJx$Vb;+7^Nvz)pmE>X;FY5hE?y+$8A4tqQjD3+B*ffv2ow=4G_{{A$^{W&_x>*TSHlA#?LuLz~91U#akvkxI0 zNA>-^lB(07nojt!L`Ev?gpHStE4XyRWe~236;7#zxhw~Tne-v#WJfV(7?EL+;WBW+ zsOb<>DO3W_*l3Do0RO!zp_&n95FTEW2FgY?Y%$?CFoJ^!jlAmBcx{)+XA*We;l~he zBEu!fU=o`|*pY7!mP2;&K7B8-)yB1weFV%ZC30hTqK8<2LlkEYb5%4OIRhKD+H z37<`GJTKefuo3d3F2hQ2shSA8wW?~95J&&B5cV8Tn}psM8xdtpj?Ew>C%-eS1@wLGUu`v17A#wNkiNN%_k7th>nhiOmtjy zbYk@2xWU6yhv)Rm8JawFLf(X#$urjmtgQ*HSsAvn*1vXHz_Lp3${N3#!HI+WCHF({ z1oox66zcwvmkk00N+Zyzf1@bH7K-P+o^Yxk~AyEk1JaA8*1thnSj zXbbY7$qhn7$v`Q|@u~5UZ2fKVaq$BO4uooGzJgc~ZuT8b$z2%8CnhGsRfKEcR2dWy zFh*F$mTub>vFpN;3+vitlZ+!rj=-&fXB+Mp++vV<_3G6ib5~c_nzd^hsvBMidLbh# z0}&_G@zF*?3BHP2ykhc!h7*dfiegQ%cI{fk9MGTwk&RBeV*(rz_w^Ty4q)~CB&V2;4KQ= zq?on1V=TxJK`LaZOevx$N;ys@8WEwDk&;nh8T1=+hhGq)QRGx4gj>97)heuy&<)o7 zUx2r0$-mr=7es%5-KeT->Fh&0aMlpkas5n>({ zDCHyG5+sd?HcyK;7xt5i;?1+-%roFf%}%;4ULg5PxzyH0PJX7G5c8NQ^ArlpJQ!h~ zlp&QB`!^K_G>wz$Go?zKG&|NjJyDuF$W%7OR5{$VY=o&U$5fMHsz^7L4KS4sG%d|E zRlzYuDo&DQE0-|y1A)?TA8JSwAmtCWl%Gp&IB%~TZ(cdvw4%_*G26$PDlH6@h6PAN z1Ef4iAitlKg%FLW!R}>9gS-nt8kuBTSQ^lLwtvIZ>C2xOxcsT~+E;VRZ{?Ih{-S-w zt_0_rXc=r-7t_2cwy7(@c{I6kKLtQG#5O_67K*OG=~fE7KxSP`)5h55D+8CmHLT>< z4;KA=%)-}l%YK+q^GMo?hg0j1QoQAbA=Nhq*IbjkSb08Gb_fEVyAzxzlN%l%xE#ul zr!)Y!EfmH988*c>w?s8NB4n_o+S*(h(!4CRxglIGSsT-|ExxHGvbol}wk)`%I`~_> zg>kcTQJ@vZWz(YIE%=>Q2bvkf!n^1kHD9O~VhA%z;364jrix!}RBw3N`Fa_j)YexIaK!ZBZ9$)%^&QOSnlqw@f9}YgH!#G_^cmVid0r3ts0X&gIyH zQN}Gn+6}(qR)cCD%S>g-TAi>@6m}Zb*9TjElPYyEsjI9Kyx8YS`q{pwXsX+p>E~L7Tu4PX zUg6mnLed*=amP%2D45`PcV(|`JChz3EQ%pYPd=V>3>qvFtHaYJ%U-}(e~OXcNe>jE0( zqlvm#GL4g1&X)os0WQ2n0f-O*^P*hJwP50{mQii(j%U1dV(uXS(E9F00U zod!{vL&2YUbc4eJ3J@jTH+szXCWa2PjRH-z)Yhfg73HQ!HFx6{#?4ZYq0UEBsQ1o5 zYLk;qVI@s32TnqPqZQPgrd|$8Jb>PoK>7d-lz|j>IuTX~!eI|sfn^vVo@J--GP=^j z3hOul>gebbGM>OgDEE-Zu&_b z%4Sz=e}>&D$k_NqhT9N~4jDq?iRnNM+bPdVUNqiyckaafCY>D0W!XMv5h-QG1&~R2V{sJdkiu zSVvT)M93tZA1#OfqcSburd17+Ep1qRUsI@%AWx@ky2H{%34Xj1WS1)$15sLO4~wF>bm>xbBkF-yeAA2i1V(xw z5AFo&q(-BeGG&TlF47xvxj)`=huV06u4FQqKuPH>7y>eg1-U~m?jk|?TW`Jf=%bHb zxpL*wrArS#{P5}1r*Gc8`Kw?33c;2>W#~P8#llB{UwT*Tb1W~u_+n;ercS3TEiFY8 zo!4G_Eh8f%C@AQ$#~wrQq&Jv@g`a=^ISAj{+M1M*?nMo_{v**~bDNTNbe_BPAm=In#0EN(<)oR7iDACc;Xf6^S9uBAI zjuaOchYX=4(naWY1l8awoJQHA7KAJ~Gw@7pV{Kbo+wR@FAA0B^P}Lh68mg+Q_U_#a zk2)y4)pgaIW^Z~T@QHYPweE{V&C>u>Lm8$AQz zL?tIDLmv7?Xayue$OYYzfG0XYf$$h}Mh34aN`tOjG5uqfMK8OUf6=+#3FT~A>;*SxMASlwXM$QkT2LTRfZj9+ zBPf1G?(Q}_cI=RGnD&m%9h+_M*^=K(-nw?{whh~MZrQnG^NuZRw`|?8b;p(+JKJ|| z-?Sa`^68eHyE=C5?${0Gc(rp=CuDZD@7ld}H(ZkQ9lQ3l?}`0IY{FX!&~@kbPV@{y zNCe>#bbvzi17Q$l8%;!q(Ci0?kNA>eWP)ZVh{IrT8=M*3lL(n0hi)4lmx&9?pa3ii zw1PfLioFmPDmafau?oO~r^+ToSW_`Q0#+auIz8bHf>X3sg5&=Vyaj(cwem}%@Peo6 zt!(YvBMcuc@_oZD?jvdkQFRgZ6}|ciz4{nY%_4%o%Qp2s?~-T|qoqHrM*2D9zk_uO z(%)h#W7Z~_#UxKdnwN24kSRaOTsYOQ+7aP6lvw{lX4UgUs!t@ZtPF0L>APZ}#{#Qq zyuXy^FXdtil14NpRX=I2-LyDgTK0gYVZ5atGC8Jds5979k!h;R zFjZ#Ab}vhlqy^FDDOU5CU}YR!`6V;t-yyAjD)O^7shpj1*D-0*Bp#XBPx|65+^^})4I4qA32qx!J{ zD^8{~98GTAWoz0P+q5pGc|)v>rR?h`Tbp#mH?5BDHZOsz8z>ZmX?;xdx>(ue=Ly-q zrR3kI%=uvS!k^?WeJQK*>VUcnsVh$;H$IxS;+5=*SF+2W%czFJN9-%l^shgd=-A)S z@o?%&D7=wTb0WE6Yh2UjSQ$&%Wpi$hm5p0gN6BU_HP+_xU>S9(3zH$2^)XF5Y)+72 zZS>lf@Rn5(-FSRhxxb`q}hnIezx9V1+_KlI2D>2%x 
zD8oscd97CEF!I$Nn$G-)_Z>stuN{0Y%y>T7(8LPWEVtLFDrNY67WH;-&0dS9-lA@m z#0F74m2kO)?NF;u2kTaQsOPHXPqZTlS0SmF@kl0 z)Eo;CD|vpFiq9f!84=Egh*uJ&!-4t(R>R3eOBKiO@G`tH*Z!mOtgZg4hhn@Pp2i9y zj3xXjn@6!polQUc4kugy!)7z=7+DN>_hxfhX1PhXHo%ZA@`FWw2H_x$R|)euVHnSL zwk-=~=YYEv$ZAIqLsdOh#8jK^7VoAEbe#3m^Fe384NOLIaQclC|E}+m* zF-2Xh|Cpa)Ee(<~kIqav?I_tT$SrfXIUwf-0boX?IMQT#j|%A2O{RuJ{w`M*)=g|Z zg=Ok!=+#D&M`Nc9XV`YNu#*=O2{VYW4ZLuUSGTH!@gg@=%Z=r^0G2Vx*cg9UP*oWC zevEA50yWWgF^#Ybh|m^fxR~LyiWBA&*_b4fKBXFJjG?6tZs`=w3#Qgq@b3^#*eHgX zEc+aB16Xzf%e6A9heTm0C$okBls(b9^TBH9C6ci53_H&#RPaJI&u=sd@Xs=W;i_37 zfw126nUA4gvZfM#9>XUy>|!F!CBjg`t|Wq$4qi{qcwki=&2zKWTonnFy z*5$A>k#M!VP|M5UkKcW7wjg7W@Q)+AFC|bvEGwN5J7InA(U5h^^nQuLQ}x|XJ^U=% zTrNd@GSpCn7XF2E8E&npI%rX?5covGa{m}~@cxvSmj|!$@IL?c+i!zz4vHbq^K<9U zMYoh!UU?-rI2aWFpZ)A--}L37(;hh;;q>^=F=W=9jmPsoaVUf*p}emAg@6~*6Vl^tvXe_{YAT`u=vrsR35kjd4-ZFB z0wD<5B8B1==)nY6p;};Ipw()Hyc{sbZcj=|LWl4Qpqu zJ!d;NB6~#t)cy+IqU>=D)j;t_AOcP?It^Tc-P4o?Y4JqNpB~$Z))GPv3n|6;27-C?@dtipNvJq3Q{4b)DxPd97)F!Uhdhf>Sl$<*r^ z-f;fWq5J*`p3QXn?e2kf|))RGus?bPwd>W6H_)Sp0D6%BRzpx5qXv3Xp+_ z2fgZ{Nvt#-gb9>}`I%%>7LW((`g-Szbl&c%Sm(J%rAKJ?f#pY}Gv*t~rxpXqUu&k;c>B z;$}&G)I)tfM&D{y@0Qe0_j!D%wV36nGu&XpEF%0e z!Z)jhryhuVG0StmkG4@I?DQ5N>2JO^(7V%D_j0b^Z#U%}57u9e*I!Ea-k=p8vKm)g zbgiPwX%HSyH$R^)JsY9Dm8?JFsUF6#!wI`R#JJ8+KbH{>Mq0Wev~yI#d=0-qBRF{e zbgIwGqy1J#8^?H?4-Ac{5cxtCKZ)aO8DR>`jb*tV9_oj}4RtEvkgw*jr{<#F(jH?O zMc9oJzdKC2k?rwTLBLU~Zl}%f!jO;-t!k~8`hcJ2K^h5R1mOx<{sD%Y!SYKqLIKN_ zdh1pOnC9wrlR55ky3dI;@BNA9HQt&LDn5&4%TzqffK~n^&Q4;v1)_SJuc6+k9?x(S z=$Gyo!i;9QV0w3=37fAH;O#ky11Z@E`<4VA3`Z$Xg0KXtzf zaPwVKYy<^*)KG1lcY|DT$PZBQDrk_LsT3ocMq|0CT}Ze4ONN00G8^QfjiW6tgllia z>rZ%TFQ$8RaKZ(nP)}6*jly%$+AE>rC4cqtFbzC-vk1Rd5FYVR?dMd-J%v*N>Ny;r zL@hcd5PqTTts{F3B~pi)8Y1k_@D)54K^PR-qzF%q${%2I(laP_5>! z8u(I{k0mn9d)B6JkJ0fJMc`qj zh3DCy{vTBnp@8AMWHUt$9_&>_wMC^`r-QdCmd31>ZFK^v@u8FFAN3KM1tE&!8_>Kz z4ei3-eJhaEgc(J+Q8YBoXu{j%*EyMD=*rnA(80L8Yu!T{y9X2EUHnaS#R#GE#>mz? zGEU{rilcQrWxF+bmCl!k#={V%s`yT`szb%^5co<)D8467^N*tblt5K~`st@fjvNUk z)2C1W=%bHNkq2de)TmJ*At8@F_E>MEr3aqiZr#xvC+V~B;>C+5lWFSIsY=w5zZP%# z>Z`9nctH?m&z}AJ-~V1gQ4pN~`tr*!5pU@cUIsb**MI%jTW`GuVt`U};lhQV{p@G& zz4spa5Zz}i?s$uGRS72J-lWHLfwZ4}_8IU;Sf&TtH{N(-=+L1&&wG1&!?ettIrHte z-&PQnKm6ejzx&Baej7d*VFD)&t z8c~&;p8Ra^vs>!6965I6si&TL{PD*@f8Vob&-LrqPnr3)4;Sg~Tog4zXjMRjj@y|E%- zMQ~!U3|Ay3BIE*(83HMA3Y@G~D*`5nlO!Z0pjk*#QWAOtDP}Mz2D%H$oA^+$Lh%?< z_9sG%6tp41u(~#z4eexPh-P3yZFKFUd5>=B+_3Zb&W&9g51c))Y_?#<%+c{>Q>h^ZE0%T)>hkATh&^1cF5Ufk;`J!zD1?n?OV_QL-DV{ z?Qh@|Ehv<&a}+=T;R}TWdLbcng9r?Q5;TBp=jM*haqq+>y_2-9LpFE8UNAf?lHvq} zMk9*ph+;{Cp=iI|0H|Z@j_o^QK8i_vJ5dgevSA})GD_qY1q(uR7lpq9wjgJB$OSPC z_kEfW+<<=&95NIt16Km`?x3WgI@qL#ISK+M3W}nH;l)$van-6-Xi$XxVr9kWNO37r z8lpcD)>QUXa*sTNwlV)x@D|(> zp7upA<^mCKMe?6i1^jHV_#+GVoLY5V$8F`+nexkq+z>a0xcj39yb1}YSk-1$Xd~)F zjOJyt9J;~!-^j@2ds2XLc%X4ukTfztHeAW^G7dEBlP!i6iy_V1kQFXX$gmX8_Fu6i zxWO6T+#1nZ9^CSv_sVQ@MSp32xHK_H8WAWBr`|fUpH8qe!fGBDVSdmBwu}uoPmGd7 zwM?d#Ei)3O`8H_|TnaIdj+bUl_G+BzE&Ka~$VkX=U=k#A7z-&_8Wm}NaFA)~cuW0k zA7_!TbGnyfn6xZOnvZx+f;0!+i|nQns2OY?r;EW#oCneqmiiyOk67Y5dTxwPC28JHSX;vLoS^*IZ6yLhqO9cMtB;-b!i(z;56yXR^)7F9kwq9Iuz3R8 z6feCrIq2p7#(f6ivfbyjulQ1u_LVH-E?>>nSlu(JhU-@Gpi$MXQJwP9Jkd}8c)V__ zN!6uQy*5C1D^YxPY{26q!&j<=GXdh(Sg&(o`g)C!$+HiQOjzlyUqrZeqw0X9+NR-e z3=g}Q;@#yV9!rx(aD0VH{ewXkP&dvF_BoyE-J%xOs)gO2nrG5XdnC=_K+W6Zy}J_3 z^(tYhAdFE9%XRA0HtD%cOT8crW!PMvE9Uu&{(+WW-(W|OFw41yQ z)gIb%o}a*Q%gvgVqN-f0TIZpepy5vs2|jD{xtZe8z_X|9mM2oo8#F>OA)AcqIu-YL zhR=Gvy52)7Ht0)5mD8-Q(h1{OE|0L&87@=BFEFb?9#EneYSrUJRh~{&29smaOy~Gw zmMb;umZt*?yw6&m+jvmXyPd{_ew94 z=!jlKFQ$X(*v2$tnjK>ku#@08iCbLa*LI%&``OSm0tf*21%! 
zZG>eKAAv!PiO3&EDAe$~9mOj)e2GbL$5&%E;I~QqDh=O9U>&QtK~HV2 zS@Z$ydJSL02tDCd%?V>AKAqtb09zsoTbwil7;Y{=y5Bu9ir6R!`%Ned$qnEBW3jLd z?A|z@PhioVH5l-tam}#|KSJPOQ}7lbSe*tzRtW#>bc%F{lf{h$d=Oyq??8J60V(|; zW!IW5~<7UufI+NJ?`DRM^~!VZs8&PKU}(kEd&7h z=9_PbG)gBm;GZs?@TDI+!Y_0s-8;cX;1ChaSFdAy_;;Jz;~cuC8HWVT1-_u~>+n2~l?;#1F!9A!g77tAQ<$YJ^V$ zn?=gK(b2?>h(JSeadAXkm!$AeBk72wh?0nsn_)NK>;B$*8SlN{>-|UF9=#F$#%2G@ zO%6>foK{5oM+PF01&Q&(gAIuZneRLQMCgg_h1>TW-E;23xwGfbo;r2v`0?YLHg75_ zFDjT)a4PWB@WkPny)p^pLfnX8A>b06Q{`S_!$hT8NahjjYHDgKoWo^!6htuwZk5t2 zr82Z~kN2JnffufYUL(`($lLeg@7;>L^;+O-<=*9%L`zI`48eKen2_k6pc6zqgrXlX zTBo!Sf*z2#o?s4yXhGJ)t%w0JECkIUUs3s#wX4@QuWSx^8WQz>)S6XmR<^7phzD$q zRHLFXg#JM{r&i8zd4sjkY#`WF;Ilxa-?DmjOEVIjAs7hBA)>eq=?_IJsHUzOy&-Fe zYK__$hyoYnBN1|;61eNE1vW*LZz;}0MuxUS2aRSJQKW^sJX7KdCBslE5v8?I5g1B3 zBGt&bBPP+*C;GpI-@@X*R6qQjuwT(!b>xq5d^3=y0s-mv0}d{11=5=~{L_Ah-_CaW zY>D5aSp98V?&(1JHxupO4&olTgGaH_(+ug7Nr3;?9yo_07a+C&Zh&*NK0hY{ZU@l3 z7q7b?pgjRJx9#|6BbC=ZVNAsTC;MgpoIE2R(=Z>?h(PlgAM+4QSwV1w3(}R%us0>! z8e`$yP44R>4~bT$_O>e;>rj*H+L-IwFx{zPkZpOQGCNoq>t!C~PS6r_UvG030ZfQ> z5o|eD&P|f%Cd&D6AyUZ>Fpq*?d71~r$}>jV)()|$4pAmx!y`1o%>&J&NjehTHbKts zDVGkmsTyHdGsUTXf2guJTpX@Uf%SnlJQ|0XYm$|GZ_KN4Q~J0oz1^|JlBbf@$F5+1O!@D}WPdef;^U#Y%|X@E zog0@0)*pzjIiFZ@GOlW;1@TF6h7MehSS3vedud#OlX@0lxTDmovB4N}} zC4r4wBI?g1RNhG|{OyqJFD6a+V&cTNddxW+UwI*+;&M{?1+2|-Ev4j*w4xi`O0FiC zolB@V9$UR7qOLWpehWf-s;{M#zL{QlCAqB4Qnw+jJ#netzp;*RY4BbMvxQ_E!QSqP ztc9Jwky86gLg|%+GWF@EhD6k;NMQb=nk%Z%7vTNOP_Geo*lX6f>hb_T z4GA`~MJ%7I74|B^onXzS0Nr{Gyw^Aw1*|b^_H_3;5Tvv?Xt%p->Lk8JFCFvHP38F< zz@6>p`%Zq!c9VF%pU+xH!(b#7z%;mOrlMxK{(xO-6qlN$6XKulco1 zmrW*VjkDoEkiN`8Gfv>vdYG36I`?3>O1+pX@+}&EkF$0p&rcKi41t>@@PkEuh@va< zw%r$Ow?xaA+Gw_fIj%N{OO4V2Z^WW$GK#xBjE909HtB@B!#qzb(t|<%XS(@b4byM) zGql-A+fBmT(_+pK2(Fg+1{*QgAWpT_&34q}$zm?cZM4-alO-5~EVhx#^+J(RC^87e z0$<9(ap0SrZ5|AbJnW{~tP|I}*j!JvU#m#lL*>m;_Iq8mTf7ZZB)%dgxFR)Vgq=R# z&KPNL^3w@ME5F6g%Biqd!5Yuiw(W|ybOK%+I)f_hGO#usL6xqGyJ~)mTD(Qo2O%pGwdx@$V}a7U30v&u~~M-egrHvq?mr9jAsA($v!_SBAKxn#hc8}0k)g6Jxi zyBMl_GRy0&x{PDVPJLJuVA|mz9+gZCJ5*xI#60ZOJRNI*{2l*80tR%ZV#dfK%&#cL z>L^}+OCw$32qV^(ae1DFkeZRe;5hlGHE&HAtSG?KJd9lwVwhzHo=?TORR8qCbXn+B z2vXD&lT!L%EnHp4O&C0eW3cE=83tR1V1ACL^~aZ33lB%;$DjwU%(h#X&BTY5gfYah~8Dw`Ajwv5$$l@7B`pjUFI~T4$pCQjQaYSi(GS8iM z5ZX+_QXM~mLx@BeV25*TB@h=g!f1?pVC)A01<8ON4|qEaxy%>&l~^Cf9r+NUi?+={ z8YOY?mwY2D#-mI(u7DAO8I+6di!Fy%0%^ZiJZKh2;AV#k+%jj4KjyuR0Q_7a^adQ< z(LhZ95b#dn%0?ij0WL@6U{?bXt&8c09=*_8GiMBF1fmokHUTq`MXaXct~xgy5@2TM z0KvV>34@JfV4dOTo=^IbwK~TEl`0S2$_4M8R7=95y8z#eemH9>218FKd?7zqf5?b< zQp`-iFK30R|8{;0(Qi43in?D1P!>h1(Zl z3xD{-ABGJZ#&O)1En9w6nO9t1`BeMq*?wo6w>G!6 zwY9dkZr;54#EBCxzx?w4{rfj=+z8j?l;v#cwPk_l0^a~%LKGotvF`5fF8(eh9wkj7 zO_QcgDz7MCv}h3wRN+?D)z!7NwF?(6+`fG~45F7WTVArbl+XdK!k8f_Dxt=n5U;F$t{8_y>TH>3CRp11Q>!X5D17+UkH!_+fPhPB)-gq zJwmL72myvD!@$asmdF50z%Q}$IeEIURW5>>&I|m1_cwzC4^c!n?t?f6c zA5r@C^Ye?0j7(2YkHsZAIvO?=6%|GOi3s#W(xxIJA`%i3-~!1%f*%Pc1D8`%QiJ1z zQ{q#KZAcPD9FA{jA zQOE_6SXx#E{3gKi#DcAt*ANVZX7`YcBz3Y9nNusJwbnvEQYJ|!(;ve8A;+$^b*7Dw zR3<9DLdJ($5s{&z%~CmHs)VDazEE}wO(r6hDc(YNL3COuvxVXWm2dzAzYanCC`jhW>}Qzfy!u4 z^8nU%4t@p4|IGCf!s?`s~0!IpNDXdm;C zQ1iqTxgb+6?JgI`D6<01qcPahAAXf6=MA^1?WL3km`8aiebHp@?{6LzY#twCo)BW5 z2^-N_ ztA2n|TjJSp9&t|QT}>)G7+u?j{Fm!jh9G{+8tg#S9MZ5T0KpyAK8>}0Xe#$ciA%+v z?WsKt{_PmZ=J5JsvDMd8i=So9_~W>VpNyP(C%xc!Z1wqsikqp$x4RYHNiV#eR)j=a zQc5pd^>X(|)o!!YZMW1Njjg(tQgW+X5v;$%QnxOw0qztwUWeU?mIfkD3qfF31UGI7 zLrG6?+vBmw(&(*>$~RJrZzdO$6U|uP%?c_K3wlqBUt+2yTnG;1&LGRt^#-wsM}|A2 z8E!D(N(Eu9S=#O(z3iiXBf)09Q49l2CdV$-2`g-bw}*OPPjS09#H$?eSJIuYPD`8t z`1yjc&>$@og=&K|R3ogA#WSwbtx)}&;rbBegi{rs;)v0G37jRyrz0 
zdU2LPXtLGpb{6;8N!v}*M4qpfq&$vY<)tk4R+jSI&AvgerFv|T_=_=0iH+D@5eiq!ZX;?ugRZt8A%>!mW!+bVZG~kY2f5%pLozR-~aLOoHD4oMZuSijja@g;0GSMv*7XT#FfbrsUaZewFWW#@~2Q zC%ik<;iFYCw}-f7GN_YwG^X!`IUdWgv`8TaUb67+|Gpw)ub&(Y3(T}?jQpIU!6_3Oh~GjS+U?v)e!4KK zEHh<%Jv1 zX*Zx2(*fdjOa%N%2hAd_7>A`|J+XsxKfv4IW=5lHNibt7#Vkxsfo+8H>>4NSL5;YI z1kK|kz6kxm@Z<2!40@at@U(80CoyC z8lxwWo&*cVLpq(_u5gpT#cESDnB`4bg5wS%FA~EAij1 zmkWcAO`A5sC=v$dFz)Nyw=Yd5`VrGJwr$&{*XwO*HzD-zNPZ+Z6YSN2= zs}c*@9R*=1&4x06)OB72Pn|e%!p6o1hSE20-u$s+MiOiXo&w6NQS)|wuqGB=zka=> zq@=L0@B%K27A-0{R|0+nyZ7!cD=(Ygd-`wL-@e@Q<%-1>O{b#P>*sQ-)6Lz5yqQj6QYhn#B8IYqKJ7k(awd8hC#tWvC*;H{I>1O-*^4)^$$My;LSJR{No@0_}+W(!67|% z?AXbZC%10hibCV2rH}m{kDV|!B_)M$cwi&&cwtwGObr2x;6)DGCz(yLaj}*dOGr#e zY`55mgovcfBsjMuT7sj4BjO_xk`f5y0ymG0ii}B&35yDgO^S_-i-cYE@bidHh=!+z zz)GZLlIcb97LpDFqjkanfotHIC5#a`C$2Y?lA*TCAz%W*G$?>T0f*J?c*p9dl}&`- zvUb&4HNS<_QKJfKq6>jgh;Rx~--Tu4#*OV-wX4?>ehb1q)~uzDNCcmuIxy6ZS=}H- zT`0c=`6%H&P%Re-h-^!t zElO@tE4mQKWwEtTx1;|Gehc|$l<*VCF+qBqq^V`|2Q=^@Bnv;`#9*h+JV~Qiy4CxOtjI znI5m?rYdtYl)^N*AXS+cujGU)lLE{meU+g;$`D@#VJZIRktxcYfwq++?5Za@)K77$ zA8uFO*IeG+T$rfLj!-5Cw3|l{CY2-1Q&QxD47oT>F0?4q{LI5W&HcU2SpnwJDe~N5 zHZ|!=A<70)`g_6&xfdb4CDc4A+&ncw$?ql?#>u%+%8WQAx39TufZd1p@^KL^axQVjCZVE9$0rMx~9RuvB0x&np6Ein+n*n3UBG> zD~}lESa~s}$kj~QgX9f z@yiJnyDW9BVF>3TNKSKbLrVyf=OuVcb8zFPaHJ=5AQ~AS!KU8nUU4V2_*QB$J=dM| zThs!%oy?#$h^mjADJC#{FOHib@bIR{1#Y>6ZmA%2$9!@h7>l^uOmr}k>a>b6p8_RD%0&)cjp-zz8LVU?X<_ebjx`mhvP@E zTrYS~47<@;dq3H{(@`^<<+52Wk3&&#o=zx|#l9>%O)HFHk!sCwz^-=Jy)!d>uTitt zB+d}|(F}j8Pvn*$_guj2fF0IJ%XxmcEY>(lkuWG!#9CQgsECUdVI~WA#F-gplA@V! zBUafNCOev9;BW~-3^I&n2Pua2uDXS`Qk9){lf@}pE4Daj2J`$Vz;AKZt};j^tgy{q zv&<|ODN?zEb{ux}@WVbJu!VS*8=!>;2x4%+@Dq3orKZ?+#yy^cx7c;b^ik{LQbfg8 z1;0EOZ|O*nX;<8(L>HS51eo?cDjnr>J0TnDiQ+931F`8My=%6n8ktpxU~BbscEy3J zqe_d$+%ZLnc=Ebg`(RbAVQhaAZ=ow~@ab^0!aE*Vsu#wFvw%>h;a|2F`ZB2fBO%h8 zna1G^mjjqr;tXG|jk=XAzZ_~9#=uY%ZRLR&GFERUBV5DUp&I9_H2+50zh+A;Q zNB(n2G7Jv;$6|qIB}Bq2T_^$Ij7?j1MomdTcH3pX?6+MC|gV-ZD08v>S zR7kfIChbhe(=d$5%)n?2t}p<%G!F=qfsl!EaM;a3`Wq>7g(MW|g+V5ym^~Wsi-Fi3 zXHO4CiHDpH`A(xBD{Rii3E6w@?HX8^QoNWaA3xBM(8u?m*4`Gmzbe4`g=P7P>6|(?a0>rIAvklRS zd7^FN1K13}4duCmk;XdrZ4*9yHBT*nRUWc86$RH4gm~<2Y3_~8B;6Yl4Txo!P=E_DT>ln7nibG)GK+_ z2{&Rl?LaHVB^7X8i8NikCQZ1L;g}j{i}h!W_y^~2x*q@W0wc#$r%pY6`taOyeh1VgLN+KY#h(4Q z1T4U>rca;FFbs?%h!r*6wi89;w`kdRD1UC##?^u z$dI9VJ#f&z|NeV*8d0b5QFn~GCaIf+o7dOZYcv`-K5+TRj>H~yJ8BPZIykTchYlSY zGGxez5hEr~o;-5o$kDB%=N+2|1Ig{%wii?sOwO46OnmmP*Sn2<8@Daowt3U$9fx<+ zFR!0lId@d{sMBtzhxrY2^LHb@Kgf^>C;K3xNK)uR=Y?DPZOJXnZD?qysi}bh;~`vj z?AQSV#*&hf%F4=uf&$p{{)7523R=|U(-e^uk%9}Mj=(kv*M&&nlB^#>dLc3_WRI{W zkv$=3Nm5c0+%Psa7B0uf$CET7vK);I3`L3hE-{QI?$T5QhGZDg>?Fc_fm;#Cf}{e$ zttg8oFgS2%_|S(D58F<){r-2qe;b$2KmYvRy?X}^9N4pG&%S;8;H`l;3C+t-PCj`& z;dn~_6u1|X6GV_7*f+vmAzej^xDl4P)VS`^-RlGDZ@S&Q;e6w&&DC3Ow_bO8{kHpU zI6v)vdUUtZQL#}W(IGL>F|Y-42ymfWw{9faieOMAhlucY$Z*koA`;6JyoDs-5M+SB z6qKH!j`S%kK(u73U@S@JQR6MET2?J@S|0i|H1a9Z%O!XVk)oxL4izV%g0~dW$pUF4>8`ddJU3=!I%kkg2ap% z*;MaSGx zkjuNtMafoUpadl^QkfQ^OpB6d#>@FVmC_NmHIp3cr#LoDb7~lGUk^9LxXScUWn#E8 zB}UGPR&t`084=3V5c4=MWgrsTRr&-g=5- zqvRyW`BBPD4B#L$qY!06x>7XEre>^t-3Xi7!RG27a%r+WH$|S;O)i8J+yqv0Q~LOu zNA{GSE;_tgPF|L+R(EjOc4yax|{;(-G5;C6rEftm|nm zPBs;lc-G(TUhug0?CYt;d!uT1M8HOBc1I$2W;kuc^R=N!DTO!?EemX18iWv!`Tp%9 zDr7yX5Q65L!|PAPRlSut=fgoWJ{~dkmwj_@q!z!KUhsauS{4?Up*&@;WTu3;P3?cSqL3 z4#TYof98IA)%DcU*ON=>xn@;rEsP3sq3VyEH7XLUVMh#njAvO3!z?g}&5~3niJKgx zowk~#2F)CXTNmZ^YtQ(MEFJUJZ&sws{<`TbU!>Ph zGODQ zwpQ5UZ8HK($~a-y)N*%SAz-@W2rQ4|o4stWCcC!j#f@5VqoZ!NK`I7J4#&6X#90Er zPC*(1y%=sVgY-yevwWS4?!2FFtApWMZ}0s+#>qxL*IsJW3G?-0v#;q;uzWblaiyEF 
zjOF%v>h5HDA92vER74od6mvooU~2(a#c}s$r=E&;T_*BPlDNZD`|1FXc}}`4mfP;C zU*Mo?lBLx~&1^xK$MX}l!Vr@t(WuE}*+Q+*WD@7pdcuOZfjG7f=t3o8qFMugJQD~m45~Frga_R^f zIasSHR#6u<(S>3)M8(C~T3_i>pXf8tgeD^Y5{bbs7Zsz19w~)=Y*k52j<}I{}EKgVzMCJV`iVf_N@WRd``TX;rNH8DPQbPdnHEy!3?KyI^F@JJzm%k z+C*<-KkR5T8k^wQ{)_(}x)xiE4{QhE5CBcsKgiAcF24tcSsE|{rvWTB|57 zqxu2*Ie&g?#)zcSoBh`nBOcfpIN4_)PT+D>b;BKiJpjA|z!}U$*$TiBJgvf6RE!F8 zQA68671g_7m}N9JZ%V=QnRvl~kOd#Se+$OCeelHzucAUnX@gcgVb)xA)h@#&o$h?Xlf9@vZj5 zpwNp_lK=+f$ZI=yd?M&qJkGlMSy$jcuUx!%@y$2ieCw^ZPMkOa<2ap8S5{W`*=L`@ z=(4e~QLop#y1K#$W%usgg9Z&U7!2^S|D%sSqTog+goK3u1Z*KfwXhJpMNLhC7vFsI4LtTv$v*1()HSKo!Kzz+_Uu_!R+guy=La8r&~-;@#O4PT+lMT`mf@-F z-Me?ce*NI7gy(L{mMw=5A10ynwr$%QmNv}lIqQ()p+ioGPTHQ_7rk#+zg-9V9cWE# zT^zi4n|WJMc#w~e4-E5(luBe|Bngoy$3$wGcr%jtSV#J8cc9-MB?yvV=KOG_eo?+Qwzac2mnGP zd8xWA-4TIt2;YP3i$W;v89=MoAiU*Spyi2$$mgoj54x-_bUm30nG#55t)JUP%jHqbme zOqmd>ObAiNhbfaHlqnWvDqILxrbH^)DayP|xinoaN|5tom056#QF2n`xqXmpbMqI`+P{>Nw-vRJkzQrS3>f_0vANucnk7h^mF#t`2D+(^|}I*$`F_r`4gz9%y}N!wyT` zmhk#Dp$#jrlx|aSBd*iF2}wcP6pl=e?xq(!={xK9!=`;Sc=`wZbACT!+82{2emQx< zM?|y5IJ86ZNlgeLBF1wpi@SyuVVp{Z4OwGQiTDZF%5hzb; zr=@NOmTG|=J{4biHoo%Zgo?8XNQ3vK*y=2>BwvSSuV&_nvESD3G`fj=Ec;ymor%ia2D!JV7TccQj=4{3{Wr3-QkH zFX_3#*RsCMlKS!Wd?ptl1uE7s|5X zfa%GipvfLaX$)7U(DQGE!OL!CyLXb^tel6<_U z?I8S?f2l#Z<}RK!2%oMU`2C&g2U$)_0rNQ5`%n+X1HaSvz<7(S{?*pvsq23Z7R5jS zh_lLL=&zQa|KG)6%bscgb^)*-fVEh>H_6(iF<4J#IRFO%I0nF8j4IIJR_&YoHvuO9 zPEvq?i8OUMU8opOi!dHyW32#B$q4`+0&olSTPpE5oCn|lMrqW6J^jI@hd{6wTddqN z1*0x~tUlL0+W+h=R$dmdOYQ+UPmEN=VP%!=TH%6%WLpMf`?E|WC&CQUa0gA|SP^jo zv#j8Q7F!4fV7r`ToR_o`h;uM;#R3@4nun-Ik55b#U}0;kb^LKhA&=z;1HK5^>#*>b zHW$By^1`AkI~}DH&f)+qn`F{n=WrJ2Y<{*YMC`Bj=W0+AG#^kUfUnE|3c%JjJ8Q-=;IWCvyBd}bp zddNw^S}QEUH~20|Z|cHy`Idj)6ITrG9)r&d92S%6D@tzR&{3neb#TPwE<0rY^(xd| z3ycv?ojR4DpAQ3A7_~V&JIk^>YSgHK0|)B$`aXU7T)ldg2#I|3(MJUZ1@PP4+}zJT z`;12YUDZn{zl9Vv-a=6q7^K7YVHEk+TW`^AsjlpE=^UNljxWFba`NQKEX%@pgkl=B zGX(vh=n7m`CpQrtm476kNDXnk2o&nn=O3)9!el_y)ReAA2M?eCp zZt!X-G#3`z7FRe`LT{bl?q}8;J?+Nk9Z|2@Vc+cXubwM1;-a>+4I@T3lUSeSCZf zS^_V{sQ9R~sI-15{j&OHtqWOq$??(^rz_d<+1--6#iqt4CMHHkMy95w!oDRWBoImo z+!nUx>FG(VmI-tN_d;+LqP0s99^!dK$S@>di8utoZ9P0Zkd8`J;6?Mr)&;F6Po2DW z?b?F}51u@E^1%lmzAPj+tVhIWDS8QEzY1pMwpVH8n(D?XxB8m$Oaj%Ar6MPBo zJ#}E}{lNR@*PLIs6UoNz-@kv=>Q${9Teoi8dh+DS)2C0LJbiNEx`o&3uifr_drrh0 zOH#YE3v2}T-wzkkZFnG*+@j7WqA&u%TgXR}J41jJn%+XlC+b`nvYzl;sP+qGiL~=T zT2?J@#{3rS2i?-#LQ;4LP(bmGCabFv!9WNiQD0vVuaky`2CAJx`6}zzAbv~a(?|@a ztf96+)MAOG7p+zmLXx4PE=^WD>`s|SYLOPg9-+rec`q~%h-&8&z>0QWopnXitfR4mvElbE(7RgB7A7+he@Y%0Ds3@d^bjOkdw9}Dy#-pE(GGsdg0eS zwHNKV;ebaEFo0VFq-!qP-_EpuJ4Sz1E4*XTz3s=m9n5|(OnEm+df>@Ca0kC0Yx8lL z`^yeu1rS558NUC-Lee^u2_FDJnX3D3%DZb~mKV~gfK z3SyWqT_ERcT4guv&pQ-dy)mr5 zIruO6JF75+vM#KFcrmx&+@jr)b=xBlCbJ?K83wiBq@Jx_cG?) 
zPAhznfoL{x!^?@~r{k*5#8sV)t=<<^yUSA7W~tkS!Io2Tl{dQ;-|s%}UPi&4v?92C zHoo$u*lKuaw^D1bB$QrCC?_YHQKbvK5foGcx;vyc&jmF@&hoKBx73$DSD`YU-TtTRl_zFGWY!Isy>41y2Ct%Y7 zvm@SRnykrVxLU3B`oxfHsSYg~egebwmG~J3X}-kY%8s3-lX6(}%Ca|Lr#Rcalogu- z11ydo$g`CieiX;$a|psc=C56&Xtp})4?5}Y^t62=%l$@%^A2~zu?V{oC&Te5hgo`Q zBEuC4!V+1k*25nJ(1&BQ8EzzCPDeUycQw>%giVf`dqX`Br`tDJ95ami9t?Xp%v>jl zb68=Vz}LAMH%EIENWu`7TWHd>*lT8qd^%v4J85qWa+sxvqX0jP7o!9&NZFCh~!k-F(rUw=6nDy z0B{n^3l{>g4}eV=dx`HT;nKAmB$78`Lf&Ld{>jE*P7%(K!au|jI9(|h2!R-Cu*X4%KMd0)U(iNY@|x^o^<2_xnJ;yuJ@5I2YGhJP^O6&nFq z_*I*;_GGL)0Fy~Fuyu|rPLU(G05Oto0Kyy5rX6lbjl38WU{VlDgD{Xvy|Bk1&Eb)I z+66z&Nkt^i(OC$G6ng{C7LgL!6b$>+0TCWWEJlhvu}*Ia(rIBvGf2y&FVCHJ7E1*_ z7VGA@;B*^zOy~;2N=NcfUcb|Me!qZ{sya!pkm2{kLP>sgk~&CTtkk^6@~{9)3hNqW5=Mhf43_uX@8CqH&1a&$_op>W6I#f#xC1`HVR$tRzv z;fPM?4OKhogu}q{-h1y6E&(xAQsX~AV!VZ_ru;uuq1_SO67 zCe!KA;J0vu9UL6aoH_F%yyZn83~c=R_3LSAX>M+A`T6QX3QBxje0W0m^q}c?Jnp<3`tIY1$9uB&G~_oloI7{! z^Zat4iOxv0|yT5-Mg0<5Fb2v@WhD|@C3ECw(dK=Z^!N(uXKB5cGT>s z*eK$84Z8zaP0_87= zM9ZobEi1y_53@Y6tZZ7@+|*1_5duKL!+|@6M*)xS{Q2{@Zrvj6Ab2IiY84d~B_$=b zwY8M!LRwzGW_{3yLC9lz4dS;@stHw7A<0c@utiNuA;(BftEqf0^_6b6x-(NWgN{S9 z%20_4xxfv{U=lEgAS%S=nabyqaVBdhp@n=@bBzd{hU}V(7}FtzA4y&kwU#D5Bw!2C z&L#91HQw@{rc9#8|85q+rY};-v@BP1@`f{$$bxxvB^!v~LNen?j zo-?$JbE0{Uf5-f{jS}1ZST*lU+dlRM0JLwUp&GD=&6km77IYVU?!=rHdBe)AC3@1! zB3B+tzqplVZ50=q8cz<(A~R4!n=dP{-_qzW3WPaI~_*h5+keSu7W)Vt#zK4mm9&rd; zp5}_bkApRoERF4u)|Z=ghm`~$JBjy%s24DMRHLhC$tRxI!0nA~l-$R|gJ>g#oQL-9 zi)z`jV@!sGUs{DR)!oT@Z(2tNI>*aUmLgZ2uPn))ry_q>s*mI3O5DPr2v2aiC(@`;|w$O>If zA`lx~W*i%%ikbrf>%um2XcwLhSqQ#sLYbo>-4|=hs$JIT9L0iY(qXKbiC!cc|C%#- z2Pc-EUe_rrB&r*FV9PqCFvejEhTu%uk96Jm{DqF|#P~wR3EOt_Pmiu+oYZK;?4((k zaBEKRH{wax+`PE!xI2-}DVNarayd#Z`9Y2zjqB3KP()dth%g3bOk@|lD@aWt)@E$kNj&os&$D@33` zD6~ctQXoL#r)_2Za>24_5W`k_!oNcG6wEU3Gn4U`FV<%5|DExqMfh=!H-oj${5TEP zFUVChUx%?j4a@G%D!DT_n=ra_sdSE+!%+3XNSBOXS=syWN_oCKBT`({u&+f=ZSruv za8)@|WMh~s@CYBk-6@@OA}5)ViqprdHAaFkgjXMi=~MVSV1x0Sfhh`Aa5l`PHAIqU zsg}lem9^FUJ*cqo#b3N|MEqbH7uvsr{U|TiQ3Tz0FDw@0o8Oq~-{XLNrMRY2$Q#?h zofh&dulWO%boFp4BnlQmFgmx>^T~tw;5AUIt;ft zOq%UiVVjj+pXv!YeH{0ZOJJGopgP~)tMOSimWQEI^)2`Axlv<3y|yhk7)!tvJbZkc z{>8x*rdR}C&I0|C9S?5L2aVjr&*Mc9#kTA8YsT&&eqn+ew3J$zKw5+hsS-wq|27ln ztN;qWL|(p8e9W=U`{99|Hij3s=4aAo#TO`~V#Hx!vU^1Zs746)5_B<#F1oS+up9%(?}TE0_nR>ZAY6 zqsCI_MN>K40((?aJGzS_)%%l+X_hAeL|!q*=x|T!!T@@mg*P_x@e3;00p>8{EG5`?uryQtc3Z zE)mR>sM@ts4~XOcNhCSBnA%!qq5}Z#0$c-i&COh_tY}gRevcQyyHqhP`Z*N}1&tF9 z%QAh*A_F24+8JRtVbEe1PEp=S?66`tUlhEvl~-pUN;HwG85kK&)Pak_-p+2l*#;{P zAK>Kz92YibtF?#b)AftiVf{U*P{?SR*u)lSSlBt3y-{&Idgo=4EfJ9q5g_2v6kLF#Ye{wJaM7 zx*ov4$*fR1Q$s)yXz}D-dbJ8P3vkh-&Dg7)p?m(M3E`BG=A~yk4L%P2L9NM_PzSJo ztE&O2Wpf}DE;;lu1g5?v;PsJM*cUs&+zxonSFQ0Ayjz~#m&8}ZxG~?UTn%o#cOeI& zjHh=EN2S{pZg3$bW^M}mVZy$A$tJ>EZ7gXCut$cV{8(ZU0TWF7<=F<+$&1aGk$+?)nUL<89y_K@8f zWXd_z0h5o^TU=OCn@F<=2Z0?Lh+YJlpEko*W)2_i6nD#DhXVIyk<+T6ps2UN828V( zJ78H<^dm?U0dy&dI?SkvZ#pnTrc4W>Q1H~B4G>H1Xs${S9(l!waK80j&;$h4o^`Ej zI>9w*L;N$Ze=oeo{r9}I@Uu-a(2wICZ1dPRq(J{$&P%(DHrT+JSo!|jCkfVU%-VyYu_pCH zNQykMC+7F`@m^QS2m_+BNgx#KoekGP-?OV#uPe>Js4MHG{qm9(8J0~;J>~|aWSmLt zNv^F(P><`U;3xH>)m`@wg(M18EtCBz;@XhO3A#n_vwcDd~=(k`jIvSFBFtJ z+><&OfVj;6n z-Xxt~5#&V!qNs9!Va6Cjpzj%nbt!riQCqB{$C)nQW#focUT(TN6hj5Cv6Tq%MDjzmoT(oAj{;pKuH?-dJe%) z`Y`Ro==Ph?CfsWtX~&(lWGDS#j*@*5xM}KW(tChf5P#K$wf&#nlQTC29Oo&V$#}pw zD%1(&AsOIsu6Ql6{r~iluM@|Ya=|%}K(a-%K8LK(i_T`Id%Q@XGr71UTi)*x;(b2h zkq{8hJMpt{ysP5xkFT{Q-MB^c8Om&1NMCG?t_B*+<%(|vxl#UxD z>Nk}0@)e-8uIg{&w_$C!P2jgh{k1yjweWk<`TXl2Y2s!ipWP3>e}|}@hXM@)!`=NO zu@ z0dZKbAwl}nFaxb(Nf(bSW`=jRHs2{PgIZ9;Ijidg5dMJRdG@tQY`PL_<^Y$D&C0G1 
zUf4pWHGI>zq|L}3-WB3C*i+ZxiL=0kTf@dLd#0K4jyAHO#jzWz#duV7{N<{@xd?wR zmj94a=$QDE1w!PR(?AqL*dEO)sYnQ!YmeJN{<}+ezw%rx6hH7Q2%SZs+*M_{Nf%+) zINQ2r`{;*95|7rAc4A<+RQs5|3-%Y9h}&tef)p({Y~mkUR36$*2Z50-+l6{&j;i&o zS;Ge^B^Vz~R`DVfzg+iXB$KO!DmCPf+h&WQh}Mn+RgDHnvit!=!Rkd+hmnmk`WD_K z0d{Gv28jjz_vQ%Xd?EpcN_UDal^QVWu_Dt#G(J&;mCOA-26e6vgdRm}&Pgjzz%tv7%T(%*1=&qrKVc!57YZ zAOrXbtz0WSyPvU~)vyo3N-4h|a!`QLDCrcwDXCa#|DXX?0g|SZYKKJ6Z!cti$=rW- zkT#F1{|L&2VEk&=y`34us5o1wOZ6ZYzZbOAYBO8_8A3ewsm)--X^E2UK$EyHO;TqH zvZsCxHoC3Uq(~d97ogSN3z0p55t)O(<-2Vl{sKWUh1>IU9|hhY1rjKF7XgI^^43PdI^n zdbxE()=zcTA^w;%{5fpevK~zxVjE=T25Nkh>GuMEmqXh?B?|UdNC_sjf*OFj4>F!8 zY2Z&LIJp@JnAeXiJoP52q{l76D$OrQY`n5@JokzF{JnM(Cx(S$7+ z0-`qHfnbyUT&G?iAPCak-3{1YAt50^UKV(kWWI*x26x%J6xdwYf5t%}f#@j+apwsA zVidlR#OS(u=9)y=zKgqOr3k6646bLn=b##0m2I^?|M@#fJ5V|+1FJn95u1HKcck5I z#?$vGvSKU1SvtSEQi`Jf`1qi5fWuAEkMMbr>wRw4YSJ<7Z5j@=W9z531t3h=l{!ceR92a;qyr1t5|L6Sy_Ya%SR;LFe z^0C)mqxosd_}O?#w6s;^G-|ZiT$i&K6(a*Is9ilR2?saFRJP;Fy{Emi9T9}r)CQEm z0pLu;nWUuTO@x5w1zz`$bs$>ZL0z588rP+=uaAXJ{P`_ih6i^{iGmH|LI__WrY)(4 z)HTp%3$duN5-U7MGyF6Aka%{z1bo3~lX%?f5f#C$#zy)F8|xufO7yJ|uP;MRyIMCR z&;>sM81x}fb-IBR0k$uNq{MteA`#FofuJ`)Y7T*t`OZJy96Y7gZLz60G)DdmSa@RW z#eJirl}|u-$U1y>~E|PQORnF6_^iflxdK+#XoO0I}n0; zzrH&X{mpH-`}g6v{x2{0rZn?uLgcZKCGoisE8D0B@$C=Y$Y0;pjn;kAi1RI3UJ+z; zC%o108b0*Cky(9&i3id*QN#*gRTr)@jP7C0G53^tVg>rgU(kYPc|G*W->9G?T?QpU zUS(*rP4Q}$G^5X(?30ichOW9sWdIX$Rk=zgHFmIGKDol@d+yu_F~0C$S~t=AC?Ld3 z^&2lH^S(G4_X_XF1}0E+wk)t$Ef3G84bK|EVg_;7?OBk7`7nWZ^K_KCNtqZN@O|>?*PepXjsbwpc*7T21df}P|TKd5tinB%vcuh(cNoU4(S9$MuCHYMe zs;{wm(IlRQe+{p@jnHzQZ%1tamFDSI##|NJD}blr3c`?-jnJN&={m)fT za(Q@~l3C2o%k?4#ZqyCC=I)EPS#qw$RV*x47ELOLxLYqF1h9z#I(4dy3Hwjz=CC1K zs<<(81ibryK0dCQ2woh|SLW4Ij!w!J6JF$nFGUS8|JtU2Q2;`V%x10gdnn!J&kCQ* zzZtx~qfL^hCQ3tf-W52lbB2UdJd#9a*kAE+VTEZqzOD%=>N^a}JMOV)gE@4yf1E6P z)6Y{oGJ#p=k>COp;!S=_`$9j=-EnfgL58wP?I=fN(-1)8VW1wjGi8V)K`1qWd!)eV z^K^@+rM4yLs9Ctm`>lLk-k+r0-Y9yS$Eg&JpKTzG>TtBZ2`M1dsiJd*4`K89qIPk! 
zN=ve@LcIecGDulmDB~jj2s7Q=qi_-_rsDPTPAH;>TR3(uk%5tejV{Uw$q<^LKU~{g zDr%47emZJ989j+ZhvpmiCmeN9!?Nd_RQTouG@Axt_3^3|)PX8E{b3jIQ~RKr@;=t}4in;zC*7^K&LKct1$u|`T*$`O^mU#c;U5Sk{h z4CurD{Mshvz)*W9tOU&$ZcY4m6| z)|;Ely#JPox(&S;DLu_)@U)t-J_@~o#Z}XSlV9I0hsfD7Ocd2=m-_FS^|7b}^1He0 zNy;a)&VF|&dUyVjlag!ho1PZQgDM1D+*j3`Z1OE9*NFQ@Wfxvx>$NQ?Io zw(hir3f~)Q*<_W*1`NUY3|5(S;REFVsF+ka;OpWBa~b5==OwEyH*+OjitxjB-OV@G9}{+I-<3I6wHu04Ja< z!gv}%Qa;i`U#3aV9gEGGP5YAzgH)kF0r=l{R{($ul!$r&)rC#(A22#SJ~81TunDYU z8@q4g=sy8F)4mTU5aL33!~ddWL0l`^cKssSu#a$dhG+&NK_A_zsaVL8z>RAg*`*Jf2QZpkdg1UFy9#s-GW=+TTh@O=Fjtf()d-ynQfIoMasW^eo@FNl z()1|=vBnyhCjyt@_8b05)G`-aS-otjx?BSPTBQVQU0CHiqqFQGwE zfk+4|^nKxHhnPZN0Kz|BaQT6XJ_Q4td~LX14Uc%%(HtO;Okl}R#AKvTG16y4Yppcf z%x802k%$KR1Cm=Rtul#-KVG0<(1*YGlI8C_1XfXR;w`}W)b4v(T&mRvu^J)+T=Y)g z!?SNz5cP`CSv7j%!l9K!bpjR9?&&qO9x?$fl}LF&Mk@F~phf3vws8QYk&1WNHxe$J zsP78Uz7+lNtKGwAY73@+Jc_#s5O@0BF(n>axvY3%;&n5cd0^o zNv;H6wmSdc$=k8YIJ&S0woEY8z;ZT$+x*pxFOU~IDp&0xmMbml0&{qTmw!k^N4338 zlfN7ReFu@TUnT2gZ#Bo9Lt)mLqxmz=$A8|wNHwLBTTV&JIO6|?SI1{hB_Dj^Oqx*x z%XH!U2$$ebMA46?=TAG6Dn~t!Otbn2S#KmB-;o5kJ!Wc;2MY0AETi#$yzHL&xrI7* z$c*6GAW?p4v2)Ie)W{a{-%pK+o+GDIhBLzyCGsY$DgOIU6l9hA7}e%(YvOoqYI_`F zfGwT7#7A8eBOeosAVMJlVs*<&EHnCMVV%WKIlxt0U6tf2^Rkxl&=ek?;#fY&Q=Uzp zp_&D^&{kH4HlV{)GAN5U_XIt^0Db>}cYk1gPsI>P2+a6c<4fJ*=E0VjxTB#VVT?*% z$rWZqHTO$YF|m<5ZFc|5R?(T=)t*{q7v-ZNsICN34{W3db~0fY5;+y|&5d}5 zYhBA}*%d`dw7n|=j-Gisx{BqFp0#n?Y8d{K^No!s#azlD>5^(0&NVVDS2JwqnjIc@d>t zXHm1E&@pfyi2y-Dpg`S?-1&tvY(gKe$wdiuYS22@JLv??Tb}5f)9ERz_&O#c{MMYv zz3=)(>TUR|N|S}M`)PE0aT5ZJc8cpyn!|zw)(yh1jhv0{P8z4lD|_<@Fp*|*t&$nM z5^2{l48qvzkc^4Lzqjx;y9J9SI+kiQdp?iRswLqH&=H@1r)QH9hYGPz?a+_UxR+ny^5k7Q2pkRUuzA;IBZaxI(L8&WI);aN zuI|lmGD#k_+MdVF!LVaYgP#&B7CyP;$P6vXkA8M; z$foJ>{`4~c4D|baK7I@1J9ZfUSd_IvZlhljY71wGk?c-^_5*@OV zh?Rmbl5%M2cfu>xwJgd6RFMal)?Mss#C->g(fKkWpWFvx5T-&~i} zzKv~Z3X(iC-r8hEB_o#{QJp`f>VVOiorza1;&a5!E^xvrw|dsKaK>49t5ryZWTGHA za=N>=GAZtbeVp70h3wPX93cFT;7U%!>0rxXPTQf92lH9~&@pr3%Rdq|WJF1Y*Az)U zyd&b4{7?$S)A8H=)8aI#Z3CILQq_-6{ddGL9b{GkDSkn4ef2#>KvjE7;(n?4$M&z7 zHoERi8t;UX7^1x=s`~H{swEokNL@VXY9XW+?}#U^=`SzFQTsAhtHo!iW#{cC=F6eo z)U;>lz~uSYJxJZVwawC;xFyT^M}4->+H+|)bYW;a@li49y zYtgXJ_n6xpPU37PNJ3an7&qM#loJ#n@(>Yt8ljSv?{gDe8{XKiW(;_iB9Po4OOeA^ zNQn~3vNA5&34=8Mf+BL{z7P@)+TTi1o1ne%qUPrW5Y9*FgjwtmDSbg%N1)qHi5I@C zqX1V<$DvMlzm+EDU>lYVKe`JsKqVJ6Ktgc6a6rNc=oM+J8WH`HAg9|+R3YKk@{s-Teto;dQ1>5kgvml08aAfh}g&C3u*gob|-lort3Yd+(}Yhctkc}DDE*RMd%Xn zEimZ|azD@Unylbg?%OPD&LoVrWOhu!U<}MCMhNXGxp|$tBqnlkdUkYaQy;oBB(n-f zs^l2Wp>$^WWp7GxsoRqb_O?-ssgjaeP_A?$6|GPa;^d&7roT61k4$K0f_71;{r)Tg z&KkmSVJ;swMFf=1Xle!@1vRCbd1NIhZ%ik`DX~O^R>9YIyyczr|Lm_=+5>Q48TR{V z8ic%?%3<3!!73(qBnMeq==YVaEfO*E9H_;xuO)R7@se~5z1SH=zowMKqzeWtee_fl z7R``;myd(g~@_t_Zov*PhOVdjj6opu2<7v3>B!w6!qhylB&Sk!|nm z+cU}i`cv~Q;yofVYUwAW^y)GNWI)7WNUDB7;9BL)-(q(hq%flBqnjr z@0#wY%r0JDk+^fs`3G4CKog5rA*a5^Wc7vl{O`pUlo6ISjv~EYhw0(3e8LG(W~MT$ z=!ILmm(F#4B^!)WQNcnQGZuimBTf_e<>fDY^JkbbYG({s_ACH=&4`GI7Q1D&6%!yG zrNVE3{bNLaC$m=n2jdTA-@=Q7ZF&KooENOwD>@|X?htjv%ZfZif(b_U-oYsqV?;@k zRxQm1AMv!=q6iCuUFKL@5R)PLo1ycHph*i7!dvl;{tkF5mv_6<;~XgMe*lAD*Bc#B zR>&;NY4qy8j~CfFIReK0-{RupQc^IVPV&9BbUR$j_IwSD^soO#2b~BOK_^t!sxR23 zYt8b|#Ssr1%gDy5krCu5OWfk>lqbaw9t7*l6qPUNUgqMMN7KBjfb3r|(#wNk&OVo} z;LzJBrXi^7>gsA}NNC1ctJZ~6q(dmtZJWnU~@}iNH${P~w@;9en|@Nvz;XF=631 zgd``h<#-_bz@J0?pAH=lS+AYEAEr7Y+kR}F9n@p0 z=HtV!#ZiW^Nx-->#hK$Foaw@1(P}!IVo**|(9Eg{h)?RP<87gAOT7o_>#ewZHp%{J zE>(pFPi+i`SGKr2v$wf%P!Al~7hh!lHay}Io#@xfNZvham@SvBRm`qB8tiSQ1`~}- z3u?!CR1ux(%+9Oj*^ei1DSvYaVWu=j?~~n&Hd{9To{en#8^W(~_p!(kaOH-6>>jIOkx7 z$B{GxrEx!U{@3Ayn#?te4-e}XG%TqN!i*2wkyC9<*R2bI=aJ&uzCxyd?`s>|PQPE@ 
zjX$BMB+rJma){ zS38?~SO8jO&qGeSef# zcQw{87ZxR_gSSl%ho`<(b>V?UT}8h(%bOyIa%f!$2ejw;iXzFf@LRbj26{<^qQ{p7 ztRG=?mb&nN$?ox${7$62wu-bZZ50yD#u&Gt&qOzS3MQJZ@-1ig8#1(jHDAu^E&fc6m{H8Lr25ER zeHU?|ImY*>)uJP5wp6^=P&w~xuR+L#5wGfBjgyb7R~umFE|XlW8OnxT8EvwPFJ+QN zHRNVMg=9j5HFhto^VrgG#n`nj$Tfc#-hc01@0lEmo$zywd8w3Ke*clM){XLtWVoI_ z5ytf&eZzU!NleL~JomYqRN{m0L2`dm48xo!!_9LU$Bs~N1Al7q6MRObnMjPEWE2}yE9nA%uAYdcc5*<&lQ%;3D^y5UtpYueIBB*oDoMnY27e<@Sq zGevi`=mk*{Ij6@;&Y~1D_u!L+R{#6^WEHa1U^{Z4xF z{bE2DMG4)jL+RL(cO3v>w3c)F~BK}gS!=qX!3zIBZ(Q~!C`Kn@~6CgNG zxCHfuZRIx@_({ljSAeI6ZnfDLM7nh1-U)08{QpSzZJKDAw9{E3%IHB5 z@YjlHWGat8b-cB5-Yo};TTw*wgvA+tC4W!UR$yxWCgW&b$R%`JqnRiRy!wr70yuoE zx#sc+`25gk!Q=mzz)UIsm$d$F2mRX_fEsR=sXEwC6fGZ$18BsrUg>|-+`7Y4Jgwn& zvAa)KcidNWz3uMW#WgYqV=mB4)LeALcL?Y1cs;@LzepC7l;hv=u+? zJVlMd%f%JhOUsCVqRsoa5GfRcR})3v_rNXJ*vqt*J9q?J?U+ z2<<^5c@!36vo+Q*)d1!QB&+Gp#JIg}7}UkYAc=bX)~`q<1DXT=J@)whEdY9VK)AgA zlNTiP=}bOwI1L`^0F8VA{Y=&y!MX+niz+e{dv-jFDwLJcSQBxX^*$fQ7$Y%(Es8Tk zi**nopiDi1YE*2>H}PcL@QvcA3_2S@=R%d2NYnR{6{yTrI4t(F+VRfo~GtrK`)`I(O)?A(8fx)iFN;-hlGZ7U;-Yd4d!01%u_zv8Tf3ZWTZRh<`co_Iwb?X~T>+5=02AJyRG-!J zb&aClT;@P8N;fPxx-!ftn8BN5BRI0cKXEukAsvA9#i2Kr&jH!C_W!zJJ)3TJV{h|n zEB%#4i(Jo0Xsw-L$>M01lWm-{1yvb~!-q?CqOy!%kWfA@I^Qp_=$VHMA? z7ZxiMbr_p8mN6noHUq2GUt8mpXYPsMWHv_&=Ru6o2p`5WxdxM^-X7d^in@oaREaxW zz-O;?98RE`M-dB!^2VpjXUw>4Hw>HFzN%<=M0MSkIt_bnM7ZNWqx??`t!rKQBJt@2 z7h#SaG=-b$UsLv>z!(s!D*&=zSr==YreehzvMY)Jsa^hU<6@eZliuXghVsxP>KoJb zYipXnU|WXNrAr@H>uKJiYB75vQqn2T9EpMSFX?jgXBt5Zb$oALHKU~MFgJM+ZsEDMf&1bfOQTJRycqTMk7I5&eW zaQsHTZ-?4zHqz_kCHy8~Qbd-FU*hscUFwL=hh54FVDY?Z+5E95{>;D&^Y#9wK(P!% z3|-Nz#y{L1l%)uP0!tJPNrmi~!Uu;xBft(Mnh3`DBTlsJ%Y>zqBLYrxS*)e!H+4ub ziHx<2jgAaD_{lnK3sui5;c_zdb2-i(!Nx5nF9vhND=p4J;f*8~4(OHE;-M183CC7$ z-vRiC?#PlYHXaZG%Ub5(c~sZY_$vwQ-!m;gagXT3pFkYj21~wpCta?;6S0)784~UK zcDI3B0#RAxJ>D%97j}2AuDso004Hk&mC38E>uXC}#u@VpkCM9@(pyd~6G+tmbvwb_ zQ^>J2RudqkFjy~5GNEwvX76qk921jHvc5oe*7^knB#(V`cb! 
zAGre?%d1H9W7E+%OOMLqp{bnmk9~&VeUK(sr7Qqwfxbu9FQz)k4%ppnP zgc5r@aKEU)wjtf066%7sO-GB3B z;ec3!Ou#kz`!^~@6%AQvhz%WbJTT8R`0MfUQLlCh;0qrKLn$maku6%gdbEf$0STgh|=*h0X}f;T`i z2ADm{qlM06yK2SSrYdZ^g>>_Ig zU#|JmU2Nwc6aiCkpfpWAmO%EYO&o4;joVJ1ukNCcyN|N8YSRJ{r&yrxPp_&MyQ z=J9J!%l1;vGq;9Rdc-q&y?P@Y^eOuEpcoMO(bB@jIIOJeIrZeMQzTNvD9%>V;LkW< z4G)KwfR&Jt1b}#Fl~m12Q3^3(1%m`Dcv^!f2ws%#c}t&rQt57K1Q-Z-3eW|~Y1yp2 zCN^20x^FB;bG`hpH&D8fki5jNdBV64;12)B0|O8kpVH^KsKYzOUuEdU>pw|f6t4Z- zA4;4y;_LI;-U8y+z*vQyGXmK|0hi6(WHx8fvId72AQpmH48B#j`dwdbjBg-0f+B{I zkpNw7OIBk!Q_V%F;qxKT=ezm2mXdYHi2EZ(?^2?6yIAHog9eIDIbPIWMGYBpX~D2Z z;%$Z(*cPIHleLb z1qc0a3HQ3nfti4G3tyF(wuiU`r9slYe~XF3O-ObgFR9M(M_~&AQ46pfgEIc1sRQt< zGIB*~%;{m{&A!;&*Xek$BID(_iPtj`5$NOMHfvpZuHm znOUc>rL60U{}~C&PjUt%&~5EGobEf}V_W0n0_K=G{*@E{%G})Ye9DoDtfZ(lhfnrA zpeB_}rB=Skb=&AQ(sxC6SXYX#;vL7RD&Yh;$g3%=Mis_6THs@tlHiz-NN*PVtz{rL zG7_2^Z4DbFS?54-_g~R9%U^`)=}MX0sT$?5V;kcWnJS?*pkv@UDNx?EA)~e;qctMm zH$J#?d;L-!eAR~_3Hb%F=F7?;M&F5|9=_l8xm34Z~g4k(M1i@dpq%riXxT?L3 zjTajJ$Q)NUTvvZ*3Q*=mk6DcF77P&Qa9mA$r1XlC zA6MnHHx8zF=2pqTt7K-v^1ZCKG;!S2#%RTf)#PYs4}j{ZcF%j zu~_4l$K{K7bl1`^I#yMeQEeX4MW)d>j``1}CQNH%A3rzKD`MU??|nGShs$35#F1y3 zQ%JS!t?+5+G1xa3+TiqN0DnE%>7|}&g#tf-o?N<6@06=l*PFCaki2imo(NB4+K zPkAOW4UYQ9e91HYNj!>WIr~ZVgggaZHIC6Gh0>e^%;{y*e|&j}q%_n6N3M=Ibr$K{EV>qcr85D9&-%M~ze#B`v{faajpHfR}&X0p1 zZVJ+qoTRdi9_QZlQ^SG_PL#th1QN68Tq4|6uG%7e>FJ z_+f5wh=13Ce*bpY(00V7ObFk&-?%&uho*55S}KoH4oP=!7@0SnbmHg+64?osP2#Kq z&Ey$T9KSaV@6_6dd<$OrNw?cug4MunxqEivaWOe?y<71xPG>^S zBTr*2dLFNV6AX6PhH5Hh)M1BSw~efFo`ru>FqIJasKO~#S1s>V(rf?2V8F!+Z8uMy zVu%u7#4lJNu_npqklYAeL`%u1#~IMMrxHI*ONCs_(|MZM%<*JahwdM+bUvIwbezzA z=(#e*HuO2CkljyXsw_&53Jlg!vg+ZOOm*G#uD)x)O*fQEX3~C2z=Dl644<6YY)L!} zkB-)9=1!DV8N-fdux~_gLEsl6qBefrXik;8W<4|Bo{#wKU25e zBi+R&vYQ%!)?A)IEQouA?kZM;7|+VcxZdr<$wd+}kHe@{W7PUl{yH_2rDAO;BLg2E z{k?TGY805`EL|z}Z+qLK?}0-lfd2z*h<(@IHpCa^Komw?r)oopMGKwUVNjtrW*`H` z7CSVY2djcr0dj!3zqyQo%=$_zF<7uXth5=;t}r8OiM(6;Afu>1v||($Gbd42&})oRY)WWJP3cpd z7(vlabO=A)3^a_eOu{!CDO+S2Wf!P$KsqhNVx|PHP--V(5%0rb?e%!uigs`kF3@g_9)5Utj8od7Nf?7 zDEUtdSfU=H0DhnYL}hj{o+#l)C$cJ@66m(KgJnn{RwIM1V$az|$$Fu_z>mCeH{1o* zZ-+h}_d7;U?x#6rY_z1zS?c_4jg^g+-)p{Gx+uQ?c}n>BCJu0p(Hr~c(^-(852gPw2o=~Hxt@~8-~;4Nr+xq^eo z#oUGR!VlIlO_iuJ$luiBtQGjjh%-Q(cOqJyzw56(+kzgHrrz^~jqHX4gyx=Stq6hh zsa2KA^&zSaJ^u|mf^V)|=$9wFRe4l$E9c!r;NYvx3va6XPnhVo%k1UBus#K^XaA6U ze(6o)VBYuF-+P5WPtel+^jF+nnojpDP7m$y(Sgg%6dy)C$)cS!*x%!BK=LN~@wFJ?X0 zRX>*_RZ%y87EHHU)!m=VC$jwglYE91&YM1|L5Faoy^;Ir)=7%6YFJ~VKw4!ZS6F6Y z3~Tq7C`Mh@=BeIUo>$lNZXlpEVj8PV_V2)6Xh58W>If|T)1A!mw-NH_6HCC*wp5&d zQu;?F&3;>tPv6(-=S$;RWs7~$EB>o^CH3DWN0(zyVwDHR$Rxc4gVkpA~7Lh!hD7`oNjA$e*3Is5e~`xd6ofSx!+_$me0 zlGCmx(v9Zbe-xc_P~{C2hIiX)vu)ePR-0|xwq2WDn{C^+ZQHipe!uVj+01RaGxyFt zo%6iUoABbqw;se}D1q!v1v94*u%bu4sOeNqvf9Pn_nzrzQ~m7xuY=;EI^DyIm-=@8 zj%`^pH9MlFf_apgb2qTkYlftHL2{hsPC+MwTw?vQa5au20hvl_&PG$3m3*P?l-|WP ztD5j%RegN%jRNZU^u6O2(glk&^_1%@Ez4-SHR~pZPi2%6MYtenZZd(=87pcB2R{N= z-G`j~2+E6r?(pcYjZw$st+x=K$lyGy0Cu#S{sP8f<5U|`6o)kVvvE)_4WAI20;f*} zg-S{x-mm6a{!1ZgJ^(*R=!mZ^x2Cq)tB#{>en;4PijDAQOb_}Mu7Yvbp zsmBNl+c54J&8Msy*n;ukU?XebC?QBV!keKl7(ozEe29$E3L01S1k)n~PCIn!0J=-a zg$$@A-HW3ao8Wm4nZ4itzk4tx-<&%lW(uWT89E19Rx4 z?wI}aSvz^CPjh-S)#gX>%OAr5RJsEM_eLyK14jfTOi-Ia8(6m0y~%V}ZrcBDLWaLH z+e0D1uZF&nD#4zC?6u!1Un4}zI zsGNE z030>7B2?E_iBb@8m9qiF#5gq;$nai7dG27%m9aZ7@(?OfN?|k)MlD)Nt5W?9))d5! 
z+gq^b8;n@|=+YeECW&xLQ9Ixv?l4@PPTzlYHb5+6fb&Mm&&{qj0OH4BoulhlBL9P& zzqQm_UVYH0v+1(pNcu8tCncxrIs3#>hCVv*j5=CY;`yjjm%tV0sBze|eb1tx!qN4aLe_!JRDcYXR_`&6R2Mc((YU803{V zC7kyXkd~I0H9bsPGO5hC?R=3m92}hWT0^VnbJg745m7&j`klM==?!OKjBWp^Z9bjSY;$8^U+MM z;Qyr?4~L7Yx}AU=A&)eI;05TeP;0b64Rl5zoYG{frpQ0ZdGF=O=E(kypQetCfgwhT z92*%U#rd2(ZhZ>00g=hYhl#~n%2vLO#JUW;Ym#)ww zc0@%*L=w5oA;u#1R{HDT-9JM^!v`ewcP;!4t#hR)4noo}lR;CS3_=#uD?bTc2wae~ zPo}eJY4IL@s?=Sq`hxsaD5m(Ns-XgC;fqXQq!d#8>|~Z` zaeQgA4FylxARP%x^}rSsSZngQ-R!(ycXvCVqj>=%U|M2)JhSmw0x)eGnVO=bqF!#a zBHO%Uv)lA6W4=k&JD;I1zxzr(ID`>MS!Z&=N|*XE``O4H;Cp|3GWV97U_>0u8d zrP&lcs4`k9V9Cnkw(aMw2~(R?bNqU`seLdU7QxNGCnk=@!SXpdDakj2l^LFo$sXfy zc9;-Pd4k($H|@##40!Gb#qY(c`>YXU;N!YG+1(A%=i+)hdAoRYJZ48?DVmfE)G8sA zmzIv=Dh~}mQRpayJv-#R{V7TL4-Irzw<53^DyKC+(HflCgz%e!fF4>axUw}m!C)Z* zY#5Y$cOkfSPB}(tbZj!ior*|(cI-66ouuF0XK>Z?nL-tIiz-nge6_J}x8VtIDg|w% zVW7{5t+;S+Gu#1coRTD&I~Xy3Gh%mX*xT;!HC0kt-t>bPj}*IR()(G2_4oC)t@P72 zDS{=xi*D?Wx8-sR{rT0=qWtt$YGJmIN*n%^Ze|QJzPQ18^h-YvFg5Vzx=1MIm@tNz z2o_nh2sg>H(wYED_SJ+;y)-=FmY5B$5e#O)CfDMwg*4XjP*07!D`cbtzLx0Bc-+ir z+{ie2VKsJbsR?DtmFl=JrIW_RO!;|Ap`{RQ)prj|Fxe%RY5g^3o=Q1@= zUNzq4e-T^oUFx5LYYwFq52O|gr4EN^h=`^Pi=>hZrIQc#W6gt_v9Z)unwy|I^82>W z)zSgZjNy8!^?Hjm&qE{jI2WZu(O)^qO=TMxDz)Tg4`x=gse%6I*7es7(_ZY(cRx4D zw#<{=>2kj zoG}*?4Q@EDgD}N5hGHE-0+R+cVXuy?z<^U?%nFA1NT_BXd{11vo~gz_STD#So@KY; zMkNMZ9;}ZdKsnX;{e1=hW|7{0=HNykk)o1ylpA*-KxZu($!hz;=uOWp!V)qm9g!_$ zgB4oMzPr+kZPu$c@iw&ms&mVwn8&#hWEM1K06E-~q(5pe5JZM%Nm5l4lkUlB9Ay9& z%e+cuVlMROMk6Oo_o|3Se)4$399M)%p>Wn<*t+#0&-Y+^AlA?#RoOB@;A+%zTk*HY z(Y>Q4Cvlws-V>r=&ZsM1?11!Vtx%kOP=rma4jws#wc1v;{(G$#doaE&0uP6O+S79A ze&ZfYRLkgc3fWCRse6S^^xo?{WmP#(lprSWa=|I#4J>VA`i2wiRhxm18oJf$r40q6 zm?DmoWk2rw?T4GX#wp1(ql9HAEM}7e#E?)G&3Yno9d3hQty1)e-_$3=GL+hLez4XLX?rpSn{B}@ zhE^A>O<5BPAaE`7Kle{HEVpy}vdq|cchrL6LASx&Iv3_Yl|xn)qH+57NJ}vo*gQ>- zr0-U2&S>Vp@Tm)d;s@uvT&jW6aS@FQedZg4*v!T=E>gherkKWn=a3UKhM2MTX9yNS zNvoHMsvnzPoJXXkae?ju&1h2)2H>**UYf;fT_-g}0)Ds-rnBMdDKjFlw0 z$Q$hsq}_w$F68dd*I?y;4en)DM3V%_^@Ex`Vt?$LlP7&OShC5<27;N%8{-VM>9Be* z1$~^+EB!U5U!&v(E17<5uEFlI&IyvC)P4+qc9$&=e(P=MevZ)NfIG2wqa3hFPPH zRYVNVqn7p>N!Q|0ab8*!_O`XSZTKzefk;lKW5}~RdA)7gF_G6S0&SH@-%rT$+3jvS zdjD&nX@TdMz1)(S7KrUp29_hx@mG*T_(w%a1M4J0C;ySpxk5>NubbW&zORx$kfPBz zej^&i^ZXF6LLtNu!xxvA?~mutKy^q7A#_k#ZfNuG1()ls&i}U^z?Lhx?Eg!mcsv>Z zB!VHZ+B%0?m?60><{Zka(ak{$ddtH}rYiY=B?qu!U3R;DK+7rW?&@}h%iLsa_E7^S z>j8-N9}4mO*@uf&MaDH1+Ydk|<$As}xwx2UVlum=AQ6LQ>&IGHSZLQrE|Vd~kDXQp zw*CQN)y=lsKuZP>PDw>Y1t2v-ix4hfd^(*!2F5mk(nl;j@1e8RTTN?s-a2!?SD7r| zluPF(cYU2XbmlpGa=)^?oL}P`;A0?|85|4>zWo0EyIroi1q|p53SCYliU1fSjM?$> zRbr0n(xrwgFnPk);P7}jktYF~NnmOC1UM*bYMfT*D)uU(da=phWMQHpl`#ldEW?nr z^$5AR+LX&xd>DN~XhWKhthH<0dV#GELDAIYq;j7xM8>WzL;)0DtVzO4C>|Y{i^&Vf0f}v@-(v9-B4VX@qT(WwL_P0&j2VqtIgWRjwY}E9oS-x zEQwG_2w3t~86_d~qXug`c?t~>XEIV!VG$83jot+a$fJkQ8YU8w!0?8^;NU)-CAnPe zLh*zN!C7pO7!skv!h+6=k2_0kUCjp1*|WuRKyMCiYC9N?dVe%YO-028Oi_p9$?Vqa z4Q@+&^S#BM!cXC0F}Y0=O2{cpniOoebNQR zQoxczr>Kl6{8ME1yNZzE!X3Rt(V~ALw9CZGZO_e)XgdD2=2ibg=b>jxf^l08jj!$d zr)$x5SL16^1v(;=A?y)qZqMQ zGsRBt>Mb7*#zV6)luqx>8iFM5-nB5mJ z+^47{$@?Yc`T#ZGKQ@QQ=f)#i_=?>%Jk+Eka$;4-sd8=9dKD-6R(8$G5iYyk!Xx9g)lYXQ1J=E=|E*xF;w6bf3)%0LelG^DtvcbxfRly zB|I3lf~;1@RjZ|{5oldHlOp0LJ)I5N*s z%REf$M8Xt_iZ*GLnKB*$Es!ai{%8-wExdQ$XwXK`v(pqpLBoOwf6_2K7i9G__L}Mj z5obB|SkF6AW^HxJw5ZcOp~n{1OhcV<+isq#d!OmE5MsJCY^-JSB76?ori4N@)bAP-KiE}}%%n79rcG)9I z-~Y-#`ar9fa9Y+WJj{DaQ899prrOC1``JLWzGsvEDalc)qO5 z=jY^Fc2o2E5#=OPs4kuO>+Q}67JMgp@ZW_MvCHU6lMadk-1zV<A^XffA>No@ z5+z3t)Ph*k^0Gu!h2?j}WZOB>SXT`pCOG58w;z|$E}n)~Afbv#jueagvzw3ipBd;} z(8p`opPWb$^4e*bBD;fXn|AUTeNg2aQRBD`3xui#%PXhidsIq5#H;(CGe53^KVOT1 
zYWiOuf~kwBg@j)yp5me-gS)AX>UPOm4dGk;4Ag&1>yTYteN80&8TrgeiI6lA(?&!W zjA5_NZ8xO^K8jIdlc<7Iqs%6uU7lR;&C{aX%s@_kFWj#vcE!=Hl{vX zojL`2IbZ*KKAtThaaKgr_Uuq2b3fdm)5Y#D{IOcXkt2t}_j*`+|079G{VrBYDYDH3g zo|_LQ&747)%|l<3r9(i}PKFxFg6f0baS*-v;LdWa$L}_%-Phyxe<6eIIi=Loa66sM zRCm2eOm6?FfiUmrfoFhC zyzggYN+p(^+slogX_U1k$^UoCc7HuqXL~=b0z+BuAFfAdSs%)}kYm3yUMnmhQ-5aRZWmlv-J5i|uMyDu7H zR0nXOkNhQEK*X*JN>A4W+>QW(rR5K-?*gH2sKiAOR=OUSH1FG|e5dYP$G?(ogSOjH z>`IqvU7GqYK(|4kldckG1PUcfq-7(}XiMthQDNlgtV8GR%|`wVe76nkly9!OdN`d2 zh<;j@Qy^OxoOK1@O~4q1V2{t|^Rd$#fB-E^o!P{sXIx}!qUT01NJOD>gMkMLS9Al` z%TClG%blsiYwqr@%qg9p!eLKzDET|1%l+OYbY4Nxyk4At*lVF8oYCjx4c}frnmZlWrMt&hQU&8B0U-ALeA9B z6hQ1>io-~_Lckgn-qXXhvBD5SRT{yv2o(&rf^@V19hOCOf=Jph<3(+eQOQ9%OG0o~ zEu9#mN#1OOyXq1DUX*TaV`6Gtt<&hS`Q{vPhQ2sa?T4Li)=a0VpepZ5b)(c=V?!uU z*x+sl!Qm$%gQjIq74XAZp$04owNp#jJrVpd{OHV}d=&wV;-n75jN)k^+$JmyM$rTr^jzu#}n4Zqy^P>s{?Y`X0y%tCaTNn6lr4un728c z&rrN(cT9rUk*dg8wSp~BJB)Ah1}7u-@P!bOgjWL5gi(Z7yU>)qC^3V_#%Jhes%G!= zRjQRk8KIMCR~W5_1vmSd`=?n@*sZ44J4k)Um;5WS>)2;xF z(q!_1tk}@3b=90@Q-dE*cVO!Ug~oPZw@Poi#4fhg5%bZX>f@B)Mdw+E3KNcM{(YS= zS#mMw3t{V?c6ZsWLmM(7i$Q3mcOV#9|W#`2_SQCe8!MF=?9|OS-4i3&E&*92Rd~L{L ziRu0$pL3t&v!?uc8-K&=p+8Dg_u|9GxqO<8+xOSpm9nopH*p`vC@f^{^jGV@YjpMr z@;e4sS5{Y%)Qu^n%E8UJGaDIe^V4%vObj#3U^lQ{9*P5ye@%6fkxa6Y?6LvwEu6$4 z&Q8RW`~4G!0+WV)6PWfzl-iR)#2I7c?aI;4gl^*~?0en6`a8>tt?kJbeSRw1usiCv zDGDc~n`L+HOi&e^Q~MbxeT%^@Gj1?LB&EI;x13F?QEcCZ#Q>@)pOEZ}e3TvQ;jnxA(1r z6<|LOtrKJnoOhrmJ+Guw+&cU9m|3N`b;27voHs&bmwUO!bxG$DZ2LEvxK=aR;(WqME zneIK(Gq^8J1RAdkWr9yl9F|Vqh(H`+8g*rc(DA}4xA3+7@dOJ2!329qa)0+RGIW;Y zM2NKuBAnDIT%fw%8mf8|K7R_2kYmw{z`0k6k*gEspr*=Co*_+=#6aj}3^#e%jzoXs z-ylBWA{@)Jx0(D^$?NS}mpgUj3l5Fx%Tah+%keldZ);lNwS3p5ngxkapu2yHkAgt= z!CWV4WVEUfr(aL1K{1{~R-TMouUgxr0&9APc3|W3|=0-G6ne+>u3kYL)()ipa1k;z(O=YOs2q zpJEj6RPX8**E+DC2l{Hb%$r*+%e^m|qSCM`S;k3d8xpx*O`@K`q*oVHRO}sdcl#?nT_lVl1Zo9!=oX)a6ir|Q)-664;a_IRgnAm3jJ^g!;BvNS1feadt= zSdS3-gIN9Eumv;T82d)$`Y)9f(Zl+un8@cl`P7*U@?|dQYnt<%I$``Vr!gK%;Kx}* z5ojDme?-EUeY%ojWHNk1_n$51fX{#D6Q6iNk7!2aObv#9<&bqF3knu~$R`i)Um%=f zz&BR=_3z|(8)E_D#;~gfcf;)i;E*MPS@mrXO0s3R90nI7G1fkocOgC0kvp=3*{l+`Nit2Z$ zf8NRaX*xJ&9s`_~NofyII-U(;H)pd}-a>I9r+UbVAdrXbMqz9tWNlZPp~s_40^Kvz zrVxAA^&@wLCSn3{4hkCt6i<00<%YuCk$Qh~ciia3ud^i-mv;}{6W0~?MfqG zUA^bD=aNK7frGgo|9$vjBL54kWPLw>T;S)iirMW(03p!UFcgR@*%YIf(nM3A5HI%D zq=HpQx<~sRbcN7GxL4?>NaWxK5pr3qtf&c-60Z!31IKKVmWvC=5k4t1PUNETz)$pHlb8~taw!nie{r>K5%lDsmXd*#$m&c=ph)6I+I2|1w zK#K+EoDQPv{;$L$Gz-!uI0Nt^VqsN+vt~6Kxd4Fv(hw;4cKda`M3Z}zDr(zt3m#aA zAhy+FA5i6fv(pQN=?E$?7!5!Jfzn*Wv%nN|196K_7|R_50Q!fEO-wA@p*_q0$o+j~ z8-*N^U|ezufMNf;w1He6VBjJ*n*a#>T$z(Wz&$F=lb@|tkLB>HooEEgNS4tYhD^>I zyDJC{nuS0ks0S8ObR;DGhqw(B=kxAx9I&1GBgdtsX(%hRnoM8-_9_l+AXV_k?(zS* zO7aO9ZWP&|S>6GhDR;8SG;5DGUQ-6|g43;JdZrh^%kOD9+3RoDJ%5HvB*o0cow3T3?53;jtZBD8 zfO&w{vVn$`17eMO6KQ+(+dReyI1c>K=&0aMA|j%aw<>lI%>G>gK|y3BBm;xcN?9SL zfxm2mIxrrLzA8RT6BU5c0S=!R$G|9bhVr1!VzCS;@dXfcj%L_*tf$EB`Ym5?Gbf6L z4~|&!HQ%!Jh2Wmx#zV_z+;!J|t_>~rXdF(5&o}3mXu#V+I`D2sB?HvkT@2K|mA%c= z%8)n91he(big(YPgn;m8Wn}?}*qNPz0+OI!0do=*pWHNNrCczmPn0D=W(s6*mtt1M zo^Uj(18jVBd=$I|&Rj}DQYUFjAs*M$w~PPX2=2pjq>mu~OY!9)AZ;l}GF zUN$l|5a`OJH>ds$RA|!XnF0SJEPRVR4CaY4pY%@Rtb+{@8%AQ~d{=(*F;e0K6lePDhLBG+v8;u}tP0 z@MC?RjqI9l>}(rUF^v|M`1hJ;=V$2j8WlMr5z00a7NnS@kR?l0l^dXJ^4O@9OswJW z%wSv;_oAmqh88n(uk9umZ=*N!RjKW0G`cl*qkY1X=)FpvH6Fc4dD~3*GRmJ`YHAHu z@{f6EoY2P;X?)ORYV{`fX@!}J*C5w@X z2}E({?51#!Sw#iJXBI+_4*xoVy&IfqkFE}px5}g#Ip!s*>LtI7=J1FkhB*&$wJ_c| zh!jCu)IH~jr}~&!`QK-S2xzBz*vO9LMVzC9{75f$wR?HK_jwT?I3i|h%+#e+$+)6H zL<&U3kd{dgK<5>!^SJBd;eNR7>$STa)AZqHvz9z~l^xmSODwfD4v`oMnva6+CH<=V 
zHTc;-FY`)kb5vDz6m0{PVI!l8@f+rJp;X*7!w5JY*Ulb=0Rvm5^gN~87TwpP_nORcQ zQ(Ob9+=#oK=aA;PD6~B0^E3!~H0pEor9_F2Rr-y-W=Fy*hr1=U-7VcS&&~!vO)aoU zI7S}*1nwa((b&!3cn<+E$$mfd%nOehMh65B9(0ud6cnyb1N}Q^3)}N0Vg}H(rN+Do zNzWf;=6O$fcE`VC>OSJ%t1MFjoLSpE-F;=1BEa{*X4)v~TytGU;mvnpael3jV)Cg* ze!l0~|G!(y;JYi%knc_?;48!YG*fw_HzxL_!+K>f9bgA!F;|?siBI#@O;fFZQ(Q}( zw+5HolQoY!Q{o;iLQbolGhCYcozxsDiVhIMul~Q8W+b@Am0a^S`S8Ze)xZxr*cSh0 zHqj9u3zCJ;ZyOJ;xqHk-)g&H!Ld;B!17e;vtA$Gg>nFdZMqM7rHB2AL_ppu;N@r17 z$AhBHSP@8XtU3aGtYiL6qZMnNT|2Y{%%4_GRmVJdH#0n4_C=$wwQpQqJ?Xpjg`jR$ zMMt!6EPBRQe$^BoHDtRKX1Ww-YE42E$8YkEL}tt6!PasQ!yCN*(wvOQ+>B|nFu{g6 zs-d5$nIc)nI6&37HOrV+)4YW7oJ8{Ej5_45@#0g#AVh3Tg2T~8)yJQx-8UwT-RCxY zy`s^j6j$ZqAY-=riGBH1rBezL%W^RQm1*t7;B=iAZ;=_rEJP?NY2e2qa?*z!4lQ(> zspgY3f@^-gs$4zFX3@+wvdUGuu!=x)Q{PEDDB_+fU$rb=Z{ggzXz5N@dMQ;_fSCo{NZtY=84$@x-Kq<_~7hnXa<7Aex! zq4Z;f<&G=;@o?o^QtCbl`ssi+Ev)Lkb@TdoL_;5zm-19e3b|}X76qTBZIISQ8lut! z70Zwcm;_7hZ-sA|jR#i9JZQqigR^B^=M{Zk9ihL#+Vg@Q)cNi^Q}IF!B1>v}Z60Q| zVG`!u=%f$eNK^18EM5)0S1!%<>p!J&LAMse*X1M$LebtW0$tlvZA7t#&IkjI$e1qN zf4a1)f0$2fpU}uHu*Q$fH${|7LkwT@0Oq3$P7BR)Lacjs%(+))8&^BTmx-0b+XgJWN&rR8nk8+dz4nhzFGk++{ zAUM%tOCA`EbQ}4xJB=5D5=*4Sp+3xDig`qFr)jd!@2#Y1!N_Wk)W>mpLizwxoAysm zpOKXkTQfaMp+6Pk%W0tNS>!f#utZof(v${;Fj*Ud^itqxT4zI2{ycHEa4m|&@Q`Uj zgkNhsOi-PF=-93(;wYisX_cs|G2X*`a7oBNF-imryz`&ntc)20@q+-r29*qmt1Wv{ zxxRjd01h^H{)QGWF`Yr!gAJ2|028NuIsb%V*g*^V6^37^Q%TPx3h*xu#wgTf*bgS; z0J>3YOdB@p-|I3OfdV>_0%*fdEbcHO)+@IscgU2r$6W2YSBIOLdOs1;#cBn1ph@p2 zg7Y6tAXf~AGQBR-A;O30DQq_c{p|qlAiV6xI|qNpeityd!+w{o=lP;K_SSKnTKN4PiPPOG8ij+*NzykcvNg; zZp8IJSQsB&ErZ+_b|`8=xp@3n5Bk`xpr_>fa3UYN0YopYm{O6Dk?ZT~2uec!L?6Eb zs)OGwu)XadC>_8?4WPut#l<;+8yEnpsaB}TtE)ruXX{jlU|{-d31cS}LCoT$l0h{9 zWEd+3%;kD>h#gE`{ltV+s3t2GEX%jOvH!PIX{_gf=hU|IaS2F8W_w<7av%)c#gj?b z)z$(;RI3z_#!VIEb;Ip@CWw zPz7=xw*3nDb?=+v34cQo?PS?R_4h=%+?}(oN z=%}6p%j!XfbPXhUKom3EkiIMKS94j)7NgsB^Z91`b9FJRYQ!zW+l2;(faZra#s51k9`gX7Hv!098;P^ToL%C;^`LMGx5f8F>K{ z8^PTgAPHsP_A*opiDY|tjiB%PR@^ICj4_alLSmoWqbCk;yS-(~Ou(g%{&4;;fb@r)K+SNBc)R(>>!<=Q!UT z7ZuBPsjRlUaI4_7_&ElE_yw$=k)z{%fq48*qm!1oyxgqLcqiOOYjvd6j=m>)?clYZ~ORNDL$90)letq?DBgp>I=^do5sGZXmrJ^206y{8C3bebNC)j|ls@vms8rbAg$WIAITTIiy|dyR^w zGrv^8`ZU=Rt!rU%}}$QAcSMxqk%i zF)cG-Cr`U?PyY>ETm74!eaBar7Ozzz)%#4sQxUUb8`X&at)gc?Q`BZL&zMUJtHaYE z7{DrQe^%C&2I%(K86dsZudHb(K-!r<)AFeiz5S&aEzGN+9}NPA9FQM14@b0_d7 zNq&jKw0No3x=r+4Xt{~zzDUX1JF<;QLwgllUMsEqV!aG!Eobd_#Yvo@Ur#ETX#?YC z0jHeuLFrWO@>6|3+sxA%iY6$+W6j)vdWm5LYgn|{H9F;0TmgBXi9Ns0oH<;Y-{zw5 z=-E*0Ql+Gq2K{hU`!aN`#MKksTaE4R=GI{H{t#Y732zXR*BJgzM=m|nUDxEe53j>Z za=(KOS)xv8sn?9@gQ{SCwlVy_D!rBGLBW4)jK|A|gWc9;AJ(Ru)tVMQ>N;=tAM)Kh zhqjrozSfhk_FhY_#39~1G{-vEjL8N=cyQILSSvNTp8J~LrPuw>;DZ;lBC0wtn4i2R zTIS{VcQ-Q$k)HyWa3QPxd}Bma45{-pP8L&7B-}O;7rr@fNz$}uDtR!vY_#vPdJ2$L z+}moLMHJYp?y%%d`oVHDZaJ{z9sM@jjNW;p^@~@1Z>V^%Myuww{Zesn0U%%Hr`!#XuKZCXp!aScqL($bHi&>Ug9A>^pSLYnp-tu~qQ zZ7K*}&N@&0a60`~N|QQxhhOjsL&xSuunvU&ChtsEkn|HwNilO0XB&}Qc8Z#ep*Pu5 z{kfQ$5I}a&j8Yj2`U2JYNJ1A}z&sGO|K(XllC9tsG_6#jB63&^l+S8t*RS5H1H+>* zSv)Wn)Sw(R%B!}qLXnj7qRMStk#$yVuGbMExg58mYoV1{6O|UQirq^fcw(PLw z==d5|jol}NdbYv^*yWgl6twVCetj|`Ikg?;#yLkxDZ2v;(Ro~v3GiU-KR!Ig$>l+` zNm}63yjgqho+Vc{MT;>%9npr?l=p;~nRvON4kZE&Tld{E6!c#`Lo5vft@Biisrdqi*d&sK~_i$J$h^+;Fu z4G)C!H)_AraLbNa>%D$waa@QVRdy_BOm>rGR0*vdGz&QX*Oxp@JqR07dvLxP5zc5f z^PIhug4!MaHCpY6IRu-|h-QxyfE2+nXuM)cC!j#ngA`;R`L7uNp<59pg%9~vG$D&X z>czY-*|0!o&jdk6MPlez5^Kx9c}9Z$aAmDCN(=I$E2}{kJotc}>|Pu^T;aJNyKphN zskbahk*p$9D^iq$_;zud5$tazBkyV@Y`R0OhM&qUhCKFoQJ~!4U-jW{Z8%t`ahfmv z)qQ(YCih}&-N*+Zci$AP$Oc#UG}&YJ^q^4u5@oe+x;0?bTNIqgr<)Fp%zq|4RCXTNaa4Cj6aXT!qqajo&LM_sVZB6B@IHoPkMzvO~Lf 
z*YAw19+c?5Qu8Xp?^8SB$mm9vVMIpt!kFx1b|i}0Nd^tal_Dfd2hDZ8Pz#3_U6rMv z6b~DQaqI@!a*9(XNGBY!? z+0Q)r=zW5OKQCB)`k@JsSGQc`pm4A}fjx?3@%(>Mv$EiFVFMk6oPyM-x}%#B+Kv3% zFbtST3?X!UUv@(PKM2r=bN+gNT+m!|2+XE47x-*1;eD6XnZF#5U;=v>YRJgQW@l%C zYGF=J&Q6cV@Te$UCL=LnVX&}bg1X@O>hf~Ww}(^eZ_wud5>506!XqN|b8`6Q8ANf6 z6m~9hrnC2n{N+`vw1A{SL45@CC^k_Wz7Q(Vqd1`aQ@n<2E$}_qmU*jb%0%1R?XqA* z{67A;d8oPT^6rUnUod1bq`9QIz@zZ#_CnE6lezl2BW^8@J-MsZ^;!pljooVH8{!_I z^^%a36pg|Ha}{&=cVJMxXMAS-#QOwlgm4)t{uPm(tt%&opx;V>L;Q=OpLM@re>7|# zK{KbkJ_2+WI1l>|9n+<7d+$D^+D>J#8mc%d+^a0A6#p)s`70M!fQ*Kibd~OtzaG_k zeyqdc`(5BYhWG7cB$oL91nu`%*m9nJ?>m4n!vWN9D`AGvCUhK!c2vJOaJbNLA~{Ip z<>f(9h%~4{J^fTq2pePkG zMGPx&42XUJ_@0lSPxM!`4x!Es;vd@E+graD&>aDnK(^gPJryI|Hdtvy9oR^6D|F-E z0}}*DeGywT!Pv0gd60Pq6bA+=-Nb$9^uosNCerDQlkZ*ZdwDxvM0ZswU7*7;2}ThJ25X zOo%=uLSu{-d9E-dEc-eIX0{ZYfocYAP9x&iTe;imY1xm*(KXqzOL*o93oE`%+1pPF zHtI-cf#!+}|BMl}Aqm=Zz-U9b`5GSDD> zi7KQ?KzkgbJc+Sw$s(i)&NZPa-f;HUw{%g_66zh+$%0(55EM)5i#70cwx^|qr^O}M zW-|*15f%RU4GoD)EGVlp1u1IchCx4#gq8(uo;B6@>x$7D#bU0K`3+X}t+NlCvtP2( z(di~??9M)aiWsC6*Az852kP3k-1~^K->mf)ui+^^eo$9gsiT|a_=|z=UHJB<+R>30kK?XCVODj2 zTvjwn^hWIWJe4m=a~)?*><;a5jmTAPXmUYvy!`&Oh-y|?)h`sChyqt&>f-YF)zQe@ z@+G9Sn2ASP^E)#ik)k}b}ERK8+*8FHuDHM z(?PS?LvvwZ5fZ3PZfixq)|wR|CZS-f}Z%1pYU0C!%O#H(jyd>584c@W`9c$w4 zYpV4ty6uCiUwFoy^OFZgItN1V<;Es+&-GTOHCpK7*L5gdk)SKCEj?@&g`zwO^^v3z zG)A1i_|zyfq8YWV=(LRqvrAD`QpHWggD|P|V`KkH_Oql@s*JI5VJ<>>RYub9wGcsG z2Y6>sKQwy|OFOR&n^o7>c>aFAep&P?uj_dD7mie;UCpf0(2Co(x_8|OL<3=)GI`bX)O)QD5y_{nMYK> zt8qaP2hC#}r+oVuvs1txk=0O_ur>v&WCRQQuX2UF1FW_6&OWgL_ffZ?C&lbHjgTO_ zd~=ljz&WYLo-4yuMihy!H-h!M@zH4ACJ!&x>S0suUD>gVCT_8$bulXS1j{8@=wqx? zPqUClN-w(#qat=xfBy9&8ugO>blSq?_VII+3S?ng7^XM-+~t5?vSC5gWbjmJ#v~!) zE`&Ka+);+}#SL+CpYn98)Qs&Mp^5KNeh$!>$f(gx3E1R`q9DoM{Iy2FGR~kNnZbv!9X}uSLogIBfnRdOGKv?wh^p zgJnuHhB!+Vg|njFCh0}GNiCAskbFgSz&!yyg7OC6NZ^U_)KQdw%b#L!y=?EBIjBv$ zpd8&Gm`JYisYJeJ7k>OB9z>-cCHXh&XJyA%&Ine55HqI+)9R)dVXG`bdf2C9=1LCM{L6W22mancIYfjN;u3;OBYM~rbN@QINC75v`34d zC4~>}G=1Yv@{762y#V&)v*C&qq^f`;wR3n#KF;>^wac0gN4$CSnka5X_m2XIcta3| zMG|mBI6+Fdr%hu*mYNSENcMiMB@;?rqC@&tEah<$G1IW3_kQNoJx_x_gFigm+zm>R z!6Z8Kk}2Kbu{?y^^+8~01nqd0;FF{c{(?#qiLg1nEK|4S|1>x!{Vteu!-64w`!>*% zjBhk}O*$TGF!3!MdMKr7L_7D9J%NLonA|*&RsR=*=pc9iZ9z2d(o4{)HfT^B6l|i- zM6JCbl7sY{_UrWz`u~?_Znt~G_`WdmALQ9yAofC1QWK$w!~|j?If)}0NU7?Gw0_!v zI0C3H3@yF~I&XkGBO#2JHns|=CU9OYR;9_x28K95k_F&&WlCjjZCoqQ%f30$Alw4m zJ^-7(ZPx`OG}#{v8^EvxbRDh$4lyh&tdU$;eJH`TFf$)e9>Tc#?l0c#6K*XrIJi@W zj5TV1sy#Y6*uEhk2H8vb8@Jy^7sxQs&`1FRi1TIm;Muu31)|s)7Hc2@X=-X}Z4Ddi zprD{2Bn0{aJv$FvZgp$z-Y#Q=Y{gdFU8_@5F)#j)y!KxbeF7DZ zU;wQrmWkMsz<`n;09u`#ot#L%g~nvdwz$4nj4$RBi5?yuB`GH=FDNOoDlM`sDyb+X zMN>@TW~3t~)lNHYt2{2D6(_+f<4;9pakrT`Z4PjS#;8`S!|v{;Fd70QBQVL<-`Dkf z24*W|KHT0BhJGh>LvYFiqPirA!B#Mkkhiz|i(Z0W5jf$|X?U5;ZZeY#X<^LdO%rCR zU3zaBsE$XjlYD>e^^Et%Q0b_f%$vbnpBL+I3yKS+*OfH>)bPJHN&fsgD66 ztnb$c;MD+lKS`VP>h3QrgO5yZ*FMd5*$dvKYc=y`g>F62bOf592iT#&6j;P^;yxum zpT0}j&Va{1f4=2KQ;+TT2?x4DvVk0fW%{Yc{St1l7H^8p&c?qGpkTa#s_d1|aR8=H zrIXcqauyJFDI+5it?iN$iWN9bF>aD>NS8p*;2m0_j-ZmU-?)3fVF@|ImIgz!b|CY9 zg+efeg3#v?F~+hGqQEhp27kjf%KQz*@zb2FR}-PzcuDJ)79{fTlct8o4uah>(7^ow zycU4H#Ej(Ih`(_oEXW4}BbY>l+8(|{fz3`XlNTPNN{ENjLi(8U%gGu3`VM~yO$2fX z4Hh5foJW55ibzQi9aDcvaxptkEj$*UcdL4Jms%+<<1GA?A3H)7CC6arpc{QQw-p^o zOxBTXLFfSMrOZ@3mPvbGKs!X1PU8*Jf=1*g3Jr${4HMZi`XwxB=63!M!8Z4cqW1}2 zoc@4%4jl5r5u0K+jntd@cgxj)PF)^-tAwkh5jCYb`qYoUzW+En$FRs7zl-0SZF{n9 z+pf*FU7Ky&)@FON?G2l`*|oXn_kVg>b9K#3y*P8u_k2$0xU>~2Wjp;auH?N6UXmLR zX=7ncF%6EW_2^`moHNpbUN=+AyeKS(as-w}(>wWeL=Y5R(ou_Y>__1gwQ)pnko4Y9 z*AZY0^r>UhmS_aqgvb$=a_G<`j?eYo-7%!>GICLHdipDu?5F@|QK&_dRZSwhij-k_ 
z!J>-U^?PV1@1Q&;+&R{nw5T3wLC&XO2yHLKWiQ##RgqEdK{5$$lXnpwsto5| zn^hhcb#}(>Zl3GytE8Y{sGy(jb&o?>+={^EINNH6YaF#1a7K`%HHo7SZzsQ~9#J_3 z_FF>bESmiw+;1;Nm^^5?KM1of35XGq?TW z->jFL>p8XifDP5hjfJSb9=_A|klH?nx=(YN)15BK4|Cap`$jLNh&WGvC-RmX?%G{{ z7f|_3Z0}a{v!2`0#CVmMaqF8XRrgkpv%TD=y< z+HY|EkG?le-5cbwS#Ycz&-C* zxq{ZXxvSC8K!Z-jmrKTt4)^3j!^+my`45Pd6nF-l8{M@XH9Z(OXtp^5wo@Xi7k)MC z2wB!Sb=FjqF_(8=+;*)2$J4rXVk0-f{u5QD7_uNr=-fj~Rkw(tME)ro-4Q;)Dp(BN zY85>>SN~NBpk2S=uWMYHbfMn;=;3HjIa0@r#wdwrcwxRyd$9AcJk6P;$+~&#^vKBO zpRW#6mT8QjJK-9J=3kXcEa`g6&L_b*tpSIQu}F!lNpUa=qgR<*P?c8Ws!n*v`p1ZZ zZ)fOmgMvnrP4Dj?>N)a_dUqB)a`aqKp!u3p_^nYQEzESqTa#U7Y))~ShZRK_T3z6Q zk$2yuM)qt23LC=%tL~wBl6VP8nL4F4%XfFYsTc?cl#vyu3zs;jH3bifl8xp6wv^-X zN0?o`+5-QwYJ2qNUNfh8si;RM*|qmwNee~zqqRW~#i!nw9_m5N#3wey9Bg`LNpI13 zGtW`5)-?CeD!9n7XbPXC(jDO2vk$ONIN8<;&A9#TkeI!r;r%TekOvX-M4vsXC|; z$E=T3L=n8aD=$XV1(}4faJ{9$G~%dYu=x4k2$AOLB|*%F9|z>gH@U~eW=pnOA^9bT zG}QuznO|EgLSbRt`FV;gQZ3UqkgG+9N>X@Ra;&_klTQ|?P@<{2>aX3jWbd1t-jzqe z>Y95OsyxSvu~l1OopLcoF%%x;jl&E3yi&$xiVa?{!aeeOO(hkGsAmb1+7H6%6p`Ez zJCJ-KhmTmIJqdSHP-6-jO@0W?D|#L2Ri`Cpkw4G|b-|do7v)vV*E-g(3ncLyx^OY$ z&~?GSBrdTr)BWRyWjkfoBu@GgyN65!zjYFRm9ZndpTmoa5+IbSe7b?=h6BtH%lZ{% z4yb9t+3eq0u&v0;4YKyce@5XDY=o}UFCmZzRA#X7%O*EHf55s_Xc$IkPu#gS;=7Q> z2_rBG)DD6P%+Ifd!d~PM-HENd)AIB!tMbl@M-IO-v^{eXBDj#MgCNs!*ZPkF}nlhl`i zWyh4Dwp#|NM~eQ@mi|5pDaOv5Ylj)!9M@&aO^r~ni<0a3UGkRM()Ia6J#xji`39=L zl>3t0Fc0p1GbF*4cO)(ZNtrrt^7h|77g3Nsg7F1TF&#qwqu1B!N1?5)Eihro59~t& zoQudil6W>QuGLewVXAf@&jTi#`v5$Mao`tFdX2?l0V^H{hejgcb*pG;QLp|jx<R2vv;k=-8v}y^Wy<5# zmJeXxgkR!(Ryn|A;w;)B;BwDD6SJBPV~J~l+k67&S-|o&H^8&=$`JL&=&nZ|kJxr3gf%LMrN@M5wi9ym9^qt93CWBv?=m;Cb%QVXXu zv9<;nCl0p+5{pRPXk~ghzkz229Jisx`#)W1?;oOFV%T4sU!U#Sn3|f}+RDiX1BiX; zH6&*}Co`wKA6wz$h>#qR07n%?-jZQ?x3&vYjQ@u#Is4uGj4*tGJ53>XQ zgb0g_i^Y4P@lRJ5*&z%a(B|(z6<4E9PbQL4eC8MceneCdUmd!?tz1T~s~mqn#t#)n zUSv_xg7TJFwkwPX-2@A(*!4aD`yii(Mr{Gzpe~aa$^t5H5^WG^b{;R6yIVp&UK$Ql zg`5(alvYw%h?f`diXeDR7#?s0=gUa+jGPKtC*(*i<{|J%rIRw{Aag zaq=MH5EDq2#>nwQykyt$D-+UWWA7vw7We71HRZNFXvj&bv)nD~%wlsGI@D)K&bIdU zA@slo@8h`wraOCat7=nk=)HD86GBOaCJ#@MHzfe776INXhTy@y`{+xPwmy}}ZlO4e zOIQ3hhi3h|(!`e52!+}!b9%pi{8F%m@#G`{LWA4hBd}t7I%qT{+g&riS~KPeuR2gx zCPlrjD4%dTtSnDOp*<&^&N{wPfmT92v4-RQAE$As<4rBvlS=mM&zRqRzOD}zbyL#Q zI>ZXMG1L=^I$g=>smAWyO3|}FJD})$a-stEUzn_jSzon>iK)A3h7`V|rk~-se(%Vl zot7v3bb%CMTu49DqEpCir6j^4FOG53f9shTn6e%c1ev9`? 
z)p_=uKamQN+M1ybJz5DY1}+nKx3CFeuiVq5A%a_d~ptsPq@ABo`B)P9zU zPS#&r|F%7=xo_(X&V)dHc2B?+Loj90{OaWTF+882-$kB-}GJ=St0E@Zrc<_7Vm zc9sEJTYN=UL6=md*+pSLg3pKN;7y&VaNyc*sKE-~?k>Pu&B7X6vart}jEhw(l5Tak zVjakJx1zZ^x5qqFs!ghU+%goKv>1eNi+TzwH*8E>Hr0CO^cxpdS%WaK=*-{p4zIcC z5}&3v?O%+GHln0cAXy!%OWwN+-Wy^r?D5wzaCb;<4yccNoc(`H1^tcuPrZ#qTcJE^=4sp3@nTWs6B*K^dGi^L35l9 z@=No@?3-2-7?D>QaUSv_N3sq@+&5+|7!e-tSL94yk$F*v`apt5f5U2>FE9bODp#K5 zlzDN>KAmfT2e)4#?@+G@W36P?CbeTrqFKY$|l_!uWGfCQjrQgYsEZF~!6djXg zpa6P8u~uMBu5(@pg|Rb?SFr2=qsbU#ErF<8U*Xmm%tqkFJcci$f-FpJ@kbedY+GkSMXV)<{$|>nBp$w)HWlnWKDx$kzR0S9*QgL^csz> z4g^U^C4P4iwk1j7R@?c3VY~>8I+_) z5XMKoh{p>G7>OUYf09(vCx>1LoVRdb9-%1ifbD@FLmV|~Y7K%;gxb(bNOK({Fw6KT z`)mKarBf#{RMuF7uEEz>RuSf3g^!Enh0!XY_+eG?RD!luB6^-B2oZtB(H9u?<;F^8diK`$E^A=I)PJcl=WpPJ(vA1q`<^+F=esu|C3Kf{7;jwrQ5 zeDV3e6W)i<=yt#F8}G6ChguG8sKqp+dBuE(?;k{x*rb>f{9-p)t=nrlVFOqc1}sB; zwtRaHwyCiKSN;Dik_T!^~oV z0u}pNHh8Ya`l+|aENNsG$nfqcPnl{(8wm9i!&R|~<@>Qvrak#&K;qzjbz;OAC)5-S zmFaHSQp~GjKeHUPL~U8c_YTLyn%c(rt8clSzi+GTQ{cCm1C1%0Hz9JP5Y!?Wb+pL- zLb;CTbTEOXXut23z2kOa&#`O6IciT}(NGYMipGTI7=z>+QAQi6s~JN?&*)SzY$F^+ zJ+B_i*THCn63yMUEX;5ec5ts+fhAc?%U<%LLqhT))==~9UC+#llN(o4k`XUcb~2c1 zNzHY+gi5m+kd0dzFH=@e&tg=Z`pd`UJkNRPgRJ5d1g zBYq=ED>=SPf?z$^&mJ{2YDR5p1(UBlAnAjkjZnPi7JVL{#q6936(V#Y&oI||FTHDC zoZ9?zL5&5n-J%YA_P2@WFrfexs!TcZsFSaAxiQHrT%Zz*sd>L63u}iq&c`YRC#Gmb z%n05H2&aEwj@Qh^UdyxQ6eCDRkg~6@?}#bDL~O3&YkB5bDLod!(@Ytdkz&re1`w?} zIy$PVXv59`-PW$|w+*OYNl6LFaQPa^Da}t8%UH7zObpN@276`8(2BvqqPl12QP`3Z zxX`*GMSYq+kjpfsIp=@u_(`SO89G~r>jT@_+#VNN=;-JtN`PGj8sznRWjE%)(eNj9 z^a^VgEww=4>J4s>{3VtF6cAY5PLv}%zSCmHHIMD4nQM3IZEtScQK!HQSX)`mTd_k7 zk}h)jy*}VF8wmq41W=m({+|G>1F@(`2ts$a5XghW|J&T$BpA(Njv2lMB`*ArNPIg+ z3=sZN!8QQw|8}<*xUZXM4M4xB zTxO$t>5j$cyvSbTem_;4_L%Am=n3@j%OoQOk&QQw!)>|%dXI&LWmrG5v;g@b%6fu1B%4JOu5hBydS$e|0@_~BDs4`;UAW$^LwDNcoFaATu}XbH-Oa6DavpT?^?Mnec4A3{S3 z=|G72Ls?IKKG;W26TXWE(MmEt`cS!KIqe(OK7A&-JtIy7B(UP2G5`nK&re{J2k@N3 zOO&!$!yZsHV75i$6$8Vv*etpPT^>LBGnB2Q|H4L#_M?l1p%3L2YQpf7x{%gQvr?NN zB}J}9wqU6UcbIBGPf&SRAJ)#U=b41n?a=*;{z40e%Owl?G2V%uj>@AD+Ac5t%7AUc zB!87FFS*U!7^6-GgP>x2x(tt<5(g3V(su;Ro`)^q3)8Xqv_apoV5F-olBr&m25%|C z%2Cslmlx_4QKXtdQdYYecZZo_x*IDWA=0lkfrZdI8g-kqu2^eJ%EZ&G{h)XUDK@1i zGnSR+n7xD24l9BS#ALE*zEO$}-DLQ1k4Ty-y!pyEcyX_NIvQTR9j)I^`MFg-^K}KG zwW&6hu#86(L$7)MKc5WmeKpixNTt zS?{DOAsM5_8MhY9nhB8k{(cx_r8ylx*UrJOVkENKWX4;kZa33qN942(eUx#qSD3Xw zxZJ2B+yFG8qN^idI1^f|)XQ~_I4q%r>E4qd?b9NMR zE=wj7lrt=~*;_jbTS}m)Dgp^&(A0lBkMnm?ZnZQ{7PT&wcr_{w(SE%u1i43$(lPy` z_e#{hd8t61lfCbwoBL>F{TC6PiycOdf0C1XF~@cvO;d$P`sXCL1mwq!zmS_Ah8zz5^T9NXs_#HyyXqNknq$4|BV>91nSD{fC-rGR zI!;{G)Hf-bINoDmx-Ca7K&?UqjC$q=6K@m`;v5%-QGpU z_4mksCNeVEOc(DdOg=Dw^E3)A5nz0+~3Zuj*ES#?qCJ<9=OkxkkdS=GPr$I** zy4IE+df0R5CKw|=IDVqnG9uo8RxG==r@2wigwQ6vZI>kblr#J4H7Scbj)2%lR!6f35Q}RtO_KA1$-}+SgWSF_EaT`gc zk(VVN2)q$PZQRw1Uz1_4$9fc3MO;~M)+rlbx^V)7FVNDX%wwuuV}R;MZ&u5Q=r{Cd zQ5xuTXwh1%^JUZ6&`$*2A(thHBFSBBK5ENf)qm`dAo(@Tt7^D1em2R2jbKeYJFssZ z7H(c!dT?*oZHXp*1A))AotIiu3mZ);91`M135}RF!xZ0279nyB$hWbq%o5}{XYTO? 
z;EW7j2n0#iqvZd^k&BpoF;ML-=#Nvs>nY=3Jaup1#xAD&4#Q(Zo6dn%k!`*cqH&rmZ;#VHGc|)d;5G{j_x>1XfLY1~{lScH!t&DL-y|U(LnsuY51EJ`pGfPNLDQ6O z>xgFSsMw1};Op*x_ZyGjm{uOVdhJgx(@S|6CF#sJRm+se8|Z4DJzQn@Mu-~sRk=L~ zM)Z&qFPjeeY7gtA6;iilombKA40EkMH%cf96mL8OD%QuoKjmrg>y@)-dyVNnQGa(Q zU8IgK;_&X4urJJ}n>_q;VqCF@p&Q+dAZ|;=K^nsBj!_UQ0XkNpHdb(RUNo^$?&*1W z!T4EOCu{55lg#arnVNN6^XcupJmfOQB^q00_Z@hmU0JrsTsa!`UEqcu!9eFcxzTDu zs32=MvXX{I%g79Dl$3He;LG+d4+|qLa4CtE`PXs46WS|!)vFmc?s1s>NHFb-CvMPE zq3GAR;M*J~1yK*ubOA^WMU8wT`li~Qxg47Q7zhjQdaA06g>7eksRX z$)-UGPV{4jqN=66vs-@b1cbm2k8I+wOH}a2Y+DS;oO2qYd;}T{c;ZDpxfdNiNS33@ z;ao(Ht}oQyF0BD2lNSrSC4}~3ga9=oeeH+m=>2sS1imoIl;TI#-b~5-*{f%dFz9+Z zFq`?Y;cvZa+z#dH(i(8z=Vy8E{lbZvS_RsqcU1{AF?(Z2)RcS@}QbGI`37|C+nHUV%gs zr5&h?xB+aGU=m@gPP2(*Ao$z@7SDNjv}-lQPC!*L@2}_efc$%V+HgcfMG!;S14g@! zkB@MhVu8_4 zsYRv22cVczRAhYlrxjS6;s)*m5d_6&zJLD?VE$eJ(w-qP1sK8QT4M>oAx5dv)za!G z5t}h|F2|HfAoUB;>|h^DAdX0vAsPb^F@R%=uxh1RFJo)6df~C%GvqeL`Md4gH*o$Q z{yhF%>FF!Xi{D3^t(80FcawKOklk@IGcyCUQk@KLbTl=g>uYOk)6&v_FdCgoF_Z+) zA#hkQisBmuygZhckM5iITU%y(i%yGTTNeL>MRI#ayCs)WuqKLP{P=a0qY+u!ta5N{AAYE@}vyz!lL!u|@Dj$LFoJ z4o9-w&*PaKkfcGc?+dNot|86_z+KK4^zk08!M#+u)SYhQhUQDLBP3k}n?W-Q-4#_% zeBmeuC1I{9y17Z@*sYQ7lY>B@qN1XKfq}-xMpRVPdAczbK4wMN9or zD8f)4RF)Bn1kHMps*HA26H=w9CK2MSgD}mIPo%O;^Tx?a2i`a0f@$*W!G#4zH*@>+Zhqt+xZyO?DOwb9DUxxr+bN{3 zc-E$34@?70pxf!A3v52wK0~oIJdcPNzwBXqV!+SY{H{8)t$)$iTZ;{rBze?Uy1h#I(R-{#cCV+^#=KAfIRXx0OKFj-hdZt#*X!#FsR)X zLUWkX5xaj~w*K`#b8DDS_zT5y9a_l_j`~wUwkso~l@xHv?x)v^Yw(A_P{BXZ zLO!*NRGZB>u&uG(TQnDr2hlu>i|ZE&N!ezdeYe<#Uj0f|er;JB5HBl$$KtEDJXZdB zMOZ2))whi^#iy6xtObw5-(NlmVVBry;n~jC6M7ss`142hR~s&oJt2|(4g^WI7 zIb(u)!ev?I472@gqp*e)S*@;|9pwt2Qtlp$bEo6^=WZ8UQd&E@9MMk<{!^~IC6O0H z#Hklfrnl;vP1`7Qsi7p{$%UJ2^rY_@`=f#Oe}fuKxnf=wh2Vajj|EM(D^a`pCc+HxOn%7&LjtSAiMy=>pD82!q`c;ys7LhJ z-mf(HiPn}adsUvDFWtgz^)s{T`?SW?EP1AZBI}j~&6cgB+lt}+r%?_Y(VakD^jq5J z8<+wka+3gFadzc*Urw5;85#X4wv^IMO0}ZsJ<8_y_5Ab=pFdYo%~@s;A_BiQM>-bN zb*64?IR@2fel!nX%Q9{GGyT?(I|b#n=uc`c@K3idS!eT3GkY;QvF6!<>`hP2bsReLe6vFx4o?lk z2;WDS(&&4XXJ$F}CarB6t0y>-AGK&+1zP&D=bBar@2T>Pk7>h9$gH!1iOHRQ(|+DG z6r$-~qGVfZ!k57rr#G5)gyLJ6xfxvqf)9AaDW)$VpfNZXqf`=+W^kJcMSIet;61P; zudLI3G-d}OvE73#PDdJZfIClq^<(~MycV#Ht2!d841;<(U$P0>sA708O3ZyQo?yG$ zBnwJBXr-F9d1!Ew=9yHSUW5kU1Gb5l`+fakO}p#NywUM^+7A<8H8TGI0kn z?Faswi}!apf0&4dSEq zM()cd;!*wJ9eBur6e);xfA#Pi{RbV>r-$;`vi_J{f0)ko<(&0+sXPLPglIuIVgRE? 
zgs3c7*~0Lh?A?-!;llFS_9|M5%5#&zT64e^TCaoC^g-^Qp6Q;7(C;M{EPUTo#!Ndh z*^WqhY~V9%*4ZiVG0aoo+GQDgoYf`7x7`f_LlLsDMX%I(V!4EUdIt7DbdJrp4-O@% zthwg2gH1Y{lDK!Xnln$P6ia=7Lj@Px_H^ROrZ->fMW;CNW!$z=Db$u%mXE&+UPx+8 ztj?*K5dv;!4ZY_LUw%^7cM9Onc0%-ys&VTaWH)_ReL`UYsSNK-m{J$#Y|vtitY>5rh;2>jicFZp*UZoHmdFC%YJ zXhi)IH;_=3UHVCAO-6R<@!7_j6Hn!CAyu zsUfT#8V>0oISq_BI`SYca$-NccX>uN)ZX!HoYqne`(^QHdc*f5m-L7xizF|_@;K2a z(mb1!5TFaPDazkNv`V5@REh0e6XRvLWniR5)`hh)vu8~^aB6&YnIxEJjj|tqsuUN1 z*B!+8IE19x6mGEQE+0s+WMM#~voYio{4q*R#^B+V3hnP#qgJ=f;d0u&d3b1gh@Cji zIy%KQU1XzOVv}8ByIW*yT4I}AWXrjEf@`7hbK<3yIK{m@&C*?rn3W^m%*^Lu5NHPP zLCQ}S#A#doX&Em!Hw}!wjy_$xQNWr!6pg{_MS|%068@5Xoh#~~BwYV#Js(0A^37BG zcKZg{cO-hX6%~P6ssNK&3wRR!si_!%N7!sR2PjV5VmQDMgbM=`4F{B30pE|hsw%ks zvBSedfKJmJ4Ei4^St@}D5Mf7N266DAiQ3xQ|0^Q{2a%+rjPpT&ru&}&{c*L)1H_R) zxmWP>`2g6_hzB~b!sv4aLPG9m%RpnZ2%j~pro8+(t=vGx!HZ%-+V>wa{3)!6kI`_d zEQR<0P{}|;c0L$IJ3Kk%*mBoKPXq$Ibz-cAWa- z`5*y!l|sOK(9&d`hE8X>GNZ@pAtU8Krk&7CN3V%+#ff8#Z9oM22BO+jbkq2um=i=Q zcrpiEeFDo9ppy{K&Ym8>!BALtcXvRBlKVQ?*aW*eT3Masto&$yx$^w$Y5BuaS4r2$ z(FfiVUdm8PC_pG#;wUDjVu`eB7L7-{Lim$HIlQ@us6i3IzT%EZZdwjH*m#hyHcaw=lpSz)%?$WqkYTG z$4t6ZvoymtvDz@UKq*tn%*;$nOG{lHBRP10wif~f3dKrxkqGrN&N~c4b%B3Kxxhh1 z{T7GPqo)P>Ne~{>&gD`=S*av$o{<|_QbJ}2VumlO>@O-3^fiO2g*;sk4UIG-8k7p| ziEKc?gs@~BT3e|M`B-|unr6Elb59J&8LdAovT&c`> z!K{suCWe8!gWFy#YqueN#}&i*Y3ecdXTKG~F^k1E9JEbpQ;Py<)%!@&Ars>AGx)RZ zXC&{5yKF$>cSp@Gm8L zqX-?pec+4aH&rzeSsDrN`$AZ)H4j=zm17g0O&%r=7u2*e^z)P6AjCXPknnMt#T2V- zS6p)b^365#A|j4sr`2pX@hCA@(uFzs>g9R~o&zdH(90J|bZzM~XY8eXS>kyAmdKNk z5Z}|erc``qr3o`CSHZsKywzAq>P4)0$FZ;gp3Uo4_D#KI*EVc=jm2|5#?SWv#PI^0 zfY*O^Sg38@A8*^@SmkM;BP@9L7R99@gN;>@oU@9{>%z;wg$mBV5z*5hGavit7>~pZ z&R@SQ+?ek|>LOJVVl)#HmQ$f^;F7p&yXb*p%Aao_28k|!uPObfY@yS>)ZMmRX6ZkB zF(G?_Zi`IP>mnB3(j(TzsguB|1s+RU_FP_@|L{9S9G^kXW%@O5a9gNm%0=z+mE zJ%-(};K)qq%FwCrI`eJ9$}2H=Sx1mK_n(5pQ+wx-5Rg+dRX)g1KdY$#>)xXUF+@KE zP>-HBui1{Lj5~MYh(3$S-=;3!p)13bFJ*lpb@oxd9czhm5S$p{?5A|2>-?Z_xH#~K zR9cecm&Ntt>D?FBy43v^H|3|h{a~$qy4p?C;Ov94Xv#xi-~*VV3<$H`JI&7~)7*n) zom;NmAwu6Kz$TiQ(T_-ajt69 z?I}I>^=l`k+>2W$>Y1m4jw$B6Bl4}vqfSTcn&VC>hwth%=aMQRr5Bylmm@e!GO`mk zt%of?_dm>17?2hj)9d#-W{9fV^HPa1Jh!hLf9*u~5LNp-cASrh$ME9zEe@Q>{wNW< zn*0$G%k`pVK!A&7Ti#Sup&doEG#ytC_ijhG4#&4H9>yH~*VWpiDtGJ3oK>Gp*O*=} zYFXZ_gA7(d+Nwb@7Vu`YalL!ksJ8QhaTzOQ5 z9*7nS@P;p*i20G@gvIU9l-CPSNtaU#TO<}(|5jf>QSO+O{M4Rf^ zms0JkYx-pCE3kJOUitE@w`CvrxXL%uyoLWf8-V#$;(R7shJvKRI-`%X>uZsy-x)ep zoJkqa2LPBkO_kPIseZ{YLDDgnX71ED`EYMPoj;!L5LWy2a$V<)fQZX09X^ zGQH(#UJ}l>n!{50@Sd)jTk;)>QKEPsQQHXNm-;&hzKAQXLYy#l9{pvi@S4Uj+JDL5 zNd);ZXEqs;@$Tcvg`QD7Pe$D@+1`almPdiE+t6g=xR9XicuW^$j$qmok;=2z{vM^J z5KoxJ*6<)pB++W3n=3h8>A9ttZ54&qn*D{bw+Kc2gMHQlFM=?|m8HIstKYvLTA!A}VmI9B;_AQ0x~nScd{9+$5cdpE zs>v~(%BUN0`V7XJ7wxB3s7+o5zwV%&CwTQRYMAj?%FgvRoU=`6KzC&!ht4CcG0--T}yS0A@v5m>eTD- ze&UrR6IL9~g5%uV5L9oiX%{bo;PBT%0%CE{0rLbo++RfPJu1vtUGkaoCVD|?(R}+` z={BXTyOCAHoer=2(OO-u+q>PBOJv8bUZ3-oJOIP8(*=31DwsJY=u=V*2{P1S< z7x4X|dNXY5h7Y1Yo^MFociXkPhW?M=UGL?%ga;czYa9sbEOFAAU)yodmT(Y1fnkQO zfUjO@JTFL>m%RvL!LRpwpph!FjO_Xe5LJM7NT*&eP{7fo1qvy6`wD5Ykj`aT*pbBU zyo3;?8BB(hc+JSff}4$|V~HTK*tj?jyqUE|Q^2KMTU*OaPq#2PAGzmm`LFT)XMw-$ zpZ6)l&pQB4D7xJ3arpoDEN3uX@QfKygZ z4m32hAG!FlNQ!10Kbf8z;g2i5|iQ*YuE+(C3tGk;CWOL-6%^rSm0Xp|IzPAhd zd}TdDLqm1-)U-4X-{%{^F&(r6GXKChr&Adlv?vzPb|{oBo{f%AyThu(4;UwsLWto{ z2y`b`S29TW6n903bXy}7xW?3|gEyb>CZ z%3xY#Kx6^5V5`64V7J-vTKikK%`WJd_t)nKu;T(ko89X4Tc0#ncvSG|`G+-(75a!= z4O6jZ*W?v(grrseM+s30AweM)F(D8hU(qEMCL!!0orD~i(rWSlo zM{|Lfr94YQK|@8uLxq!*_LMeLCl5-4Iov-yBp;-7BH<=mQ*0pg3=>0fikjEoWiL_y zTkS{Pn-A3iuNUvkAUB3QfuA@>t)wIk3Fay3zP9Gtc#FMn z9F*4KYDHE+k0Tc-E@S=6c2E{C 
z(*r&*ca_g%0W25zM(pw#@6&k&BWhGf&^R(+32Try!oZ!97Y)DjbdNAQN<8q8b4THQ zeUhqbd@fv(t_>b!#TNz~Js80+Sh5Yq;K`l;GSUxLrjL?L$B3RO?$Cq#7!*CU*=U@Z zXAg}gaXeX%+z&osY;Hh*FcCOxLMdT6r#=;teL^ZOk#1LMB2(S#Zgiug*qV?@z`57O zftZr-_ngI(?J7vF9Ye?}OLgK~f65?c@@;p&pv29lIodm@E2G%ZGDfE8JRbC4IlRq) zKlt>nJ>5uKP@=14+JHY;m}6nuLiqX+{qnyVp&RUN)2FB8E*RG%EEB7n{K#t!xnV`Y z!nW^<@TmCZ0aMezUp6y3Dt1(qoqUzO(BNt7vZqh1AZQ2JoeGP~sq4k5#X!fJb)yYc zg8`Q^&U!R1#E8XOp354~Dp)l-nFBrXw3)`vT3%ICs*-NE^=DTbpj8d@F+(tayjh(Y zWq_Th`%a~++l!Uj=w()ss((kVtcv~q{ZU=>C*CR|i!J>UhmxzkceT%D)LJ99PwKV| zP1jQU$wI%%oQq5E#O$Yu)74LKDPLadulv|;Ae-+UpMGn+&Vjb zX019Cwz^!1%!X%4TivI-`bX2fV>t~}oQWT3dslrB)VWb%F%wBAgBt#j8Mgu&*oji* zG3f?R3(0hLI?rH4=@r{|t^G!Cx&yepJO|6xcx|P>v(`S|XyU2(<`HTGotV!sY0;IX z{#xj`m-Y;|hMMlWApLnHDau3e&v^0#sB>?fYP&ELy(`Y`M@*Cm$uoB28TupWV%Wo2 z=^U-{x9gtke3k!8o*X~r7qL_U5ccS-#=6QwNYm{+=p3I$R$I4`>3&7-8xP){#ZurU zU&1m7!9*~9ZUpvB#>p*uMY;IWENnC6*n_PpN22I+Kbh^yf{8(;`^bpR?02wQFKN=Yzwjb1KDtT`r~E_q&0QbP;fOjI<%=Qv$y1 zzuAZHNhC9F(Aw1{CS@T_3WK%mDHnGdZF0|#v$QbO;acP1Ji`{SW20CTv|g-361zL8 zYR)<)0cmD`Nw}nP{Yi&}R>PI$dpZw=3!QNzmfQB6`B&t9-0aRORg8@>e%sOXB%U=) z8n$N^w`HR4tj4ec0uGKu=J*)|m6kzmRmX(^Mb{45_*Z%$+U4<*+4X zzm0SyMAxj`Z3^1*Y*J~uT=x2~9W%W(5qXR}3~RH3GjC9#XT>_LQ~&#BPM~i5uU!(R z1BWuEZ@b==2`kO8)ZAWpJM zz-AEpU=$-;KW1`5P%GKn^Pr2@D;rfTtu=TfJ=ihQ{D5S*zV38n#bK$Wa)g!ax>Y79 zJ(2Y*&)S9dPlfUfrPIaROL`uoOIlD!TP2Z8(pM)0$unBq&;$q1n!XjKgu+0F_}pr5 zh8M8MR{cDuFZ$D$?R9I$Afoc zzn5w?wGCu>N993X?gtHQtLyqkcX81c3SXo=}*+Fz(A9>{2FA3z93(M6eNzB;$YUJ z9r?c7(=dSVleQ1xB*eo zRXeP#-12V7=h8Yj&b$yyTsY`$GNMRVum;C+kYee=!JN6teGHRj}1w{z#^`{2c} z?RJRJ9Pjgax(LeZp}CSI2kT9A4g{hSb2s@cm~b^YDl-TVwq1`yNZ4{igdD1%OmG*IWE&d$l%s5cafq}VeQ zK`}BvnoJY_4z3gK2dGo|Od?=K1z4BcYIh`N;h+?r0ZKSYy5}v{d>$7afZqv}d+wvt zV~oAtpIJhw@ZzAn!B{Q;I*&oOr^rR9KMVEz;6GsK41k2AG#3GApb6%n$Q!Z9A_iQD z6B#kF5Y@_>>S{7L+Am^f$8QD_$!&=LtOXdIl_G!rqCyG&6~PS)0$q6|lYsJ3*asN2 zoHBN2Nx(zqjL2f)nPV?z?=qac=lZ8Vo8PY9dfZ>ns{d3~SG~Edy8ipvOyG*~VG zZESD<$It+o`oF~zO?CCa$+MFxIofwxmWx2{-O~8{eTd90)@P3j#9y`P1(WpBMi) zF1P)UQ)_Gcqg~GW#;5~b9fL;o?}>>Cpiq?%69aA!5pDoX(u@OI-D|l*1=wQF0Vqn* zm~<<(x|+a!fL;HH99D*Ax}ygG9CRt|jN?I8==k+3{_m2nX)l?dWt$p5<$J4#*Z}H0n+6BM--^MTh#?4uJnhl2! zOG-+aWW!0X8Nv<`?sfym8@r{uv^e zn=D;~l>;pke-Dl|kQI>1oFwPV=co7B_It19CmJwkSYVaGt4y%wQtci^oN!h&ma zkD_zK9rAI&>fZ9pe?tv?J?;>24i12M=>5(1x;Xw!XUaT=K8_z?AG?acYuJH;$4FJRlD@7R`50($IIk!a5N0ZV>gO%EdzQ81LxN2Hylx@Gfr$u z_nsqr>mPwO*I$33j8n?FI^e>UJx|jt6vnL)ly5?DW2@yHL_G8(X z5eoj-!bt~pz<|^EP3g}UH_^i{EjW!<4k=la&yy0^dqond&+p>ad$cuB`UB`Ctp?%S z7spm^#$^$%j)DW)R!G@Bek3Q6)x>4^@$E2EM%moS!99HpLk?H7zSfw$) z;!*N3B4;N$W8m&kK${AY$SB2Rp;mm~z=?ut`Ly@&6uF6$jXTJ|uJ|dqMH~2Fj!POP zg&c72L_(YXQ_;07zQI+1h|!DAM24mbxQh9B^f*ZP72bDo>)X3{G$k4{12#+OKKixz zmdVxlg>bN#0d+nR5|@^QMN1uOTc2%Jf@fNb_gjerZ2D6DU;q#LH#yaB3`cYCj$#t| zBxKgvDo?8K1eu#@6W^kW@!nWhL{XQx~1XIQJ_RMG?UpsAy!Drm>f)D`Ay ziK=xu)V~WVtME!|bLwfc7_@sAHVtbg!OH$eAw>iCHYG=TC^2yNuI!}}TkIR0&pT4V zxYfZqRccc%W8NsSuJ_iICTK1Nm8yrE6kuuc^w*2cV7c^5C*{4psj}~Q>?4&2Qog=i z>BU!c@)9RfC-Btb>*C+r-_4T(L`p!I_fNlpvk?yudJ1zDy9%BZ_%YC8*YqKB*s)y? 
zn{5Z8=ElTBXFNcrT}cq}`g~0Nd&wY{_?FEShOLLd#Eqp6#(8;L&5ft-&A|F^f*B?@ zaPf&WEser~BSy~#yK{x!wSN6r*NeIkL7vZ-uKh)w=!VIMiS%01?ddeJ_#+Wt&7ZRL zq$l#dI>}3u^j=uu3Sk_KqdV4wN&qMI)@l6VT&2@bgyQX;uI)v`{-)~6tx|ul4$A(; zn{+sy2w9;IHxK+9R%t!_Q#W3g4-0=lwbE~$xlWk-%6$e7RGLDFulEcU9rOSn7+6x- z!G{bs8dZy=UagsVO!lqHA&&7i_Uu0wRop_;@A5jH1tBdG!vxMrZ70s|2)cvLEbQnE zB&&F?%@lTsLl^D4SLPkR(H@}Ko-p#-QgvT2bYfKKn9)v} z6oQYVd`pvDO2P3?4b7^cnq0tcKZRlHn)=s4jYoe(Ma3;N`Phe5q{BeAdKOcS+a@xR zkHEHOO=h3W97Ui{Y9h&IR^8dOrKM4B6!RDcOJ7>a2U8d!C#yC5agR;g?R(72ox095 z?fSzzAolvD`-D^~LcyPrPb?QHtmC1ByWZKITc5>2Y(Ejy)`shW6k>OoVt@G;n)^D= z&DPnCzQHxJwH3a(^}eNbtI2O{wz#{IJc;Ju54m1>qU}P)#j-gS%lramN@~(Q!3k5N^C-V<(Mm+jbf^Xly4VpOm-V6-Yf2yYPisP> zQ5;Cg@O+8*hGC^qPG1eKtA#0(y7?D)-k`6*9xZYEN}GWkOpeQzPPGZOXRURuMJmzM zJ^d{(4%bEm9h#)46B*Lz#p*45f@3Eyx{1P#RVA<`!)k1X=Iou_Gd^IbNfK`L7;FzkHXmWBhgoqRD5aR8`ILJRF;0zHJ6ZrO!Tw%3WA;z13 z`MrNj*mZtNYZJ~IDW4|X%Y4SeEssnF6S3iHc*ke&w#-S|NG*d?b;?O!t+H1ReeY;k zX`;_v(2nGs7)Xu1N>5iQ1b@z(@ZqmuuWO4dc3M6ECB-3j*xA7`v1Aq>fd7eesUZzs z-luEl)Wf6BS46$Az?J;Xc*oaETN;NTb6KfR@?4@5qN-ju zaVDx^hk4?8z%@810c4C`EUenJ@2#0Le$tB3Gx^I$ev zp*h11^K8d_YD;qlx!(9^{j4o{&nWTeW6T)>*L0W{S8$kIstQqay$9i3RsZ~1?SYsl z>FAxz2||pUI7Ny#IGx-ig?H?brnQX%nYc|_YI!T;cwRnUo~AvZ7)Sot4t7E+1FAp! zodQo*EG3fNc@(YB3yCf00#%YGq$ojV2<`o3WCrr;H-Al0kc^M z{4pz}bxwS(q{JDC^oSnVNophB zqcGj|vG{}h!*qVEgfWxr!q$r5^NcyDF(a2a?Gw59zu#`;yBI^d|DWK6)j4;_X>VH* zM!W$MQBM*L6O#}y9Eg$BlL0um$*HOI$#RA$4$HNG^Xl948mJ&C8JwP;E<=$pe$ZnW z6%zvtsv!Rjzs0Y!{C||0`S;rm0LdP#=7yz#D0l~EU3y*nPh?N=RPz(;7 zA3)L+7w!|5yZkE-X6uhL&1AXc#Kb_+!X^BK34qhHy0iqRU^2}sC@3gUb6ABC*zkWS z;r}K((0)!~&;bV~?;jpYNlF@}!4MK-VkRXdd{tNHFzoqMi#`#c3Og|>==ESYjns&L zM}cA{PV0w9L$(al?UTJZvhgHh4Pe()v4_uWX`K zVppJG$`DK{uGg+%5!yLhU;rWmLFB!%qqjAoE*^!@6v_}K3pKPC?)5Y@EWLa5_v|gm z!#H+~ZPqQf_cEiB>LR_4Uyje7r}y3QKGf7X>t}V`ZjQG4jvJdjUEGikw%_Z0)?)1V zA1~IMz1D^{qa#z8=tdY@9en&^t_f>Er5)(l*s$c3q?RD{Pq$y)SFkJR5Uss zyuNn4wg$?x4?9n(wiNvA|GNp_>B79fsZ-K0UX^mxezfY*Co_%P)r$-G%2-2Lj)wNc zzP;!jxPN*NOk8246Z$qHAHWAnnM4AmBXoPKmkoNduwIG4nSBU?voy`?^+vz)A zT&XyDF}dsoid-}%36_p_v=Z0^&lu`KbW#ghKXk6xaoWXgq5HOVHxY9{Bx*m|1Z4(# zFg%IKNM^FbNoPXZ^pHAS>%KG6Y*%GDSxU3mR+Hz>f(7v>BYgk#P;v|);Um}cAeRI$ zjIn2gi+}G|*pZff8twmzA>EJCrA)NLz0RGNQZ*yADbs?D1RoKnX@>==OE=?9d;`^Y zi$e1T8Him)+A7ZBlCopZH~ZKpk*YqgiuF@(%3yZy#j-eYyIFPJ?(;)2)CT!BHhP~O zrk942UW?%+Y{Zx7uX{@X|i>?D5AR zhoG~&>g1;iT*T$OmfrHm)i)JCj@>z>kQPnHn?)#Vh&c{Xn=2}5@Eb5Cm@>wR|p}CEVx-|;+iZego>6x7)9kdXOP=f6Z6Q%}}5<2}fT)co_ zuaQ33z-PHljGya{miNw1xnkpgH#2{0xrTo9IT!{wp%_r8KBs3*lG zFL){6c`I_{FCyK>iEi6Qi?eW_vOV$$Crq|;7m8drn-rfUFgjZhYN~**gRLG-&a!m zZ)>CveW!(Ay|sIZgPzZCDwfann?QgRT`F+Nj+XV{gB)TPt8a(V*2KZ|lMZ;{cn4;KviF>RbMh02=d&-$wnU7Jr@WWnQPk_|Hy z1MJ;)l?bEe33cw)dbXk5ymmxkb4k5n&16v7U#(uR!5|o&8`x{_76NaK^7guG#TRQ9 zf~HO-t1RX|4ksPMG?!SD43d)(t%z80V1#@aH#JO@4Ib}ee7M7Csz}ojv(5%rs5v+NU6zJHYI^ignw}33tmHU870GT$^yf< z4Pb`YbHq2DhM!4H2q|YFD@DM%$TmgF7XAD!=a8s?XqAqlW<|mPdl=HzxiQ0{seC9I zADT#>GR*fZUM4k*tlfhmCHbz|!yu0uXHET&k^EN*u1wxtb;s!&X6y_{GHL$ODXSpc z?mSQ6Qi-^-a+90%b~8-<9WuvGHn7hwatLx~oq-PFTI`|i9FBXS+DshLrmt_18?Oq{ zV#AY-_u$|R$b)qPf08K75sB-1xg&Cz!85TAMfo|Ju{Ia0e^zmPS>weyd^vcyjAQdA zY<2U#BeK#WAL7lS?POB8L)}*VHz(5Ba3)aGe2tthrRyI>o;>43SG9TTUMucFvl5O*ouOWFFhnp9(qvo6o0 zQ?gppRn=-5osixdN&0*2*OcR0TnM$^mb~qxSl1fX*Ivggon|seL%)b8mOtZnvkXBB*q{R%NfvpYKAN8Fx)I2aFW(}T z^Jrm2X-rh8@OF5C+^n2 zCiydbrB4_rK=tq&=QGZC=1s-FU`EL3gbb0u0;`L_>lABJ4aFH7*OebbuCZ8No|S{J zlyLTGx7oYVuyN641`dgTJIbAR|k9`~$uC zPt*xI>jvb5e^Mv!?A@GgrhSU;ryr*zqLkQW*3x7rSmG4@#LCll98nx_rz*pB3z+)^ zHL25PkW`;SDvD#;%V*EkAEvd?UT99pw}#4~1#uy&te}a&^CcaU3Jg%Mg8pO#$>%D| z2>9nYpq#fP^bMOLOjjGRK2Ar}v_MFs;aov!R98AA8rk#XQ=tZn8jP2Uh{kG)aJ_k~ 
zZ>G2}k2rDjQL4$7OtWviEI3X)qFGz@KcwX=m|rsYOzNm=pFYCYA<TZuQj-7+ zNkkQBv}1fRd11VRvH+}Ah$!Y@F$lR1kK5vAIMijh#)Hd^4gk^gDKlHxZ>mA({rZ(< z@rg!d^zzakc&g!cjX@Val-$R31O_slxv6R4*D_!y(0v4^#pmvLhGCqFlG11+xdmMa z4TU|L(&HH#`-_14Cr=F$)c^5MdQ|J14!{3l<63%pdiJm=D<}XIC_o59ZM+`O6#Nl- zMdCP}2ZjihbA$kk8b|jTQtpxxDqpGDSXEjEn^{t@A~p%baE=UWsW zyPTX{v2sp!N{Xh6iVD!%0+P9!nkku?nE;3>F%jy9laq5>4|t7+Xlj_6!2FPbhVR#p z!pLaE5ET{~;GRAYc-H67U>`?!CB)(~Ll|^!Zf>FO&ncKg!{C8_B7s?OJf0p%2pSWe zkPr!5TwJ_Y@%ZvcXv4XERZV~Ao9f9oot>}ocA0_`4r#06UCiF!l~w=bM@MqmZ@L3- z1+dVKY4sfykD8}0(ia;>?Bl=t2|GCat*vEIv0zAtJjCTcEs>bhr7_oo&`N?xu0RO4Q1~A++&%DD0vCVz6712Nwf2l_k6X zY}Ysp{t;KRycc@AxMn`>9%~Olb znk}*GotK^(j8$LXa|su@D}C(wVUgW2gK=UHW-o65X!9u6CnT7!}&fHzTl+ zIIgA4U(F2VmPMLa@pya@Ngo-{UeX|EG}UJ8nQx9XWSEU0=WmB3p9`9Tu6Sf~@|O!B zFL8S}U3Y}=^Sn8}J~)Q$ry%+yP_BaPIcWvRO={BMn^gfms^gbopKTd$fyt{^7^Tl- z6gD7xzJytrn$=JH4AXKfk6_BJrZpc`vuAhkO*8kcW8>TN5QH!h{*6b-jAqLH+tyBt zr5E}?BU7?p3j6;i_n{_}>&(Xj%pHkMstLoOb$d9VE8+0G7N9K_)cs0p5>ATcT-6l= zNF$PD0iTpKLcmWlC6=vUKK(LJMxL$An>jyssikwMnCq)5CX8Uz7nJpVF?vc{ZXdkoey^ZnxBeoRNJ`o8)`^l$IIgK<#%8{hnjlheEFc`zg)U0`(0l0Zs*n) z+jjjE*6i}GYhi8o+uW2l_7(_}#UNGQg!o@kuf9W&o@rZ>@0b7MS zQ1Y0@_~;jyFpjwPn^ZKrYhB#X3!{U&Ql^41_5D#KSgueh>LkpB2{LcISLboqcl{>9 z{WMTN)OzQSXx07>nHu>~Dg>Tfe|31Q3Nj_wib%dZoiteZ31{vjAaLP(9yN~UQ6d(> zoVQ3MZXlnV?e#|2PC9IEkZGbANj{)UXk?2zVn91D{x6 zNXIxp36QXwDEg)17nF63N&m1UC~#}3r_plh&>Z52QOO?DCD5WdR}s2QY56)84zeo$ zBjU4V?`Kd)pr>RBa%CM%N}TXc5#;RI4t=N<-yADq+iA0GnDopaBvd0eZT6zuie=?9 zBL7Cd(=1zNAYLssp0T+P?!xeFr2pe@Wy?z`r=0Pm_rq0T|k(uJdTa`#xqM;j~G zUYGS2XF0poMBW5dwTgyT8K!Z@@F7;3&hfJR!j2*@B_mCg3~&=Zjz>KffJn*ytB)%Btdk(C}PMz!ONekr|NQC`l>gZtjn7Sen9 z5T$_vY@VY4k9i$h1`dit&b;eiDVRgQTYAq#3wBgVas3w3lGJy%~e!-j=|-}b0}OYvVxd11V&c6%{m6Br!K zd3=bDWwAP6k>5%`Hy(?5JN3AamBP2)NvZg=C~XD@;PP&y#f$!~%}Sf?Tk)<8imoyG zqb8-xW*;nL%|$&SCtbrz<1^;{J|%XgTo_Ufbg+~jUOa5MN>m7}qiGU|YP|n(y_1`R zgXgTv7*vga4)BuZ1<@F~mcFl+jxUpN*rL)t8I}*85hPPP|C{6-rfU-87ybWt{`B#$ z7q-#GB5L98ZlFP(gWbJ^sM{_YE*rG$>XgI_b)X9WN2qM@2%QOPFBP1kWGQL8C4FET zF^uII3G_IH)iMvtU-$m^k9Yt7T9w0fri3g40KeZ6kSA+}}LC#{zb)(Vwj#-ND@`hPjkm~m1VzuF@ zMvwC1333%ra37yp)W4iSl!_$w2g@vfqnVAGJmRKe*mQX>i{add+tsLFvOhAU?bUcH zv(4+OrJ7!??&Kfg4nlG z%gCI5NvfHZlZWI%IZ zdw_|JwY5mHe>^|17^zmG)aCu0AUqEAwx|@+ND@a`aQ?r-tN#TAZcp_WVPApq`hb7{ zV8h275oH&RfCa7u76xDlwrrED7)^I!VPUPFfOhQi2`g*HnYR_O!FRu-R8`v|gZ_wyy05fKUOVTIy)YKG;pSn1p zC{>gIajCCQ#AVV;xa^;cT3ZazoLXC38yg#IYf(eLmz|XIvOI}o>t|<05>b^J-h`Bu z(Yht}`m*9u@6;h86&encb)suwu?zNSR-QpNIjTawZd(eq(g+o*iimvDhDU);-`+Mn z?$|#%#5;yR^fmI$Hp-r{p9y9B{P*)#O|{m~s>_TmRzKk2ANu(VtUg4P&h+P8ZoUrw zSMvu(CmnPi9u@%x!pQYql>H4qmUAe&5T9a4B=XM=v2jME(M-`kCqM9?ED6*$?6*)Wzw$w z>#!2ryr#UE<>X%%fq44T2Eu!oSovRv77$w zli7k^Q=PmQKQ6R?!`$jR`rB@Xg7)aPKS1x_(^Sg)%xnW0(!RofMa%`RXhK8&Feda0%Hl8=n(jD8?0eW( zkU5nz6<@Atj)J<+ulx@V(Z2?T#}Rqk5wy>ioxPtc`iWyzz(hS>&oxn$9~*ZT5V>1& z<~O+P&mxEihCwX@X*30sxzBoJ zkKC&H-=AcUaeS8i_RHuX_zEltQRH%AO^z8n6Om6EHqC~A2S##nUdU< zt~hg1b8WGSXR+$0w09?gIS6YrHvRd&bzh5C&2KF2#l*ZX_IXrTL;1B0TcJ$2BGHdQ z;f0U))H3j&S!SGppvH+s*`@+A+^@Kyb8ENLkD%K>vA4hpCj zY{>)v6RI0xzAu`NgykDzrO=fgY-|^|qqJL!@}Al0cQzVtTjjgmN;2Jq$v+uN-2*Ka zzAmkD5~E%&Uue}W)e!zVmTDOr zB%HAh*0czVe)NcsFxzWyZd3_*iQpfFH_5EKv&zEXld{lrf6Q1C?z7S=`+;Xbj>SMH z+dq~;+W{tMxL_JrkBW5iqcM99l6(hZvNP!fq?`~A0;S92Q*xlE{cYK)bA`?+!ZkLF zQ6_xi@Le2wFcZRx6+J3a+^l?`VmtBxf3!pk6@N7+T9ta;3WIJgv(BN(GOlU2tN-{S zZtCqd#Ah|zKOrN3T1SO&ZR!TUdRnyZ2nZSp$}?&?YKRr9h;YlKGm$pP4KzzJ+PJnA5Vuoj6_!*6>k~G7OXcUEKTQaA4(xtvjXZCSYz)ey55BVvcC4r| z3T?7(mlKetj7g|vrHFFD2j$6&*43R?z?isoWHKbMP72AJ5flhf#1R!1KB&+gqbH8BRpj!TEBU(9JWRFBbADRv&*)2I+mG`$9$ 
zK6gEv*Pn>diHoGFDG11N1)dT)HWzN>{ulsmCuQ{I6}9Ym$Ts?5G!FalsL}+2pf{t0sY!dC3q3|d}XsP)9e zx0EC>@j$elxy+G)@Lbb8Zh0e`JvbOn<$L^L>HqS3Jm}wAeeJ=_kmt9V zm?Zo@qygP^G+4InttGDB@VCTIKaj+5he$`v5N^i8C3B_W$-3V1*rz&bD*XOspyjvn z_oGg*!AtHeED(~3x@KTY*gJ`-|GEy-PYnK*4Sl>3%jrt-@s7G*LoKgffmWJOSBfF9 z)tm9o-5u6>Pb&{?B{T|+M&^zo6cu!p)`McS$0JKBB6-k@`e#~;FN!P>M#Ycdbu~&{ zB1r|zCb7S%4gSVZVq*ycMFk3=vPBk!ijRn~gcnpy1UDqzc?-H_6%<`HX&i$ltW1Tn?4p2U3-6K$h2ajX1AOk0$HfJ!iGJX&DYyY)!nBcSS6 z5jHwKtrSA4l`Bef?7|^xr#V~3F__mXkAhkea2ZNJASj;pMhto5Vs`zB3v&a+QB4>jWn*4J&{YUS(w~g*3|w&6 z!);VYh%e~3hXWd-BrKrs2fXVhb(ZczAdW_F@12H zLr&4GV|AvZ=w<5oU%tc+e)R=DtSs~ltzDnWqz|Mj0WIkX^WcHw&?{+`>IX;0rx}%1O&jj_z)lg{12)JIH3v(NZNz~ z?ktuhEGus3%Mj-W06Q}v#eTK1tfND>N)x9P@gZt`&S=AF{pM-@t_;wZ&(F_2&d{$% z2M0j9Kubfzs1OEG&VP_BskT>JPfzcwYJH*YDATz9j?j)$B<{EPZ$?}%hZ_y($k8wOX0MMWV2 z-xmb>^cu)}jJia!G`JY)mEeWnj;~p*4R3z~kXZq4RSvIbdT5{IesP7Z(nxG!P2iw+zyRf{i|1s`+Bz`sn=SNa!-?B)lM8TFXJ_-%j9Xz&HNC z+%5>9vVgJ}H-O{^+GBte*Q)y7#O>mcs*&pA&u)pb1+5~KGCODohv(wr!t!%bZd)R^ zpl1r`fAJV%EdJ;Pcj5Z?sg{U_#KJ|3ej?WJPYZ{2pw-n}T%V4T#q4y2-~0k+Gvo;I z_j_O+$gW++0&YNcow$Qnz)fZ)j?9NCx(!{eE5F^+O!fM7cUNPOrJ=ml^ap&J2=T=D zVoGvlz-`1mmLaR3j_=NxSS`RBgxJKp{9Qk=n;yL8!wstC_>E@O3xcmEHTN`(f*DpE zGT>?@#WmgyrxBop)yn)V90bqh?N5;{c`LPy?E%F(<*=UCb)CGAwdl~xZ+HdwEApu2 zs+?IMM_M=GG1__j7cT%*sCKAysPNfS=4gTGXLNafA4Z1Y2lna_&!do4S1~r<;;#DyC({w7N=p0L;@eqRXeyr%u0S1zs z(7Mr6`^<%qe|oPz!1wBHIHL zK`$jyuQ^TnxNDpOL@)SS1YMMwG6)W7`Btg#ktnFeCyl^u-ip#|O3!^ftAoUpnFXAw zwBJ8-qx(L)0&Nl#qVd7#M;5R+-18f}gF9X@0l1aPfls*8xDF>J_(lczc2)RxCHQ~K z6>ij(ZZ*u^Y19bS6MOb6Bj9WxG1;M#*j2srUB5_%q#C~%X6>5f1*hiUXtinv)82wH zc~v9waV1YsOXGEUWt9bGbPh`SEYTh!?P*Wvntdx z^5;T;|IJ=6rMCTnt@e)4$Boa36TAYy4}EIeLL=`&Gw)I}f3M>*(Nt5#`hww?k?QcE z7~!P?=bUI|kNCv97LPK&oSb)0p9kSZb6}uyb?cDo-`C_T0m1h<*0S7veP2|5$Vt6Z z&4!*Lw`jO1nl1ufSt1Do8Q_pF3?xyx8c`XDMbkddo@mrKhR%_tegpm&)ik+9(Oo&< zkIgtkyu~KPD?rLB?k}T9RPUvIV_mBpZ^EzrL8#xa$_^40BwZ#e;?4{gmgD%>xPBj$ zXxozQn5E!8VBp-NkR&rF3%#_~NQD%ouE?}?2onaY zB2t59tS*HKcX~q=f_+P>b449ZR(>c#scp!Vy4+mPr$AH+Tt_TV!g(zUl5U8h>LRN8 z8tXvMS6XQd(}p7Nimv*#Z;MRWMg=D5qS3{r*O2WxBh{?ir%}P2<`KwBV@2qOK5^Z- z0eSH)vG)reI+Ez%cuXBevp>{NOR}w|q%JOvqhHn1Gm3G_gLnzKane4-nm1%P;O8Yo zESzk4NG;vw%Q#XM=8VX5MIcl!tgc_y9(5CD%|}N*?-72xAHWDw2uRBssnsVgE<=K& zOSdRoM>A@WOI^$DXGCo%*m>0Z%2=$$OYhmWe~_~41gn>jB#Y+Eio_mM2ql9avX)!s z&ASJaXJU-YNaM`<+kllpmDHL8wR+Zq+l!z(6@A@?{DckjFe0t&`1LP~DGt@ySIV$F z)Bqs^FLJ#>8S;MPuqzDP^EI(%(psHmak|5H#5EGIP_{Pyl1hO=eu)$Lu9@@ooOT|i zA$SL-wqGSe!Msc&L6A>@pZbZKr|>UBnFhp`_2Y6cEL-=(O=#48ru4``uZ$9Z;YOOz z>M=kFp*TA$R6Hi7PO;7;Vc)b4`VYf@>;1Z2+j#g4IJ#dj=2 zB|pn@--rX5_a+Kh9-`4jx}_Y!lDVOlzWz(6Ikzf&t28gPuV18tl2wi6F%v{`ZVS3$ z920}a;zj385ArXL$G2MfXy2ie@J_3fp}{rH=VW7>`Y`!f{yQ6Yzw#LJqt6CpToSfP zQf!jo=HVJCUq(m;`)eg`-fB9F7=Jc<}YpEji8ZMO41lf{^p}1l)N?yvt zL*m;IKKJ?Y6^WS*k@k2%L&Q_MAbVm2!2U8EXf3L++4{ZTa-2`RjRS*!AcEyNZwQXV zaP58|DRyav|>V^;Wf)9!Y-iKaPJ_0Lm+H%>=k_d#I9sA*md~+bzXu(v~2i z^bA>m)9%S1pzqK=q8r@VMEy|Kt3XgooK_tOiwwv?0U&5-LqT-A7c2Zf;z&I?w&cxT zwNt&4%e2mBi3upk>+5f8nGmNbSy_T+V@ZJd1{}HLdIi##m$g&OL`Qo#h}ld* zwKpQ-n-|!=$g##o5s1nheYyegIY~*-d^P^-b8Ornj_fMf1-3gB%r>bEymRM-I1 zGXP%+7(ZgF{Z~gk6;CdW4XmF74rArI^VPO!?Pnfn+;H9n-mZSnHN~~q3}VG!@nJq; z*bLe#K!NAmHw0`%M8vt-SzuAK$JoPzH*INgF(oNUT~iaRU?pp1ps^Wa#E*Z6uK>)b zM)^NeYum#hRv8BM_V+`<5#QY20ux4GuxAD{8gadQNMN<0&zvg1l!7Bo6&y#hudW>9 zhDBpUwv0_-?^tRa5$#QCpaI(UR3;4Ue6voG!x5BaO2}woCD7Exe4#S@4mghM@Ung`&^ZJ z4bIOI2bo-UkXp}7a?7m9>!q)ce}$cH272w_USIBmy@Fn!ev_X1PxYE&IU+df0u?e7 zx!C4fmSs37Ie^9t!jj45u2pB2N)fs3C0ib=sJ;10(Ydp=IWP2MOFgW`RlGC!Gq}G& z$DNv}GyJF3&J0pE9aL7-s&1FiUH-dLxIsAdZ}`^lyeqM6C-PS#wE2Sb;QB+u)zD-6>T6chetFomu&u%Yj_GF(l 
zf1f*|gcct@m$}26>doHJM09jjbSFAuNCQjzY$d$>zZ6I$A)-s|+7I9|P1-oC9ec0K z6#PA7jSZ(}K+-6hYWj~mN-rMtiN7#OH>%;kN}UFG-*J&IIj9_)@RWuMf-aa#Uqhq{ zc&ki$ff0lI&XqYuh_`N>-CkVwyoo;?st}vMG}H%L2_-kgR21 ze3%TEu^kVFWmN|B88HYBhK1q|5_dFA4MU~VOptV44y1+AM-1*T?A7BeK>50l-kLl~ zo~B-?3Kh|A-7h51{?t77mZ-4jqNxz-UnQ=WVFvB>O=8nE_Y%7Pv0w7YG#`J!;hA0{ z>37=GC*}DZ??2x3C-UNVhAhX9;*zCxY_k%0cKK0kfzC{EE5;T0N5yhT8vBwo2!U8_ z#AE6>8l4?%cnTzc^L`yxD_n>4=H_kNfsoMkYZ+_zPijwmL?!?IKor0_5smmIBWV!?z$b4b#kF{$W=T=+wc#~VG!@){iK2V|IemNpUV$eSvfg{w;<5=gHL49 z#J|J{RBa9n=Js@&)_$K$olnFL7yXM%dAglwb=bqGdHAz(sMdxCLDKP$E(RtUC}^&tFM6(Ism2dbtcEyKq*)614J*^jiG z7f-vzv*h%e4)eFK+!t#nv^i0k4>PqVkFXY-=Q;lj+~0wokU`EE7V#+zQ`5ziBUp~j zV44&D?2;Isp^Gs7!vLCM#}>e^Uh2Xx2D(lD3^`?;lo65IQ&x{;=6Lq97sA~ho>gy7 z$fP$RpiqF3E&(00QgFuQv*GcD7cH;F<2*CT z(;@f}fkN6bw?vVg>!lDBiPXj=XfFUidSr+%{dEr4IwdJfWTt>py&~O`P&~S@%)ITm zS0>P-oe=j7o30I($Os-;0iEmnP-}r@9xnrXLV({DAG~hM0NT?T$Z>I-!1EhX|>=@?=g|>-$JkleTEc-Fa zj;D8La1%9BKKAKtrp>_&ai{}{cPb&9UG{cb2AZ)sQ>t*Im@(N@W~W9xCeBg*3??Ve zDWlk+2b9~^G~($pW6e|N1%w^^pZ$!)Qk)3(MS-@}(S>v>!hZ}F@n&nd`?2(w*)a&(jw505{aurOkeF-xeXg@XkwS4N7wQ z6zjtln|JvVRxG#O@;3m|xg_W9lA-`RVH8}8buDT?&0P^XBuamR+?YQ-t=_nU(@n3Q zF)}*4Rlt-M88B!tguV~qbE=fqI2hMEf^~dq3vS(ig`}&(YqkX3h;bE@jV7s%Y*Bw!Ob$D_4n0D*?7?se`k*N2?^hlF zIQJnJaj+4e=XCIabrds{M|3972>Fx)58`tbCzkTCJcGnMYx~5D_irgZmoA7Q8k$Sk z+8L=&7dlbS5e*tql_sHT|`{X3@GB03Om$7tyG>Wy-;S_Y&wlEEPUL18|V(#$#ei+o5EdNEgMANvq z8}gl2l-J#W0OOFbY7Dl(c#0)o+O7m+K1+P0mU?(t0XJAwbtF+nE$2D)N@P(bbQMlW z+(*293(!iYsB=?V78qm|h^k)_ygft~1^Brv&SD=ZCZ(v7f2h*45cpCkdZQ>xDG^|8VUnZBN?{=y!itCE2NuorOCHXfBoVsgINsH*;WbW3u#tJ1kduyr zF|Z+q>`xZ2Xb~XDSdqG5$i;UkL~NSvSL!Q^1_}N#qMkw$+<)xDzRU$ZGtz5Tx<6f! zG85lahfaM-_}cRZL=96?QhKk1E{F)xYgWzFF=R=O-`i(Sf7C`pb`=iVrO@G ze|tXqlk)*soy7kqANK&!+6^Q(fjgu5)UWi6jBlEnAuzV0W-R`3$;le}`pBy&To4T) zFKRPWQ&4apuzn6i9seIqcL-*E>6g11AdUlangMwfz@<0+f5cE}a$OxO4XS#SFt#iI zp_o4cfQ$OG%>WdR{sCflE5Xkr`p#LD9w_y!&&@%t)?3XL69FCjwHAkqm!^x;hks13 z)Ex>!g?t>wH(}gK$q5OaodS;X{+c*gSOMNjN=m?hlQy)+w8FG12~-BJ$IE{9yP%80 z5swGI(IITHDPWtqXw|ACnztDO4rLi!MXF)2b}jpI&@G7xa7o`g+S@&u6|VnXQeoj} zSgTB;=j!U>`opE^(jxkYzGISU)XuiM6puHTn-Ew4^-ImY+u2fKrB0o_TTI9RWs_fIvSjH-Y=YaWqH2B-9J9u5Ao4yhrv#WrfXZmRJ@9z zj;;(c_Ee2FpY-fQNh=Yqbb0+;nq@NF_yT-$V1od-|9_m6b4}@ik^|v1wtNyxbm(6ihAZ%jP-}W_4a->24i(^&Kw+5t zODvYc%mH2NMx+D)@53#F}WTC)rOGW!n;x~al*8QF71 z=*&&iX6ik~t2m<3=d&9pANOYXKUzDiU$Wf;u}rS)Kh(`2em|r%$22cKEH*zhFUBm! 
zOrg{+&#yvbix$@%M4Kb01tuHT<(1_XRlzV!*<>tDud8gS{`Nh5(Mr%FFt;srsDjfy zTKKa!=)4v28bt(V0YN!2DymbhcP)Qd+V`(7W-MBm|6vNz|1~ENdQ^P{5nIc`=|~fLAsI(aPa*K7I{5$FwnXZEcBoq4f2QL)bsqVw^BXm zm@GM#V??yV#Y*Z{0?l4)AtL05$BIY zYhog+V1|xHVz(#z2~r&_Zcng=6m8xpM78+S>Z$U4Ysqi?t`An_q=Asm7>ekC7-5D_ zVo}gw)z*QX;?yfKF3)S9ZLeM_YhJ8oTFPe{uVk8QY+i4#?ya+jhgK->*qFaPx4B`o z?p@Sr{$Ym#giGbWC@Xz5vF()fmHnYA+9dwo!jM6CtQzjP{JST01my10YIntCmmInx@pcW-3lmqn`Aexh3s*MVq$Un$wb0zmy{li_%OWXRT2nIr4I9emZU(2EoARy z2-plr8AKe>Pid^={qSM=hq3-GBz~!;5;toi|&rvMx~t0 zfdq2ea)|+dYh5%*=7g_nBR`krcX6FlTTZ-9C~FnRBqxNZS7x-&ei$tV^O2nkng-X} zK!IiQcBr>e_q&s(rsXAFE#fZb?%N_aCORZcc_pVPM#u}Yv{ufBpgIBzk0MsQGs@Iu z5hxY%(qPYz%}ZnsE1u*FjO#X&&VNH{Q6t16DyVlqG& z$DI{x?hcR0JUD4pH1TY3dIl>`YJdmr*%WM0knBz(dI%y73R}ycRuLrqEm;JQa9^_p zO}0;D5}l;r`hhRr|0%pQN=ihUHe&Txj7n0j@KQv4epP`=V}Uj1fd^lH7|n;NAAZ&# z?2cr}@2vqc>M{W^4e)t!v}Lt?Mwzwkh5&BFCwf#qN&EoV`ggd0)HW1e0_u<3bV6aRV}>3>H*> z8x0sAQqq!CBuj334`j_Ci9$7M2+p9(oujoz(t~%3qJ{SUb!vlrp8v!%hOIVrd@%&Pj)Q(0M zyzB$jXBdmcAvvUpuw;@6={KD3XLkw3^+IQwhn=3l)6%Oz3}M4c2cmNr95dTSO4Uk* zNe_G^(gYQ0=YNn!r{|Xn|Fm0aZ!f|c7kDL4I{aW@il+(?_0`hCsl^L;F0-rp4HNYr zMdut;X#+;_Y}?vwdvk5JZM!zxwr!i6U7Ky&n`^_?_kMTo-%WGx)SUM`=lo7LehfeUWeH=~hlLGt$7QOa)Db2MtByMvMW`FVmdU=inhe<HZdCkC-vjQ2OJ#SF|%JCVJ5%@^j(`VyGCaQJ|cD10oOY3TG?bv!AZcCB*7_QX-kR%ruonXSZG+*yUUwhF8%0r&I% zEk;^eT1G~w+yp?U=jP@nCW6wcsA{GpB+dD|ezo+s_;$VzEe;=i8xS&sq9l=4OCeDGR4@&y?@$nJ-QIBgdNjI+3=raNc{Q)y=;JE-8 z@zki*hGYapWqdsl1vnUvu2vX5?I5%g5qlJm%FVvk>Bxzj&8sG6zhJ3i~rdj-Ok?5#Z<59xBBo$ zrK?=QHb3B1kZQA>c+(eQb(7z-c^vEbbzD8U(ffYtYTaSU!63uu?)RVh);H$h%$xTt#Q^$37oJfOeA&QW zbc8UDT{i{6;I|YuuXpa^b~GFW?m^hP#NKb-bP|%$uy{v@%t0O%H!=x(wd_zmh(n1v zcx$Gm6bqISJBQq?l^jjdVEY|=QVeqy_*#g!a^=BcdbE#)(4>MPvXj02_hElkzI`g% zL`%bq-W<3-R#M;>F(&M?h8u1cjG;V|Ye)2_*+;ZHcA@Mv;mVQtOH2B*OMaS#u0wt+ zBNS$TNaRC{avLroVbM?bg?X%ee9#T zKQyoi0GIUOW_N+!`Baa2Zh7Y_w6hYYx5o7y6Z9o23EP9))D&N1W;)sZ^WU^Or|JvG z8at;7JEstW3{DH2S<6V9Y1XNpJJsMRG7ZJ$jaDMX+80g5@%e%UaL{A?p6br+C7EsO zw!yQ7V<08U=_L$dUe1)_dF~pP#Qn4FX94Vke|$SLUU%p`jG_seHnVdj$k(D121j5~ z|CY3ncA}xHw7JWaY!;Dt0%m{K%6^?hb=&(mukEn4-af0SePGPCX+1xBK{v~QX<&y> zey~S<=4}Sz3;Wp5uG%8e!8|vmw(JORpU2G@%t<+lbNc29ZPiLN%%&-NQSsWk7P$ci zjk$-EB5ge<98&7WWe2$SX7PiBV98D)jYPS{NQh-}v>w5g`$I@k3%rPj;VVvPH3-DR zS9O?7y|e`_FJp(JhWrT@E$24*c)G)kND;~%-A3wu+C*dJkPu$h`~Aq?(9zCq={C6m zB+xOhtorDmQsj|dY1VEW-8eU!M&tx?%E?ydq+>_$=f+{{M&5M43d=iVxg8sIm0c%mgo#jRW>BkN!TT4RfZrU&uDXcz8@D!y<8(KI9_xqnH zzxxahvl6ZBYN|I{5Gy2~!H9g>83GNuaN;8DZwFkVA&XoVypL*jqzvA^A+o*XIo(5} zC1lVIEWB83f~en;hlM63^Um|D%=#_IVi-38$cA{c^X}o7`b2h$w*8co72J)6!No_} z5}Wnb^!~aRZ+h+-wViC_^e zXO_!>r#rW#Nl!aRutb!sb0p#wrjntStR@2QUlH#}4b)97msNFhuxIwpGd!F;GOPsw zQ6_OXC=ieo7I@;Qio!7@1x3`f63nLoPNVE5c)2+_4b90p4e!@e{x{y&S?ca?egEA1 zEG}u@XsW9J^!%*uyyz{T@s_W9dN)Dr(>(KV37JPRv?_wuO7FUQ0NX@1&-xz2^-EC1 zb3x;yh2^8V?Mw@$47bCslv^wh^h=_IKh=U#<*}>MoWsporq(b{~rCiUVfn7z`|Tr$UOZcWtdsT%r-5hI*HYxg!{wF^D5bN)5YzBRB*Y6pzq(# zy&F@Hw2V`1uLtFW-`3I1|b(E`Z(d2RvsR8vslE`lq%2q5P zARvt0AnZaz85Hb-YEzx_6)0JAoxhvG5y9q3tdT-d2MKCo#^OiyuYHIj`U{F1AW=q% z-s#!8u1aMde>Ido^ z#^{Cy8j{FV0p%D1dbfC^p#2M`Aj)I|<5?f_-}?(PHFfeVZMxQ0i9vp%iaUKL1T}%1 z>B#4tJV&0vA(}`kG>3_?ij4lJKj)BW)BtHAcy3jH|CPm;h~>Zow6Fi?ab8CA*7aFG zIgdYDf6f?uy=h={7iV;~w4@Y(cA^h}_6A3yDCw8x!ZgG%P9CpfKk3Wo3)q>PrzIrZ zKAtZc7-WX%xn}>f#sqpljZIA=^Nes&p$t+C6~{mr6b>1=y{&BrxF`dAo&V;45ZkRc zL5A`GF8~fneI;JF3)F-p@5StQdvmk0tc{I>26q508c+1y>treu2%rvyA+m9D=CJ^& zYAS_K#GDh_3ipqhkX;44LjFxsj-^zX5cz5 z#jT~M7faR~6$OW3Qw+lFu4KP))pD0S8js6m{x@Tv?ajsWu&i)qtc(fJUb8 zy7}a^h3~BS%(QN+-*VgrA#vQ_Tc?l1Xg_S7Y39mCjiWVmHSv_3b=NJZj)TAbB)gYFHE*38uB67NwQ?a8yJA-EdCC5jyI>{e%P* 
z*YEseDImroUqz5X?LcBRP&ZG9ipg%+W3oLe2oZ8J{c~nasAnDsJ^1~51Ee+Zj&<}? zG|1(1MLx*J$h{M#3{POr`PVQjD*Y~tYWpPpUY9->!!N=es(1ZL z{6YkX-voTl!oMgc<3EVL-+BsR5xwz?P0p_VLA^=8ib8>JrA$abiNw!`Im7}D3?daG2Qefa+Gg=WN(7}IvyB7P04gYldthw` zt754l#MD+SVz(*;+>FT zB>1oeeA<&HqOUMiZ$Yt}2-Qx|g*bJSFU#JfgESDSGQ{qZcBLdvX=RbSMPb^$jKLCJ zmtB;B8$j>#7_J zt2`s4h=HVzF+zL4Opzo4LCXfKidOksoK1Z|4MQA{exk9?%iZ-PpK_+k znOcFF8n>2~xRmyXfjAvj82-^phbkcwO}3R1YB}|-`pm7$45^mnT@;uqs?i{VMw%>V zrM6|Xa1uY?H22{IxtSGf27w$$?L^ChEL$~RC9Q%|ngVO3{;ATx@eH26wy96Mv7p@z zwmyY?Kh7G*+;sCunQ3F}n8u~9)fSI~4qRWaW!vZK4PsTlsEFgyQy*Th73{+W0V1AL zQgwGNSNawRJH<<$cvw(5tSfVVwPfTnw_}vICS3brUs2NgPejMF4+!SRmFxq-# z?MHdz6P@5wgV>o}Y}K98Q9Py}Eq)SQP{-S`vmKb(?{5X-t3?kp;uv-s8g-7V^iCrl zNLlH^GfLn7{-}Rgu^r}Z&%kUZS~Ow5?p^*zTAE%A$0&|PfJn2AywPF#eZwPMkh4l4 zR@`JlG4rfoj%outwR;IOy#w@7Crf{v>Y(+0)zZgiXf9cc6P-_GZgJ#;HPw|mJZwM~ zbf|XkZ>pD1X2>K;BOdH#)og1O`Jt9Mk#<{h$>iUk5(mxpUKUIDyga+uHV?wRZ>|Nx zxe*eqIVcI4r@UvlWCMH1CO39C+%%KOQm)DnUBM~F1x4d5L#&$ zli7da6zso<&d*e*eUN)^!nbzEi%V|Vm~dSql1014sEq7>km67Q_?+~xNN3?0U0(^ z8>dIP`()cxjTXl8p`Er_t-)2rD!SRaX=V7v9+)2)sq$O$#qY~Z;U{T(sAvZ-E&BF} zam%{W!9bJwf778@v7CFnu_HDJznsX-Yf>WBFRt;iTf8I2;AoncMYin8aUbl7YAd6wavMGkx7)aij!us9@}r0{$dck< zYFXdRa`Jk>gSPbSg~=)+p%bPYma5_%!~X^+{8q%Z7Ck1-1OM9{rnIsbX8EOhn|QkQ zg4p{0nSTslH0CA*KkrfTdc}frjts1d5427o%$V{0cVBg@AkP9l49ZVpUm z@afyos1~QhW#)B>1ZCs9A~nVvD`+-Q?Kp$X%NqXThLg?dn8eH?aKHupo<|$JJN4@L zdUSJu%Sk{r?Bd4+3298AIC|~#=x-20z(d$9A2{XiC)W2ZQk=s!5!!dd6GE>Tl?n(s z0yDq=fsWe85y<#n{0w8NMRf0lC4?^_Vlo=os6@kT`q@Xd0OgxhP+B{Qq8u){iWurO z-uV8-!WWSNdM2#2@U?r&wGN+~OR;CvlU1aB+c-18jqajrw}EPvDQxl?DRQYE<=oiBdL-VPUxg z$|OvX@0P*&{-3V^SMz_+JVe9sjk6L&OsK5{E#|Ok9j7AvFEB4YlcG#6+)xr?fT;o|g`feD zI>4d_lqMk|At02*l^TMBgF{2b0i01mbTxbxN=1}UZ`4qC2;sl7;xiGi;+~%E{(i&1 zxBKwej4iFLsguXlzrIP)oniQVzMhIbiMRU{QU|rSw6ztPknikXuGHFWFJk|L+Q{SqY7bvEBmy=hd`sH;^CDedt8>O?oA8yewLVMo9E}3KXiFr?DZ+rwT2xbP&FXUTDfHEXp%LZkW-L=08{oikC<_&=_Lw_{$LO3 zvgf-8@;$NZ)~AQJ%6sO61r!vNVq?Ya|H40-sh8xBHU!HzH*Jl2{eO4s@h<{hFZQ=x z-?o@{?$HWEEkRLXb&U-}J}K@yo}Zt^z@g<^LJU=tgvNvV>T9RHnI6DK$}bVT&hPRUe}Ee(f}z5#)twJXF3p0d#q2g&q#aBa zhDVG=oV;W~Q(IMSH~EaSxVO-8Y17B`IfiG8XwkMk<)?93u} zguUp6h!Pg8P|H%w*Y@|~xS$=-fnd-D>qar#uBxJK3WGUCQ^FQ;6`riX0(F25D~}iC z*D_|&9>fY%l;Y*LRuM+ofuF-Mew4)kzXCkNtZfiB*l>4H6(LDG6h$FY2kc@x#sqwu zLN)t4kT^?v6vgC*;qRBxE>`)~C@_!CI97uGBUZJ(PPVmjKb*mhW9UrLP#Y*vQ+CaV zU<#uKvuJ-B$}GX2i`ap**%l7JGdGKS1ti%+h8$JMddbuCi0yk4kGLxxxJirrWo8KI zBq;d{ivCAG;YvQ>F%7dhbpT7U6r(S3U*Y@#`)HxqxJ_X&Z`nfO+$EdZ37TSuZt(XM zYZS3~W7ro__VI2`5Js~vg86I3LVphRj5g1jHqVqg$Dn-bRb>ud^=}#FR7z^NVoZ47 z6sx?dO1}v$kY@S_)uT}`8VXRKeDqrpqYU9ct`AYG=*|N2my#C0 zu2#^A>d?JanVO1h^(d~!H2b3MBcrdwOn0As##ei+mVd2+KQGrmO;w|2i=Pam1XxV& zdfh9c&3bxH^fiBVRR>-xeAo?eTh2BL#5F%m=Zd({v9^d=zuZJ-LeGMF=N5W|%{KqB zm^Ln9vKd7YI-bm{|2)2%OKM)r>bQ+sQF`;5DiR&Yux_F&0IT(3XTP-fnOQ#j_)KW} zCu|ljDm^QKp&vI|#K>iQKlgTMG~zq3R^jC?^oqx1i&N`VSLry&R%Me)b<$Y#GWwCG zo^SowvGLH+gG&3QtU91pGojO@7kli;Qy-6E6TSta<(mZWOW)`N7zRzXNyK?riA zU>@8=%%-QB>zz^OM!#q0utUGDt_rz2R9BU(b3=FGkW7$yqGNm8F}v2MC)GAHr1p63 zw-!e1A?G}p@13p6x$QTt+Ctap*cP1;u89tNhaPyAveXpIo)`x#msjOGrPg&}_kU2} zQa#E(j0=k2PUJ%>>LAY?((bRr4t|FD6z1IgT)ISSUbHgLt+nMBCnmP?7aZzJGJD|R z91>y;8D*T3zh%={EFxgxiZo7-;z69$BQ|ofXfY$7Tx%+Fb2p0&$)`3?MkJq%9saJ4 z#_qgRuy=aya!1b=Z-Pzxg!Oy)m7gtUa9n`eO*THt6Ofm+p-#EdTZXim{j4KsZmmIl zPg%Q-9gTh{lz$W&HVLeH7S(S;nm?vJFLo5ON8swTNd+)%{QFfold$A4ACA@Ewi&&| zl}}byX7W6>?EFr$ll7=F##jGlZ~u;uGvfi_b$o}9d7U(>l0biT{pJ3;P^5~AE;6qL z=Ls_TYwpoDZ938U4k3BW#pcMiw)sMLrUA)dQ!TEk^rXqG>>fv*U15UgLD`M(OrbbK zi6mg1YwhVmb5>rbCpd4Sx_F!EM^-P-!if?bCbR(SpNxzE2d9<3Vu}=LXm;kX{$VkA zX>=LOq+qLHbO^`MQmzl1n&9@cllwF2jtgf!g#2s*C6XfXTBl@R&Q;^!aC*FO`9~vk 
zFGv~UG#2m2*ZA^`s~67O(V1;qo)Ni%HT5if!?k#Qwo9Cn5TO%K_*jD}hlSEDoXZx) zj8?gSdNigEomkHcJlw zNpIe7p$e&9g86Cvjz5Ok4hsL z<;mYn26Ui`v_jRf!a*-9r4;@6Nk~lscbi~vSidgBY*5jt_P!86qphiGc@g1%U?9@L zUL=9xN|^gAW)TcwX}XOfn{%+AfAKy1jxNd)B5%i>T#^l8j42dDmo>GEn{dt&JuD4H z$R1(^9x5=H@L~~1Dp0nFEx)f?&nT2>-8)Yp8vDS#t)YwxqGr^G)~~5D`?>`-Ccu8M z&-q$WPKSZVj}U26JFiVaIYNEozZj~NwIr7a`OCj&U4!m~mxT{5$|@)=W(Y?|jfg4U zayTbcCp((zoWoF2Q?S2+(;z_>gZhY0L}7ugBPn0`q@UbSc)Njw$aH$Z;G~a+$DKN_ z+mazHm|z>#Txx|gxF0no-}ldeDLs%16#vEzc75MnM0yZ;9$^A$pfSRu}q z_!4C;G5ZEfx;Y2^B`EC8X9B?A*duk^vlohzuzLvGzf|NW82Kh;@@rl^ShkT0!Ryc| zJfiOEk6ycRsP35LLB6%*mw^H`m^jN{zMJj`S~%gH$j`@eAU}9s4!D?zQa@4Yvm@hY zfg*IdY^Je|P4u9q@Sr%h1xX7cDDwOWgnkgAVxR~o;)YCmQ_unS_wPMdSGEUSweVHl z!w&Pgy>AzE7?8!jO24AwQjBoo4$;ujnRI`V1n^ky&lmroPD_+71@6{o%>Ik4qCn6y z%(}#imx)PX)$9DaeK#&L5*8l53#bBBLW-AJ#x^!ONlQo7At(L;O{zm7N$K0P)f
Vsg--wjE?%2DAt(e7vKulqfB5rPM`=f8 zsgz31In3o5^B>2T{tq~JF83eEWBZIl@3L=5NW31*=X(Rg!JA}l*TkLv8dG)81<#@< z;m>$i-|c^PlNK0RO)dQMd#{Jq>pOP8ESsH%x5w_B?t+Fya(;63Wy}FXqj*Mowr{f^ z86BarRUnYzRbN$XH+l-Id#)qYaZ^=O*K_OaHt_Fj&F7F>6Yi=2U-Cops~xR^seidTSM*x#O)9nsT2>rAsd0xxcTI{qStLFw(-G`MP{f6dUHk@im$wx|yj z#FiXjK16h@asM4GqozjBxvLO|HamBnMN*IooWQwxqz#Jg}z&9@9>*BVS?} zX)ELH{T_)8K$ABY-&Qw4W-klD~OdEj7|`E$l1NG zWj%J4meGR;m5>~V&?fak4s0bWU;)!6)1*Z=3`wwqITE=KhUkX$MRNqd7U#qgbCrUd zb*U+<^m`bb7L@ki;&Z==TAo$YRj<=tpIf^naM0RbYA7mnGBbI&FmHkKjTX4PxSZp( zEbMLUbrL^pDl^-D=*l0@x6^O%G`#J{J~61N(hsfE4`Tg}jc9K#aq0_H+JG#ErLU6{ zi#6JX%hH3$Ok#mxcZogh{A3T7ubfIb>8o(aE}5e_Oke2ZHQKe<86#e77(IIT2;yxT z$X*djw;An-x+1~dOXZNNGN-JJFKl5$YsI%1qEN`eqsIU)Y^4B?5|CLJl&b^i1bEMu zS4;GiEkjilq+tKY?mW24VblblqaubaZ5ceS&@Bp=wRx@VUq>Ot1-~SaJrtotCxy+4 zWmd|S{_^xfEJScB9x$TxZO6eP5Pkuy>?7*@$B=AfogG%YJDhe zlRjG&PAM(22K}u@Nmn(cXSF#5UGo73B)JTXC6yWXrle9z0X0F%Z+O<<@+^$!$P69W z`I{FiX~vp~MYJ;NX`{4oV5zuu6H=9Bp{L*Ax|Yn~Z8@w*azMo248=)l$(591q7c=l z;#KK$EOj$&bW{ut*fvFy3x??;4VkLUQjA_@A1;)`vz~}B$H*S?Q_ViwdbjP8KDyq({1`5-*h0aovK=biFHbAJw}QH{gJUV`*z_Ma>7QZ5c-$9eJe z{8-xETn#lcdL1jxojZu9d#W}p3Cd$x(u%@XPnEoc`mP+jUy6?FoIj5_zb1xAOWO}! zb_nyFs2HIMeYDsOu%hH z5~av+Xr4mEj;mVaJ`AZ`*3V+_Q`=llU7OO54K>j2X3y=-pbDPoz)1CH{#7##T`%@P zi;TG8TWKSgjZAr=8#&X3+jT3X=!qp(+=7=9wMt>L!zm|&E8YPr3mfPgSD&)t~gN&U6XGF zPPB^fL&Q(5b;+>%pJZ>D8a1X< z8gnHUYC(~*2@$dpI;_Yjbp~tGBEnUQ%*>$(KTmR>7nZS-9G=}7^0GT97bq4-cp+r8 zBDza`YBFNen|An-K-Imnb0k|F5}RDaZ8;}L7ZFHCsbx{7QH_RHRAJn%vr?YmR18QJ%s6#~8j=`H*R>_Vu zPnP8jDM!?48lye-P$0{=cgMgU3^kMGN#r?UJlYW6Ho#0RruD0L%}~1Fm!`?7jdgta z`K$xGAU%?<0DO+4@lY~Bwc3D>nO1#Fgc6s00ddseE>gKzR^kuW@qU`70g zr_SuIag|4hnJ3LpJg&8MW*rbY$DkhY1F}~EF{rTT%AMt#1u$5FT*(H7jtEU=K%WJ|QBxP*o`d&K3i^sUaa)dge&387hK~t#C@L;4wi@Y)+mjL0Kw@sO=-zd=k_O zHVn@Mv?wy%jrU))EfneeDj2)98~2igbO{oU7;04Zn^R{(1o(OC00ufZZ|5HpT9}1a_Qn#|F@AI(@JI+-a+GLw zB?}#{+>6U^f`quYp(laiB1o36S;B)D8-7r+prwx7Zr?!k%am$hBe04bs1$9P38GOE zTzdk3v}jyH%UKDJeev!%1If$(T`@JaWP{d0l3m}IZ9p^y2q-BkG++;wkz{~qM;b#@ zQBh$`ULG)XM#O>uj?FOzCbpi~!D+s~L};-z><-x;E%jR)AT!(a|!_&a4-3O;Dk0EG#U*4>@}PmXAb+T;t<3nP87EMHId; z_(TA_rq;Ja$vITm31&-~rTzbhM4^d&0(j*bMpsg|NIl@;zFr(Q>aCV+FLqi?p5n@q z%0%_U;jZ9Cd_;7=0}&&*Uub+CJUo!RX5OY6zcf@m)ft}kbPtr3&lEEp@`*7BOo@#J zN&N2>u5F}j#`SV^ew8$MXgLY%3IF-E1wIP>@$qj zpRJC(JR;YCikCShevjJ%!0xovpNBkz2Wswh_I3Yn*iS(aLe6hfoby-;6ISGv+G~? 
zj|jBuGc!obp}qG{4}@sUM}f1!dA)gnHmuR>!qStW{snx+JHYZ|yy9yGqv{9{Rp{#K zy4mS@A8*g+r}xx6)jdT&7PaIdKZn&zHZn4*7yuBdg@uKd6z9AL?=g76jDmw9s5#j% zB*9%WwGv&$Juo{#+A^9biS=Yn>B|gVBL}3O&fG;FACcN9bRS zpB7Le`(cnc^-Gw!w4?+WMJlxap}}F)mcYu=UQ$NJ~;Wp>i?O$D#> zT6Ol?VPuJlpk$GBZzCmaAXX+^f=|>Y+h&KnI&`pYdw@kKm*!aw_1YSPJ6)#q-`h1h z?MS)n0JH0AqZR(T%W|H~Ie;8L;&(I39^}v8Nu3mOS!JglldOnR0)t|BxvO*<`^93*Ghk|OyO{VT{()lRLd2h-)c(;z@Om41J``4BK)Zko9p$!C37^`F z4ddW{Dxl`SbzDAR7=9#2zz#L=EEr?=>YfQC$kztZaWkaBJg1R%1|doLo^nJ^!J{A* zue6p~0q>Kqppq1gHt(d}3EmyG!EzC_pAL`UvK--=WPP`IzM6L({YgD^l{;V6et2YY zv?Y#yg6RDjY(;6n;Qr~DcHU)@@-k$~mGYR38l`}(>BGHvl&cy{HS}~-T?)Z84ImxMw3TeDCMMfvbp|uF)f^u z{sU%9QYkgw^_HqR`R4JigPDle-E`9rg~{$+P77a7mp=pj2ioZ)kk8R{1U8DL@e;G| zy@o4a^z!emZlk-jWjvznCptDK zT2>1T)u-CVP8lNaS-h(w??DrPMo)DksJG5k^bVDaohq$d>(|c>oY?I5iwm{l(NBAl zQ|{(}^W0FG@k`~rcxbNlgg~xB9X~;)|9n;ZDKe-OjXe{kjqqON8A(T(Uvs~-)MYa# zt!kdDGmY1;!~BDy!6)4R*j!TpRo$7T=~h?F$iZ;L%b_M~^QclQGV403v4E9o2~Dot z;i+-XSoLr}A!(=o{xe|#P|+0$@&++G1aSIIYz5BYok=;C5>%y<0lM7RteGh-74!o( zt5d%g&y7~m?7WxUp#RnC$Fp=|*LQy3dELU<3?_;y4y1f3!c?gK5Eysy=O4O@iLQ|w zC4AuQX8B$qJ6bz;m?qUxyz@q9Yf2|u!WT3(s#Ju@>QpU|Khk7H);6^wHgvB;@YqH( z?5*}80oyk#-#e?@BsS4YX?*YLmJ>04(t&~WwbtH&-|~hdUzWKz0A+*N0gKj_GxC@* z9arT_lg?UOMqQ$aM%*UtX-*sLsK5H}9rP4Po#u)P;PsTK?5vg@m3NHPiUDS55_1WW&Nc!xV zMm2==0b|4E>Q8-%u3&{T0--y=G*p($shuUZQN8Hq(T zo`=P%T~deH(z{E+&1IpY2N+dq5nB>M$tCCE&=^BDfZ0IRRT`aV`Iyh}*BL>ADNUB& z6WK*>I+20|wO%F>)_v$M10nV$=jQn5hon$<^o-qe8C_Ww(kHl%L;#Fl9O{E=om0lv z3@#V1HIl!tm>R55{I4`gf5wH7XxT;%4OmBvX$DtPjl>1CJI1u|7%*nHyC&z!s;6)^4b~C0##C!4yxIR!W!^-#xo(BjAoPvLQgXI z!Q$P>el943+5{0M%g!Ey;sB#V`InfetWE%?^msF(WlVtR8-r$_8AX^Qb?x{wyDeG# zp_hr>P9`XVbCzb!cl_w)4yU46VqM@bZ(aw{HIbBAlQlGighE`we2s&kJZ$M^owaR8 zF=oOPw32mjVco-$J9E*WZP=lup4p<3`Nekg@U>3pL1&fhBVf zoDBg`X_i~nqa-+WtN3diVDPr_NIQezFb*(OpM4d(_q?+HA?*c@pTK?n1CO!@JxIWy z2||R09uB!&F8di6X+I+)?X!Av{^^O`nJ`$r&Mx@%!B&|F)1L%u_j)%*jEu_~a{3CC zXbd`CBOCZaB$8-D(uz%J7#Y=RQ-RhGwX2+@q~@+^ivEHMl3nKA z`Oo6nidkE&j#8_ii%DS=!->%1L9vLj;#oz0^`Edv=vO&c5#e#w={8Mv_oU&XSx|B} zy*ofbANVkVq$QO?9&oLOic-E@U1^WEt8cG+xf?l31NjOFmyzC_`ft{Xt)R1 zs;l(|Ke}>&12^zFp5Xv0F#2;FSnpgMEz2|dE`B0D6F4)>&O<^H;3yI?#;z0$iv){A zjMls3=erx!n-}~Yh~=JdZ$=9+=?{XKIQZYJ9P79vZ}e>V{FHQ+)D*)B)S8nLyC$_} z2w4$9K}-4@tj^2LJ^Vhh`_kEW^S2lHT?qtya3CkCReww#y*LB@>W_G$Lij@<`wKz; z22cs=9t#6^iCasa4OJqf_)9yqoTXe*z+lJNc}b&L+$}D%n3s-Vu&7; zgW?(rLK#9vW{3@@7a~M0RuT##JQ!pzjk>N6l#Xm*)|#9SC0Z1FTD-QmehQBJZKSqU zH&!Y+22rlf01E-YQ&xhhtBp?``QqBI7OV5N8au%W|Eq~}JV_i6>o-?3j!_*Lhwu_q zAk|#j*Vk8HkNkwwxjdD{K_@6kT>FCp-fcE0O5RS7;f|^k%~M?EWy>RiIT3Jluf@+q zO*8j6fzNzRT8xQHygFT`MZKssA{@_y#;5=d3xNqWqOFiah><+}w`g=xg%MKt2T70z zd7z(6g}OqX5M7}MxH&tDcGy5R%vv;P5v6`Ug7-*(t1L)^s9kGQvf)DbVyx6qcOaJy za=b@SyIA1AN5{DYVnkTE^8|&KIJ6ru?qh;!VKCHZa^7#o3W`aGDUT!p4B<5AR#{z? 
z<&2WA`qCT4BSuJb2i|@MvodylySSfEwDEwzM2=Vojr`kId*&DNSE^`-D)Yo(h1WOB z75wWW0f4(?(|n=ZIQ1VNRQ2) zi7uasO`kM!ykkDPF}~{2&I-z3m6VV)Y|IG8npg#r?kaMw8gkD{rIo2JOB0Vm^gBtY zB^lU(KkXm-y{Va$8<}+z59@y;sLeRs)$pULFQ6-Ot}3&Q0SA0d6~lO}Dl){=cf2V* zQw=}HqBBQFt17F&XlLaRW8o^JSIy$6d7Pg#*`~@IliglPQf5t6?4u-t!de;Ds)>-S z9hcoz^`rD3ye=~in^^zcnhsX$wq`A+1kknD4h`1vRlT@d&Ylw>cV+WG+ncOrqb&UM z1Wtp;T^y)p1mjuK?rXWx%e^vf^NqjrwGTr}c;;_i&33tBDu~b6Iaeo`1C=knM-g z_M<2IgemUiB`w?KUa8Qd5-|Kt!=+^lm1SF~{>EmQFIiYje)zdv}Js~;uPsIE}f*_&tU zT}}Hb)FO^VrEN&DxkoK{VAQZH+%~D>f9iNSBeYzLv00rc;cK2GfIMRSqu4mHVpqLM zbmS%DH=b;Xow9j#EO{zg>irfk?kf2{e(mF^vu>lV*OXMJ@|2Z_Uv6rNEWO0H3+XVr z!(VtTTsmY#G{!yDyy643TnH0u7 zR`a&7_&=h}oooXOxrJg-x98cB)DFap(U-P07wqnVQ9@|~dqf9+7(@G=E<)06~dVV**Ms;Gc{JH<~$GPj{WAM@F9fB+@mvCu*71^==uTZ*XQCVPf-Sx zN3u}f7L1Joa(F#zLirCB4>)POMdv zljiqiXq`pjt58cI3O$@U9b?hdY0nTwVN_m8@@AQOw2Z3#IyoQ*zab5c$WSy+z^KGB zri7Z!h$JV|0>iGs)>tv;V!rlp|%EB6vgnLGe^BuMhk#sM`W&$&G`3) zsUHRNiwojMRH#^$X}=DdQO@I!9oZDD@kS`wND?oyT-Iw+)EZQmjOII? z4($Qu0uJOv!7pc2&Lj!M=0FV-vG{Fx7*1)F>=CyGc65lc6-&$2DQAnVqt>^BRS!=5 z?vm@kUtlJJZbYH#O_*pa==%Moin0_VOoKGn8AK+?TlkoGpbKYZ!}Nv2Y8c@Xc_uvO z#TZ?Z@g>Y5BDRc5KW%h$n1cOe2oNwt1KcdZJ}w~eVwuH5QjFnRu%PT&q2~63jku+4 zGwAo!E9cC&6aQ@Vc)!px?1+S%sqrRtd|_Hx>U9GhBsB|v$h%npIe$c#@&CNg0ZIv9 zVXXh({czxd#hy-QYRh!s5bqC2ElU|7B|xK6rVEfubKQ4@uYZ8n*YP4o zMVJ^HUtL`(B0Syhi$1FT_z@l$2pR<%N+dM>dwe``@))e+SOZWPmr4+^#>APjVW4mk ziSt1{J-%XM!vL8|sT6GPu^pQ#s59ZP)BR=#DCsL%F-88naLb)DY$*dy#mf-~(%Y$G zy6FYVVDXX_lGqOG89h!Nm-iAcqXVK)-eF#E6~5j4JrQG#;cjHgq||8|)yroEBEU;j z^k48tadA=kUwNKmm#Ws%pO??I03m6u#cJ_?y6521^78V|x;}5^W5a-v4zUi#J76zn zdu9-_39P6rxx9RC0}dd7%fJ1d_-zarjsoBUQGaxFbkbKCr)j4;UTG@fXqt+G}lA~lYtCMaYbK20SN%;}B z+Vv?XJ;;qz2^!=U#|gC~G-UKxfi1|RI>Ox)sNJ2t_CG{$1VY8n4Eq{I3moM~) zzuF?ZNKNf|Bd#Fo+%MAGuQs6OW7tChgG=eB^d{` zS!l&0KU7kJsT=uK5;s*7`BjwpRF!+-c_#JK2?6a|kw#QXjh#=6W2zzWQ#GZEmY{}~ zu!>&9n$tr)JBGdkx6z`!vSg^$OkZ)e_~bo6_!j`8SDv6N$y6=ipz4=tu%+!pXImKi zE}9hbtyM*yvpmC6kH5dlv%`@B#IPh&?Rb6B=C<+zj>gB_WJo_{@&y3a)P{}uGUmRF zCOrU~mdAZjkfd;Wzk5T+kx(1$DUPzjX*I4j^$rzvZAwR5s>}OSkgMVw=9HE9esQgJ9QlmL{)zHyN>i7Ass#)HooX!G zDy4PFMqOPme9VvQ`~JA2W$y3Hm&Jjm=L9yOymWy;zl}gjyZMs5$Of`=PP7sI!9G=# zWO3^6M_GAUJU7-T#+%wTOcgi&Pfzlqq#cI0pfM!KvW4@96vNbh5^5lJ=IQlu^fs#4eD2tvTg~AqS%(Xf~ zd3Qk_ddhOZ)jm2${%$7w9hXzV6k8Vies~t!C!66=*W#jNf&3WNQmHM=zA=?@C%)T) zb#iKVh3~sE4uRLzz^F>HQE@Z_hI6CEpG`4Ok~Nc{d=r%ZeB_d7({=`v#gYcd;=fxm zU+>_q@m+tVd!N^3ro=RSR^xp=(yR*zc|*-22W&{i^{-A=c$lnNW6Ix_Zb(-$O0vbd ze@dV{Sg6mpPL*h)x^Yg%mejXqvI+H`_#Te#PGpFCYG7h-Zey;WF1}@yu*flvAMny$ zn}}Vvq;QT7LeT4v)p;=#X&Ny`m}kTcguwGA7-P=JwZ~sL1uJzH!*7mB%-PUJ**Q)- z)58<t$FcJynfTX7%>;q%X=Bu$0WfRTHKviR*;}}?V9w~Ut=g0wYo>%)`q0N zDrIjYpff)5iCLU`qh4v^)vWFQG~G?_;o1_|#b7@jEA1!%lY{wzp%Y2p#D?-1S#omE zm|(y3uT%ZRaWwtQ?s!JQY4dr!mFl})Sl;9jFitiX)D9L@6;v}zcRV6<6{_{e<>c3?~1_+4G%!J8?fw&){C_$NU<$)FY} z3#1Q6s;L{@ssTzh>J6rXzw%gsMgjs)Wuc2d9Iinm`HsCDR1pONxsFkO>1QJv63h=W zVy@3t}a^HeYP=yf4m+@@7{Y}l;s)7p+%U;+Uvy9znR(?jx27@Hi z_y?SnBuO`3F$W5zo~tO95Qx?zeiJ(p#tRLIX+Tf5jDIE$2EEg&&6qnS2sjAlqviv2c1O{lk+ADaL6 z6D7oM0XuXK>Wg@RO3!~6xqZ|d1y3kS%;5s9-URa#C7>G>GYm(v-r~)XJxKnucj?c( zj}Mvf)F2XMlL>kxWXgB+Fvzk*nkw_DKy$Qw>2@I{%x~Rkpn75pP8-B6)Sx~13w=KA z?zIcME#Q_f3}15|7lOPf9fyFa>YeN$f((f&UT{j42EHSb_cQMh7{ml_@A-d4sU`hU zTvXk@7*|9*a1ZF3DC(g|gAt4#?^3(pAs}{uc2ij-AVGtJ!2vQ8x`COg>E&wUG2n;& zr@`tBMeXbuBlR#~feH*nq_ULk=hXu+^r?`=jWXCF<_z}3!VB0<>SMWMs%UFzS>D*d z7hf%70qwd4K&Azh|Fyo$%ga$C24-hb#Jj}I`{(9R7h^%G6HLTT=-PtO0{j96x`Cmm znwpxaDfy;bS;7sb??UmX(|(_9Jl+dAK%(>9POix@{3<3+;o&@roU~v&;UM$i?D}kH zf9Gm9R>ZY0)xpuRzNLipxsP2zF;GnU?cQ%swoN!Z zKP=v2V-e9SueLhzLW&!Yr@QQ&eJuXX+3 z{}rG5069ME0Jt^|MlMw0cnwH1Xz 
zuOK_H*^X~xKXC8W_H~&_yZdX#*Tv$*2KJql9VHDqo7Efk^XYsMN}UKoFaw)Y>9UqG zFK#Cgoq(u1R>>>W3sMps3d(TPVDAne0ga>$+HQFuC}JjTh6HUBkMvB|yAondCdiHS z9kav9l5DpHy%`gwBScYW2J}q9Meo1pbUqRH*}>6~Sph{tYHFAOAn}1Oh~v|RrVHjw zgkFmsftkO5w#$5ENy8u31cfHE7u-HAlDyz=jVFPE;GDwA0?T!_@0+7i>R%~}M&J&j zbzD}V5qYG6f9+&t$1^yr!FN+l=~6D|FW$sI$3I)&V75eHQ5127|B3%Qqv(LZnrpK0 zT6!XS^dUb`hPdrM&vZfjBpj1bM?+E3e{8$K6z-J)Ia?cK zR!gnB>X+laa%SmrK`n#stqyT2exd&abk{U^7)cd*Z*F+j0MNUSh3ud}M3Q#+Q-v$& zep84ncJ0?v!M?O+ih69y>j4kZYixnc7ZC_@8yPwG4j1jLN> zABXu*4N5!mj_#4Xzwtd`5#V(cgyi6zxn4i#r{eyK&1LuH&2bBb@im{ZC<#TA)QVFE zd9K^s*o7vM(tEUnC26=B(u;e7OecV#_@y6DMUsb6-?C^q1*hOslD3hKy8679;5f&l z6einyI$xL2)~q{v(cD6mKNto-I4 z`7DCDUu^L2?rOPBf<0K$PM%3aeYf6 zQcNX6s;+RZM*L~UQ#tgjq9l~M5t0UTkPa4*lXevmc4@b)y}BH$vK*7@-`O-+m^Qw# zX-iPc4&$lIvsGr8Y4LAXc~)pHnk0_lm@Vp_MOxQK5hCTb3{`rTpYMtG7Q62`Z{wu6 zl2qgkX|hdMn1%Tbj@BiRzF9SsZpwZqQ%mQPQ56^}Gm5law23;?JX%!#QAwGu@`J5X z0<9Qa%)`6D9NQxaqXpPWxv4B}0Gx`0LkQlVpS@pjoFvu?h=f!&0)K=>+g~8v1Js zwwfvXW!P|%4YWeW(#@;OT?RY`WwJ(;m5(dRo%$m9ni;WcHTELsy~psaF;^Gq>lP{^WLimgihO!XaB>t_;xm@B0Fj3o%(`5>bQ2uEfU+jK0pw()xyHUbST#DrW!7V&nq?s|bSr;K? zSs@3R*o0U{`Qm=8=ZSs&ytW;E?qK7-h8>84eN;@l`>RTcOl$D*lFUCXmhJ1BGiLPb zCS)v-Ctl?#H*TpT*}piXoav_K)Q4p&F|Y#Jrdl6zOTRMM9pq}jp-@f`*|c+cH)kDP zeBCgEL{Xutkt36+J!V`EZ~P66KZN3;2eIl&@s09$i<+U=&%x&lAkjXG1yX#C+>+P+ z_G$Tn_E)RRuxh-XaHq-&4|Q_R>7-1Wwpy|SIo;z1o07Eira60-xP$OkR>{KUQZJ5- zQObS>7v?EqkJti5t#MA{yyr|yL)gP892QU)fLNj*+l5Y}z_t@=pn?y_`V9i2kg*8a zx>^^db$uNm`WK(j$DbHA-Sx{XhkgGec~OKEhO>pVXAgOL9jU@2(k6^Ijn7ulPaG@i zzV97bX_AH+Ql?LY*T<+-zX{Oq<965-Wc-$~>se;Uge9xRGZ|lbmqYMl9TY3DFf8So zn4?y=8x8M@$_jlGrr|ETCK~xKdIZLtFX2?CO2mkg!}M@>n<5Rn?GW2vGeR=u!J?a| z)?gVL#ZRPlZ!vX?AeQD=Js%+SPg!%;EvX6R*iNEzpU_g%od=khfL2u`*ZaNEFlnr^ zB~dmk0W*~Z`C75bJoW>c!5_Q=DguG0?fVEbkh#Dkkr8aI8zWpeI1%bzp|)g*1*d>tM<@8t8a=QP3Jd}BpyNj(>CLV&wBmf*AP@^&h&9#-Ax9-J zp@)ie6q;Ua=A_ZMt>$mo!iS1)vfkJ*u{YT}&EXx%KC`Ji%3D~m-8#Y-}4iNGU6*vcv{F4}8#eWCZD}(wYJwm>l(K-GOygX=i zf%JMXQN~8afpTgNL3iKM`!oK7Gt`U~*h~T^t5*SH>Wo*+GAh<%?l`x)e9EJBwMz7q zNb?hn5Q4$Ig<|F-_(UT6>v!Ohli*$!DzZ`0-4Sy`L6BaP z_)z>~=*r%q;XhIxd*Rg%#lc!*=>6n+6^b<B(V;^x?*C^&%1U`8|nVf zcl|HuY9+R-CnEz3TO5H+6PWX? zLl%^fNkj`N(JH}6e3g`xL=Nr&weFo402LC(i z1p9xFAa?}g>Z!sN%60`Pt|obSGy}dhn;UcKhcA(H``lUTChIa0Z2a}NEhs@ZL3e6 z=Ob>yJ2O92`pYFl@mIY(uL0Zg*9&W?jv`aGpx@`gXd)Jqo*2mI)8#te5_`wt)6z+A zAs8vxoz0z_*kzer1<)~)03alpTui!c3)|bjQ@TH%w426(6{=W5{H@7I;Npj+3b_Tj-`eD7M%%u&KF)tEbA7iXrKPj-YvzFl6AmhX5~mQGd`Mfh-`cLc z$i`wu;uvL`LE?yzJJCUlPXPi04@_2IyDfQ*qKI*FMcLq#q{y%VSkU+ZK|Tc|ZB2%K z0WiVaa~j*!Fu1H~fNFvDIawM_q6l~>@8?a01SHCnZK@MLA}c_C{U&AREj(+AUk zCbpeuK2e9Xtys25O}^(>66mI%kX8Zvw{&!(m-I9=!sEd6#MRlEh^|;e0C<~{uBh() z-qEy;K)b++=Vk>DiH|gv6G;r}UHnOEgEUFlV(8bZ_0(d%{B`GWcq}eY z^}xdB1~Rx9;K78O0`4FERVfktW$@}w&0=BYP@%J{*8D?v9_uIPRMUWLL}F+GMwAiy zulz_G{X;pXLv|6zTeT_ZpC(2&19dh9(uY6=$_Q!_);$`CC#tyrQ*(P&vwf=6I<8&? 
zuulATv%`gRz=-G+YptG%_U4XS=VrHyf#N67sW;voOf9IFbxPGAgziTLU;$EtePX84 z4Gk_?O5!Qa!0~#b^jMaal`hBT=V9$Iqqm>f5LKa>V5Z>Xa>>JWB-^%JjvslEhl0Ti z{jduOX1Wq)x0#ASe+TWbL}Oo}S$mt6 zJMrj7xd?(D+#Gd`!A!?SHQ)l@nl~Q3D~v9*lQnpO$wpmpG}6~8&&hw?Ro268<7Rv; z2IJ_*DtNJy-m7@r&dwwNjVpbHY#>n7)QAGXyO3-vun3T&==j-Hi90I;l5!VwGF>a-^KP2GjbcCNWy^*dDllf!mQod$;}Hcd{xsc55b*II)8M!P7=JS7)c zUFAXjiu0P0i&4`!Bi6nw*}%7$|2|SUpf($C;OOdqsl=4X;I;;vuF-q{NUPZyp&{g$ z*~yV;`mUzEs-%9ouSDpowTev9POw^WlZ%t;85lY78k;WS-d{4?Axvov!q&Rv*r$Kmr*nmK|cAe>!v_@RL zrcUk<6B9NOGs4MJxY#6X`3LT%g_BrZ4YDr8gE>a!-X4j~3$@B(ZoZNL11aoOyE?cO zt*sNIbbUPh8lF|xl7btDgwIUEApDlrke(LINnC(@0bMzAip^BND4mH_v`U;VnuEaU z_{>HeY9ktj@6Dy-k?JrO%|hO2U#eM%$?0$-Dj-8Uer#M`q-#VIZ=Yc+jW|9cAkZd@ zo&C19BaAx;G2*P7!v39!A-GqB)h>Z$K?s?ILQP<_M{1lel6E*kN;7Sebg-Ddi-iXM z%c@FP{fX&ox2nE$+9$FtmJ3m7fmSD@sgCLEH!AKZH9aL?e~UXxNVcy@RWwepBJ_9d&ifuY)edqVagH)Sn1ON@}$6hI7d_ z5R=QJFDi&X!a7*?2g8hpe(F#oUhb~w;NFJWqHF~oX&qrR5k#5`xP{O5_H}yJYo!$( zN7&^Me#wd4z*|Afw}%!O{jv)vkCQ39kTQBL9O#JU9zZy9wy+cES71AQ8$qFyh^qZX zATqULl!H`a2paTvxO^lagM^;f38x}Yl|6BoR5lMi^apZW&$y?vW?>^~&`~I{IrwXg zL1s&o(Q&z08tIYah8~n-qYQmX@Gd%6)Zw3sV5f}jseHK!J#^X8c;$MC*qw9QmXr?cv`P>4kgOiI(TT_z--q_OeyXhFZ$X(b9!d^3==-v&B zmPiRqqZyDlb^r@F*z7;LRIrk0bmWKtFklpx=X5XG2pBov0miEKK1gs6ejALf$8olM*Vm^P&`QsvI%Rk{ zXxa7+>Jt5DW^20%h!uCBpXO?X%#J5e85)*^p z&OZa83ICDc5zynnOQrQ=t_gd4`>-;iDYA~fQ~$WH|CnL`^>w!gxNyhI_@g zEq*226aG8j6ciK!?}dKvB%$bPCZLI}5YLVXVT?P5JS08eAYL9%w!mOX{vzOTCCu&%sHlagYvE)@`MT2PTO0ZzQ>$D5fz-&cwvTmPR+lH~u>bzpI=y%&U-(>xt+GFzK~e zD$kQqZk+d^tQ(!|Z%kJb;WBl-`qLN+0;Ajf-y@;_kdsPlKiG5R>{0GPF>=>KQ=mcY zQ(^grBG_k1h(%Jor%=;Ral#TrQM25pttNzXk;2<8(4cb#?Ty#txDcdF=!!-Fh$^JL zmhe=XBV2{;1toWa^7W$y1O%V)thOoTVF$`tj9AYMle?GmQl zp%|yWs>o@l$C9bv3%<%7&_3z*M1J<`maC`Dv{2LB|LII;w*=ZUhW%JYJvot+v05i# z)zIHWZC}=IX4XW>C9TapQDsxX&_@(^RcSt{wS9IeoGg`u(T~O8n@rbPIi6ivxmmIN zPfag2y|b?!bqr>HB77e6VHs<>sdnu|-;s*sx-#h@mrYp!o6WS<{6)yeGNFW8Q+rKc zn^jDobGY10SqS6q97cqdGPMw{W@w@fC-wQS`jb~siC%cF>akjirEIP}1HQJLp&l!tds+YZ{sr&bSjB;PbzORqItrW4UmMM$-^nm`RW28FrM9-5+5656^NTJE>FRBbrVpeR81Skf93yz%? 
z>c({tOB`LQ;~glD#l3iz2X-(*c49N|E4H2bi*6J4mT-*vr^@`(2sBMeJcS)pN$0^$ z-m^UYHMpT=ONF5){R_YHV}+F7fZg?_)~gVE%E^B&9z*Ih`(K=~wtKt(B#k%YE>|s=oLgTihoX#dVyOIBO-R$|&Ntgjf`7Z3< zc+bCym;nn1{%IBbs#eq`H}sspYL&fW$e>?^ibWgj=4@E4W^=z}QumIltjQFrYfw0< zK5k@58MNQvk@SyfnmGcLOL(gE#wIfQ-Ym@{G<(Fk61~q(!IV4W@?Nv*TlcF`NSwT~ zcGy#!-_Zegvm1B+#KJh{TLxB{$vlo-a)!QbHSIE;y2qK_kBE%}(|Q%A6fNo_I#XwH zv#Jq-+*P{eUQ$t#D)oB=+fFO))Cv z3(Tk)GnaYeRUZ2&O&*pXQ-gV#82n0f7Y~bve(QKvpB+x zv_Z}+9SHg9A8IzG5N(&pv~VbdzlWZ+osg)UPL3o7t)iQ(U>FU-xC|i)EMjv}M2m}a zD7UL`zpDPOx7_P;KF^3LJ>c^0mU0+w*TV7b(b5(D4M~~_Uv`T8zPx|bFT zJ(e(j`zPm1%%@s1;F8_QmRu+s~r@NtW(n@O>2=59~rc;QnNYgG(lOP}PCTeC}A^ z&zzBDfn4@7E@*&DHvvEFh1j>G^!;BCj0gDZ2L!Ge{W)r6WAv_PPBmue_>84m-&b>w z9qPYkzHYey*3diZE@Y~O=)S+y6X6-vNr{tU2vd-I4hWiBt8Uif*;(1bWF``!y(Bj3 z!G9Lh$jPuD=OF7dW(QzO43)+0`1%A4 z96p@O_X~8Ld>OJUL@TceNYjS*#^0P?Z6a^^-VS0VY_nh7KQ8Nw>U6tZt|=8t2va`* zP=6d{4=oODxINP+{eA zrQR4=1ZZpq8vS+HK&DmOzLO$ZZYTy{B*{acWT*$!SnAHl^XUSHgb)0h{{|EK^CC57t4pZjmyUpF6_ zE%tidt{=Pm|6f-CvbpQmT|N*0W?#mS?PJ$;43&4Wc9;3lus6H~h zfD4pw0(rU=YKM3$|M=NBCvk`AO?oeRu2cg)>yr;OpU!)`okc(mPi+)9ayw2V%B3Ale3*4wm6K zXu`6X^m_j9d6$B%C|<_WAE*(!WTPX4I!WNYr|){uvOi(oQI?Mdhr#z^uAj%_B{~dJ_X{GlS15{#viKTZnX@oR#X^S>QK6s^RyMRoG-LuBkk6hIbE5&^1lNqg>#J zI&%!ap$_+>({dB-cl&vl9ga=5EgPZEvEhoBpPj%|I`Av0E+N77=Z0P6I#Vg(kkuB^ z@xLqNC zctG~nsfqt@C@>H;Jf;R8{(Kq?ni&*fGi!|~%n6x`*5+qqWKFj7; zw&bG4cmnc|5~R7fQTREz$JyBDrIHW}wnFyrN*U=SK&W<2#r<-{;D=dY39+;?b}dc# z%mnm;{L^CW^L$B|wb{c0Ci!`!WM*oqrdo-%YO$u;;rE56uq0nT@44Ta<$}iw+^K1~ z=K6_tn%UOco49<(iPFknYS~Y!p0E2`YyijGH8_dmRjpPm38RVevBL7U(!>4V!WWzU z`}N|!Ab#d5p$_M?k6nuHBl~(rhniZt87Ap4Ok@llN^-^`%5=KyQ7#QHuw7aV?A(>F4%DEF zB)9@|*&_j`HR=DjrMGd)uA(ph3{T(5r9Ih{op~dD;DNN6pI*6pttHV8szh2ZN2>cI zpI1Vt)njJr6TZDi z%ax3aC!i!u$p(`XPuGC7s{R-;=Gy1@-)O$oBzR#!Fw0AYczXjN74YaFL`8gZ015MlBMoe6#KiLlYm=Q@4DK$-&GGHO-c_qn+YS4EW zGwkwxX0;kfR`}txLdde2abdUzh}SLLz>S_~owHpty|@Pru%WIzXyCCV$(T;D-~Ceu zHYg#{MNr)-F#Ag+gaU%wdL|p=cWbnqROuTiSh*5x>)FENj4ui*ceM zIl~dTjEh=tOtQoazjqH!AI#I18sEOUI_;K`>0nUKgBNWFR!a=1NpKT4*q6Aq3oaH@ zS<{ETJ-ZMpgTV~kU3X3EawjCsS8#DNYT`MylXnarU5hZPBzNu&id8<(Gb~vQ$}8Wt zV}MYJK8T}x3FMO%@ICs~Tmm@gYLEOaIgS{?3){ zbXciE-7*5&@5ph~r|L(NCA^pQ_Skpd+vZ-XF`f4>hVV|b%Lxj<2h|BKIINW6uN-Wo z5pnEQ%(}6BnFTqaTYucR2K_YZoBBb@S1!F6k&KxbO8sD3KR7)!FG?xskKy&iI;MMl z#tFYSdf|aE`@y(z!pOB45_FM~@E-OA?~W1!3cZFE0}pe`A@84##v%xSTUN_Sl;wO7 zEc$G)F_q4JI_i;S`MyMWeH5s$E-3TO?DoEbPx%cV#Iai0*!Hfp(Bi2!+EVsZnZCna6VS|)+ zRKv5hsFy`ejSMr=wVjgraNcYY$5p8#h*Ma^fo^v+KJ6e7zL>j+6d=_$xZ4d$>Ua?l zzoLZBwU_G={dBDfy#-3b;OLjqWE<0)(v7nUA8$b97UaM!AfQ*RcJ9)4a$*s#bG6-# zB+MeVXK87Pydzp>ZD(g^Xs86V`A=`VK*a#z+u;j9Rgr_$R8^Ccl7#+#00K3UKYwLq zMWYP5VAF21tz3mRM8we87%YG-Sc*xngE*ZR8Bb(lWoe1i?Lwoh4Pep(nyeiF$SVkb zm_&QM1LAXr5Dg9OKb7i;HtYy+Uj>1B6M!fHW;Ku7^Nm43hHpq$QytkzBVcYj{njT#MIk=TO?+K9Y}^$&jyeiP7w zbS|3c;!I4ndwY98J4X~|sDl`EC(edxzn%iG&RJ4Y5&&_*O94$Zs9lKAkPd)`fLgC_ zq##FbNK!`CKqW;&&P>cmd{uM4`TF>X>|?*(ML;?!8ioiY2ngF+fxH0_4-kFvdcF!N zbM9|L!0Bgw2YUw#0uAB|v|HJ>&{+OP2BbG`qp+Cz%6#uesb)U^TLc5tyZLmE?((0j zy}Bp}LI@HT5&;qch$)EFAw3?q7i=9c2h@BGZW=22EPuND(L}0)vX(8+C~o}A^>(IC z;*XBY7Z5wJKC*u4C8S=_o#ek_D5LtMRI=O*3E_4c8rTn0vx~FC>*kf0OHIX)OGeE{z{d5nEbaZgI-DZ;+K;Q!bk5j5dR@Y%{PW+MMF?Yqs ziS6@f4N_`gHsL*jav+8D~%% z7kSsv+OF4E%2rMIIJ^9_&mxkpbYQUR8^c2(_N}RS=T|ZPj=>|k;NyUX&u5B%gDgv< z`I`FUV$#EM%EKZ$l@+YvWhp~wbIYHyQnM%Mc_-+X`6}AC^GLKNi2MwCze#cp9}XP7 zwOc?&uEfqBP*}@KWT*lz7L#CiI0ID(4j(8*to z*AK@ZrMj`%&7<8$$qI*m^6sMeVSh_v+QL;?3~6#!dRLC=Rwy%!hovV!%^dx;lH@%P z^|V$7Yf1!dPTb{?W8;@rW|vau)c8G4{p(VUNJ64d)g%a_ z?bJ>(nF1==g!w(+XDkcl7o-}uNj=oh+e>&lejHU77SE@95an*{?9bkv(d&mCAHcbm zpT>78peWu@*P}4DNIPhsQ$d#XVr@d27PAa)rF_>Z_O6kj=TM!hSDaiJ!>u-Hw>9;; 
zIJ{hHFhlb#ZCF8QBH^!7n%VhO(U+SZ+Fp54;c)C;E( zVMi9ZH;J;EQF*1ln_G6?*xg|uC}8$lf58la)RK`8%NVool+*zw&Tw_w++2E0A7IUl z0;?eqa7g>G2O{a_X!M@EPnEio$;g3aeTiO#GC1kH} zwBmESE1)9#+LHd#NB1BEolb8%C%Hh@n#`w$sM)}0A6M1Gl%e@({)r%%J}h$Hum z*zzw}!PZHP-i(T7NQ@qIDqdU*-!lrjqdbxM?oKb`Lnh@KY*Z_8=H^uaM|0&!^qBI^ zmBAA`h7ef;*C|74dr@MvG2yK-xMhSzb*G@Epr=Tde=DINWn@(ijuR9KPKE!6iM>Y@ z7zCzBH=E2+T=w3UqAutly;H)r+?@D)6J%KD%o=OTX0P+VUhvMGWAj_;Qn1%z=92m} z?3BDO@!H>4LIRkL+L;1ky6=cO>(RwD)a#t%>yX-xx|i>%w6}6ne0; z>{Wx6Yh;**M82T~zC;f$ zGqe-=x+6ll`$RuLsod)~+Y9Xt&e9tot;E?MQ}`Xt`zg#Y9i1(jEk*({bH*}tfM_gj3;fa@QGw;~+4SZ+a1Vw|X}OcGj5 z$S6`seGBqe*8#opaLG;bXRs>Dw>K zcrE+vXJIL+rdyw3A>Dih;0Z*?9r`edjK>JM>dBs%ZNTY6=A%XoLjv#y!5ucmcun!yw={F!M|63@XUdDv62>>O@2t}r6U_bzZ z)TQ!10Iz`j;SG?hB=Aa=i^yt#gW6eE2JNQN>FFvh4HG;E=tuy7U}tOlaR+1tUXJjf zk!p2XK@C4IRvSrp0Ekv>Y%H7E#DA9VFp(ep?LKgH54X+p{k!mA+FqI&tl0ff1iYtF zs@x5L7zN;3d9rItN`|mq0&S_m$BHb0-Ot$>KI5gaX~8e}m<6k5Wbe+cA9M5nar6K4 zV1WrQ1F%=P57L}v;AGgJ;bs@Gi6J%vED-msEodJO=Za+=cFs%pF_cf$9dh2Dulq-U zL4((S|KRbt>i|)G!-LNR6xDA20J>C3NeNtt7)L^^0DRB=Kn@)r9Siw+=6<&#MU}Y^ z*`@zitJ@_my4Ud(!Aj^o-0cxi-&x~X4Bdjm4RyV3t0 zZ4brZ0q+tL!_@U}uP+2{UAu*;MzcfE!cW3N!oeu}w z*RIY+ySx2#4nIIP2$I=F_4)dE-c3wCSCc2?Y29q-beFo0n?atl0mVV_%S}s5tE*!U zY(o~u-D^k3j~-!$*1vlMkh@z3&4xdQGN#zF6}EjnU+xY+-kwFT4a$K3yZ$%7-pTBc zWLnhO=V(ZBNYeE;>5!0EamtR$F3K)YZ;S7Z&#dMoYT@%8*_%o4VS5|C5=8ddiF?Us z;M67!Bzq5v+mQKPM!4SDqOak-yK-pkfFZ0qgN4n7ty+fO2BRBVA9%arX>q=2sBUd! z)%Gm*+6laUYLiRep@mJ%4dy@CS2(<_t*ttSTW#`~RYv#RHd8vxTB%dH+0Q%%TSnbK zO>~ozcQQ3)Wg2w`!T>J=$P9<)E(4D@hGoFB1{~S;+?rni$=QBko5eA2I;B6b1WaI0bXB0!CF5{)J^REP_eBsl5aeGBnZk+g@t zTwmlSg2g?Yoz4x-9u@SB29e9XoX>RWAEc^Qxn^vpamp^L*XssnB1*bFUp3FB;fZMlX z_*pypqz=T8Z_ydb1ka1e$B-KVgxsQ!^z?Lo=B+t|Vf5euk#8|lSyR8w_fK{Uiw!Al zBEFK^aR{RmXzE>;CvZAZE1|Kn3=bmGX+N=PNepD;dbLInqK4M@sCWn9M!~pxu#|P` z%;22FFJhI0!|CPV?baQ#!o^Nl#L`-bQbvCaJxuL-B;+dGE++Y}UhJvK{0U09%r}9e zURBcaRTbU_eAuy-`J{Jb3c-FD_scSEp0kt`&z(trSadt9v`pHEQC+XsN$A}!-nSWb z4_qccwx545dvc0Q+ASUbN6|S3$JxL^yiH@>kMJ%j@h-m+3g!>8X#dQMY z+g%zgaaJ8?(tc;^yI`aBBGbQjXk}QFLJYv2U8#lXX;JZRCH{WCwDT0if1YgUS!~wg zGrr!m>EyopqGg;w=gq=*@2dWI_57yq`cmGFQ+HpRePYx+)a~2KQnVPyS`GVkHds8@ z6KvoC7W6cKZ^a8)r&HAq_A-mI(gR$a7F{Wjr)Cnye2LVJR6@Xm_v8fcbj_zL3*#;C zZ(BCs7+(m=ve1ghzb{OMD?iL*oqVh2L)E(L`L`q!%_l=1Q*C%scU$#lV=L-uVnR4z4~Yn2lLfcpDvI2go-rgk7!0p znryD3Ru}2J&Nwm247M&=+L{Db#Tmyx?Dhzn&T*~|r)Q0u+U-UxEdmUBwU)K?A4M5= zTen`?ubDgePJZR(YXsOs!Z=65I9trMmmOi3wPe@?Oos%7X@0qyC@~n$l>B)}3q7Vx z-CKse3M@u&Y@PU}{iucgXx7bOtyTkS!p79roY-Zzz#l*QQ*8pMhwc*lM-n9OnF_nU zs+IjN6{u^<_?~``R2H-MJYFA^AqSbld{E`FZ)TsxBenW>4Lb_rZ2_lZ#7f|(kM@0A zH2_;lb9A`Z{rr$yra4bJhFc`S8RS`5VWzTWCZnX1E09uuyaz6&9@6XmRe^dgS+^~o zHSA&wU$?BSZ(f~A72}TSU-}a(8+M-1y{fa+*G`K?P<~v`QZ6x!5XGdY^KB2dJG$&n zH0C-1Njl7iI@L-V{_+zgFBnrdMq$fi@qzk4%C%OD_)F4$@K62L6cD^wh>?zIr693m zt2{Nc$@5W9gC2<1<$g+W=DRCFO-kff5BN#bMpuuM@ViC4Is?hTISXIsJH46Mzekd| zNZVu3QtAG}5kus?0LT899M~fYRIAHpxzdI4c2sx|>B{_5U{Wda-m#XU1 z7t-tu%N?q6p+OhFzs|upXkV2a;_pm*SgSa&s)rfD^lW&~66*zI9K>OQbeLRo1xMAR zS;9=1%kpXjtcUEl^+(TF(um*R>@~>U5t-4>cG++b$A=);&DrFi*OR9US!l>qqz@ku z8~b^nS<`S?=gnN45l50pLaRSxsqJTJLiRBYY*x~Y)!b_e;#s!J(sYiVJje&; z#Cvm#FH&Q)-R|oud{o5G?Ui<375_S+Czz!0CfsNOTcDIKd+!NSA6)2$1gR!G^M8}{ zk#?Y(JsMZbRuP(QkM`%AK~)*WPMHJcZ4JNEn<7s?iK`#Vn!`I;ezD=kPRSiR5NjS# zfwNvTz?znL;%JyprjT+p?*EXrE)VwM)x})3qrXf?5P-A@wM{6kFp1&K+E2N0T!A7$ z@dWd8^T!<#PTCkRFRy^tMQDG2ZWj}J&h4ymEWzEku>pz8hn0KCcg5*LP2 zNwfwi=`YYHz!e@yJBIW60=g^NV)_Cx{r|K9e7L!>h^~t`0I7whKnY{@xjSrXVn6)= zUT<-6(UgLZ&*x_%8Bi&cm6szlH zc<<%Q{RvPQ^8wEP{QUfhcpmQeU1gqFbXYP=GJ+JS*J%4hZaYVT7uQ;_B4k#Pg#y5e z6~HR!f8IyHrKPnVyn}LF-0-^i)-BaZ)u*n_V?S{2UX8-Ogp 
zRqvR3{Jn!>4q;5h>rLwHzx{dai`<}`5V|krFb(QkNCzX@E-f`R2=)i68!RHlJWTp7 z4vTTU*2M}Jp1l{*CKOWG5Sb2W5kBJr9Enn>ZgU7yB5*4|5?&P{+2mYV;pBoI1*%(Uf3vEM>hWlfh8Hcpq@iopYv4q1e}f zj2zG}@-kyI6#rvR6<`P#nkb432TLB>?#Xb`hVAe!m2i_jGKsCAES=0>M>($WOhb7a z!|DQ?4)KZ+I5|6!-+tmu^OmlOpjE5026Z;@BZtQs)C`RyV(=nM=lkxP%nD_E1vCHw zkXJpfK1;Rn3TyoyMz@O7@D{0etmFsTTuiE(lzN>kKH6D*BO3RhKBhOb#9j36P8Mt- zB(6b>B3oF5#k;Lnm9abXdY_-9FZ!LY-Y+WW^O>T2g|*%})%>k1 z9$#$U4V`aE8Fs+8k(!%g**i*bGI&lTRQln$$-^7imtRo+CSdI-jzbrXDF*E&-iTz8 zF4kLu1*%~9DqwtU#w^#<^5Phs{!|j08}S(!3(IP!m(azn91h}FO~bD!U)7Bh)vyp0 zvBb_F4sMWMVFq1E204Q2y&%L4{XJ|8vu(e4P=S>uBhM%8x2h9l(hmhRpl!;$FBbe! zPuUjKc;+;D$mLV=N|+{E#10el1}#xi+>9u#`!<5k^TIc`$Sfy&RnL|3v^_f&rXi~* zybb%>xt9%j)|Ge`BzW*~xIS0iFC^SACETwnDlcIwH`GkD$&}VUNKsE}*8SY%TP~_9 zFJqPD*wW6qmh)Osj#)VzwTEwLQ5clHzbpN*OhbNR5TE5KXf|^_un<5NG0SfC*5v$= z|D^ry<1>$fwWutE!}+SoWfpt#a!ngsj(>7T8cFdDGH1ZPRAIURC+tD)%Vd8JrL6i;sSI26a4UMB;#pqhzn$9=%>xagj3>{_)qLxJIztV2f z*Ai%P=*If+`O`LP%LYCdGFUK;$qz*+b9HX*R@*&StrS~XtB!OMH}9N@Jb(vNgh{eK z<`c)i`5ym>t`f#I=;G-}&9#}K4c#+(q)$F&Mu!b6a?8dqJ6x$87Ee%+RD)P7$Z%}N z)UUNrYP+ir%iGCD^CDk(a|pLAmd~umo{>Bg*4extrTxBDX4X8A*M#Gx14olPQj1?0 zVR=1jmm}$v6KdbCeaW7d=dMWz$7U$%I;nKX&>GVzXk3rlW*B!mj#9u^D$WmLBz*pK1#PC`OwY2h(xEcmpLepXQBfp2BKlit z+=dDLh09lB7cSjl)r|x6z7FrQL1|+q7k6*UGNe7QxL=B*Lh3Y|{3sB~75rgbfL8EJ z6T4{{-vW;tWOj0jDFB}OYST7s)Xy+%RhE1)!z#zw>nCyFW4ox%H~N*CmtFDGERyRy zl4tv`%!PMJk@CvNY*Y)YL1c4O0vFa;NfduN-?*Q3tSOe`;kcc=lba zkCkgJCEn>1l~Wg_4|roUd;U3+Pl zTw$GEM^pUL&|oBzN7Fc^Da}AIfuz?apHwJUVjzt(@jIB&^M^NK{n)tv4rfZU=k96g z(Sdc!CSRh=R&1WZeRsSei-iQdTZPs^I{UsNYaYncj#`Zk$YpEP_D3#w&Q1jmxUucB z*-Xh#iCEih+%{A4c#37x)br@4rl#Rgaz)HDyl)Q64LFKu!@c0}4YHG`99GtkBd>{k zt(PO+$?0x<(F-GJeWSsqpTET8@0p%A_S|?W(m@(!Y&Y8bjDG9J0&2U8uBD}^E?-$% zY}KfTlnDY$^;`zLXigY21-T@M9J!FJak+zEQot7PO!>6`Z?azqz~-s5b*L$;O*Yz1csTqj8Q57j z$=hJVb(Uj4-CQ?CEP?Wg#zS@9Vp=uC{G?N>hRHTO+y z4)w_sf0Rt?8^4THZ*3E*O5`Y+FcUke>_G0GEamCMY+*Pw#-3HtwHC^QCyZ(3Kz!-E zqOIV_(Y-@socYx|MEbM=G#B&{HR@~QP=kiNt`BwUSF&tkvwIX zz0Afh^9(mbBs%Ozz_1nQ)DsYi^l9>y8rfM{z|CFEn-`E$z#M&&sKY{u94gO7(VlxW-_x>$P2{19^P6u@5UyPofzbZ|y;t^s{JqEz{CnnH-zf{WB{!M{KwGF2 zgESPk$-oalxi=XI1`?@5ynlZ0hzJY+u=(Nb?cK2#Q2^h7hKtrQXT3L{B%`36CeFFO zwB)fn5CT|#zc<=ZAlz|79=z%3Sai{wElRH@g0DYE+3#WbkDVz`dRe?9P6sOk!(h9B zciM0ZxW|Uw-)i$Zz4BlCI&M1VSP#h$F)lf%T;#j``~ppd5sx1(R{yVy)wBL=ciK7! 
zIAbwDVsx>qx7AKg^Pk-NY-TUbJizly8;W=Ww0I;62_UN7fL2f^S0|65^W*LH0nRQC z7M7Gjrko)#JXXSA5qb(9h>DPjfRTZTl!`vH zrJOIUbXP*^9XTTo(~YjK$FmvL&&K4f=dUifB(B&cBqS8AfHs%_s}@Nr((t86yk9nv zS?*`Db~1I?O*$C5$MVe0Tog1w)G!4Qa;vIhI?yp9%^-$QZ%P=r#uN8K3xcU8sd`FX z)hG%RP%|SaUuEe#Bt1qlCB%QSswS0$zQ*X)Sj?Hnwc}EtLpHIt5sLc*yAZI>YHtf{ zmF9zHhdqp@jOqz#OHg-kA*!%&Fwvh@S6OsBEo2^%L)#6m0v_C}XSG=j40nE|xj??6 z22NgYb@LoZxw(UifQOFVI@)>dF7%A^izuV(>5d!)ZRWHkbIMrqyK(Uz+yka$9i*hn zq-Q2(TxT+P`a;ZDJZjqeRdkUt&at(o$LIXX&6|pO`~!L~?>dF#;9NvO_q)twi&hK~ z3T?zf@7)tvFHm%2rzuZz#9_1#i)7j=9tGkO&tf@JSBr)H zPkzR_AW3N;+xY(CzLPPF`>S`^RkJYl@9%8yv>P8`9{0`MP44{Hbr|2oCY?(xHf;A8 zDO%>6z;Qi5h_xVGrzY8^M>cr8|Dv&^x2rvme_nZ&MJr?L)Ua2m!K{mDVi?a zNz=OP|06hY>Gb1VsrT8a&*>=d1<2cG>=CGhao5v&)zv+SCXOT-;q@$BEvc(809 z8gtFoQtuS{Rw*sJEZzY&E2>g@IrE4i&nU*dHMwf<+FUTKp_WR(Hf(IN!E2SBc8O0D zqyS3>@AA~8R?3Mgl6^wJvc|@Q#$*%PF%@_!2IfKm`dmJxwKVLZbS@ab{aj+NG{bMI z4~^%3JhA3GwblFT%$>;2#`8%n){fRmpMaa4^>ePYqM7YYozZE64F?u)MENf~F>*;X zoby^YDb4n0&8>^Hd+^fIth99tREFkmO}>v#X6aIAY)!KHcT`+jm#?DBUX=1-C7_%) zdK@2*%KcRSZ9OXk&^4u*G+&mnOsl3eHqPPFHP5s(RT*1u^b9`6BxrhZGheKJ zSS#Vn3opZfReex(9b{fA>Y6O_B z|3=y2v>aXjrZM)W;c3(*hcDHr94JC_8P!}fqPLlE5mZjAtiF6UOrLH%;J(-}H^G>YKXwZvl=^krB24l_ z(Kv^_msOA1;^(s@-bQ>8^nwFPI-TcI4_AD+;zAH{4BZjFdFm9y!KIrSxB$cQL^*Em5jRhjg!SV>*B31@Ln zT>@k_X~RFDHwpx>nih2G=#VI2s19~NlZg-VCmt_BPgm`!nD`|H7fKO#oA zV7a{sW%djI?U?tl;{J9YviKgYBeim6nK;&Ahup5jSupVL$y)(a83#YY_{1z-0o!WH z25Ds7Y%Lv@B2OPCuAS42D{~rCGm?&Zi;ble)*8!F-Ec+h-w?SOxJ+_$2km+Jg@oP9 zPRgz_l%5NN+V%xFx-w&bCYj;G4P8KGdMIkQ&>Kd4e!q1dE54B+MFPT3yOiT?;z-|a_HS8pUgSP%C)~S98ZKN2i7iQjE!R;FZz|1EsK_n2h~dmziw}-MWBe44 zWV1E1l2Dtf=-OipLz~9md2M5RTeqDNHitaPV&M$4Uwquz3zbI~E{F3iZ|@(z)l)qS zq;nEtKuLq)okw~FHP9?V2;Tq;JN);S7x~&cY{{fk05|H>o)6$&<_ib( z^z?Y}=h4Oxj{Dr(20F`1-0AT)x)>f_-Jfp(3IRu;F z_;@8-Te`yOH`7B5NnFP8HSv-FDjssP5Ttrk(7ziniVHa@qmqI`b8~apf(bzt2S_)L zjwcAeDa*(;j+_7}QL(bIW%0U_L*4A^rfc#&01f1@HC~3qh;Ie}4clrV( zZbMyQot?F{wE+ZwCjB0nggJ=#LD0AOB);?h1X~+hd#2uCxg{oQh1=N?1d9W)Q5Y6! zv}`POiBRISPqItwGJ9lXFLME0n+wi?q{9CtDnJ=y#eG8D%3k5p7OYeJlZCr3gjq)g z@c=0Ua{_bn`*xk9+vYLdP=Ecgl}On2d8(9yW+wmFudc>GpZA)x=FtTF?n&!a;mLr9 z?T1FW#w6DyHCHt@*pw(TY;j-}HT*(c!yKJq(=yV*sJ;87#)aZhvH7 z?;bg{i>@WncBC}Sx@xk#oA+H zGJL}hB}OeKNgfp^jRl^A(6-inyvY>NLqDNf9RD899PunAkT0A%Oc`geN2`>c_>_Do z^OFXWn9K$=+(o8}5`rJ)jJ8n4r9sIwDo+iWl&HfjS)`?I3FJlF);0o@{w=UIn!9f8 z_gib=F_plN|A`c3Gb|3zpo&-UI=+0r0oV0FhCG1XoBthe7xG z`pXUdS$l_h&&Zdr^5VDh4QR;?DF5UWbf8v=Qk0Q?6asTC$hZWzqt9`8*mfKog6^Ek z!sHw2()1D7nb78@1s%L5OCxX?l5hu5R(J0%+_xrgzX`vSWv~Hlax4jEio{+t^hlOL z*-7$TiI#NqZ|Y4-sU7jjwNe8J`|X5f7BqA(fD#SCu8;k>|I!uHISf}$`=YBSs64)1 zV37g!HyzaFD803$`mJVu6ba*R7u>h6@!l*vo=aRve)6bKU+*}2yvLGXD0i-V8~^eY zTRa&)kC_fh7~k3^pZCspJ*g{-Of|ij)lK3c2npAF_^&A3%JqTUFG5Ko>AtBoIi_~R zh^s^uB;g!xqJ`urM6+JL*#{N>8^vqP
X0S{L5VUc&Q|WiK7>>z}TiDG#qL8s)OK?)1hgv>8c?Ej=#{I0#^dQNYK*zltfy{-lRn7CP4eSq0 zyH#YFL=2>FXY;{jhXN4*Qb(%-FZl*5yCSbgo%Nh(x&k*OH;AfDbk%J%^){B4KQ@xL^HIqw1ryTFlv&BYRliKNF4Z`pTHR zJY@E$j48j>ch!h|*L?SB|Cs&W_Tk#_x3_W83o4b+{L|v$v07CJO5cLHz zgk~!dU7;36o-tJWmrt1SQENvJ5R?4@)>WJAL1loZ+c&$GPh;5c;{2;|1<(8&mU$=i zJv#fnTI=;KU{;FuYbEsA?O$>ZiXlHbcQKGEms15{g=<*=QsKl@L%r_4S^#s`?&Lhi zHVNmEs`qW%06gvj)eo-8-fI0fyH#(k+WV^HnTLxerd+X}2p$L{QcT!39w82E)zV7% z&}7LZqGF5kbFBBHO}($K@p`n{@Re>(g0r>k8CW^v+&WR+d6+c&Q(k=I+z;P`a~I#Q z+H`kXmA#XRyxUuLh>h=IdX+>BQU7kuxEebv<}ox;KnA8sJIu#ulJgjjYK}jfC6SPk zp3Cxvu`a9mt|134x^iaJVix;z5SghjJj-<+&Y47@;(kWht{e*){=`2~A70hA7Sl;@O4`>?rmy}(39FM4 z8n1+y6NjZ^Vkf3t+N@`U_d>@0Co#D#iycL$WXi*qh3(p1S<`NK$B4hc)xhH5B#pg!<2;2pXl#&-wsqo#Ft3B??|C#3oomG&ZJ z;QG2u7&H(FdumNzuplOI*~k3a=KRUPCeN@(_DlAi9O<$-zVwrZZplie9n|U7Sa?Ac zDEUDrMT1Tw7*&zn0241huM3nP>_)0W4y+7LYH|%+PU7TAxYaFn}t(ie;+5D%6chZznLK4XBIFs zIq8>bf!7)SD<`gq$hik6dbW@%0ugwMh$OPA`-ADkl$~3LWvVSnDU<+eOhn032;Ww~ zc$7zA^1$Zt!76)?!4Y+V()4fhB`sA39eI%~E;YTt-b)%$;9Z(%;~A&EG#no$N#vk< z?x%!61p3V{?8VK))|yVIFmGMf>i&$QMrCB!NE5e7rsxPiRttBCE;*9-(;MHoiQ1zi z#+L$rnXch+7%gGuMM;^5x)4CC)AmxL47+cK`+o9F!$ml!+@P{6D)+024lB_*=k8 z)352N(D1p&c;wf-QHH9Yw6JhJ50=4Jv;8&%+M9UN*> zQoc$_jR1mGmLChv<_aL>1I?r*ghEw7tKRH^YinzPb8uWV1G<~kLR0}2x*(Lm8<1K3 z|KdMR=KFfpUI_dbE&0+Wpccc*2%PIFCQ2die2Fw(CUaSudab7M)i42tI(tys)8l_S zSNH{KvO=x6;1;;jN5#fc6xFc8LYlj}xCC#Dd7x0hx%N6YILA3pDQ0B(^*nNxakeSh zZltx^i-AKYO4F2Vl?X;1bujvmbJsdu>kc47(e^)&fqWu+j~=z$_3v-Z$4qNXD(Vfp zp-tE{)l$mhf!i?_$65*Z0Z zB9FlCu7oEr`WjMg32e2ZHW>GKqk)dxdj?PkH|}v}N$hw%CjM&Zkh9ao#KeTULmo*i z15UYW6J)F$_BLg;EPKb9y5nG!q;}nOvM*h}4U~L=Zip;P=Db(|3i|I=Ji$CUX_U{R zCgvY+FX4{lbS(uIStFi6<*Ms0+D<#UIO?(%66Z~1xEDqTYqd*QIN+OCh|+J_--i5J zq9ZXx3=HA~13fW6vV&DQ+Rd-5}G8r68xY{4KmYlEaA(?^SmrQ0Uwr%1^I&{Y0d z7i5uZ66Ly`afcY57G8|{eTtlvf2mr>Y}Am_z|$}@o}Qjurt^uzc?%+eAtB$^yg){=S}7$AUTrKikj8Yv`~RtN^OrVG5%mh6`DHZmNA^w&RoThGqLe*&`Jxj$P4j6PaY~SX-_rO zPBpP9wlI>IXtgYq|JnN7iI68Bu)NsQdM3OscY6+HKFx5Khojw9$eByGg7wbn(|8W` zzT^|uB6WlH89#xRz@|&-NcO(TLP?FpbKAslk2W@$@HJ-Sr|o(JMa8~}&RStZ(@fMp zcyGinY;xsQtL_gs<5QI$P;_GHcvICm+iOs#b@>Fw_ap{EA%hsZW8k!ovf9$1zfrRK zkrzEX?eOg!E56u{z2!Ucy!)5mo-BfXBk_HMp3*%rY#eaCW%QG16H0E67`6DNVYQBL z0^({W@?8-{Xud*>6+&eCoM!KjBpFsZ(!Vc%3A9cSydcvl>aCqiuGNnc8Zc=a7RDMl zD)zl4*;*2JnOqn*!sy$ev>ZZ?2JYU;ENLi`YVQepV-vzNM3EYpmOef3yx^V-s?A`! zvVS5js<6_bTB~VYu5n7NZ$O;y zD{8u|8u`EDPqFL@BdqCp)JNZ5bBLZ69h%iHZ`mSE>}~spS(tN$aLoh=U%c7h>G%<@ ziLvi)eMczq^@e6SGSV(A5r{YyJd$y7VX7zv#IItwn#8FxBdFd5&6l2uki5 zlM+sug$T>9)=sZ=pq3RwUSp`u=-h$I*rwVc8G(bgF7bpc*q6hgc|w~AdLCYBb@FA8 zF=oiD@Z@MnGF$(u);}n??kkun)CS!;2lYs&77jzgX-Rr6^AjCDMF-h}9w)Uvo*Xsj zXIjOtF^Y@s4slr}gTGd}&=4{+pejC^*Y1s~yrN4jh-Z!3(*rXtyjY9^SK?KQlBR~J zweCpex6P=DWEy20BA^IQ=29H#?S8(6wr3JJe+=@bXlPiXk>T_vwHEOVhW?}oTQau| z?L`%ZMs{z>?OrurB`UaVNcMQH{#fDrbCiDLI5QEg&Kz2#y#YVgTk1q@Si=4oNB2wI zMqHQEPriW0RH~_wGiTC`j55#s!h;625m6O{2o2J^lLnu{MvbopA?<0c-S?2Db(TQo#P;rQuKk1qI zMkF7z<3l);Y5~SwFqQgahJ4`;h8I+E1h`nNqqHyd`!o<`^@?D0wzbS`cK9zuK{uFK ztPEw45)6Epn$cWtGW)dG3*?Bfg`s9OfMsxLqaRn&ohwK-$wm}NX%U+ zWce~q!gNPtO>cI>K{?}?VrGf*1hi!MjWQK|h#VS2(v4AOpa12FW)q9K-=mI0vOOh8 zS0&I?w69I#sJ|h;>CwlSS&?$_aVpXe?SfIOA{oecU*Q@@9*1+0jQ!hi83V;{rfxIe z_oEAQ3S~D@uJ>RGPzHD)wQD#Pq%CjwBBal97j2ni_s1sIDAOMX%(lpQN5-+}h{(LT zqs?#>p0(0Yzb_;u3I(wEr$iT>(i>ka8fm5BR+G)7)#CS!t7kGCo!CMC(gG>tEHa*q z&i$0BjK&@%i{){mXU7tcZp`3u;$K;#8;P_6b<*NtW<>LeSfJ*}Hfu79;08sIQ8@_b zOGScrb3{KmP07}nRt$UK(-a$+TI;_SjvgtGW{;;iS#ci4fybZ!(PYfCES4%{M2RHP zS?-H`9Ece8`M;c%=L-WFOdv(c`ti-x6%CS%o!#x<$Ge#w3MS|X=o$Ukr6*4|$!3%= ziQIuwgx$T_`nHpW{X8b}al1SC1=fNzJjBF(v%?iA`(Q=*E2%-bh9-s|bZl;J0u20? 
z7S257jHD!2;1yS!a)qCF!vKR(91%dGzMP2sdjX`XOI$-k;JdGYKsNvd0O+NO5@B0g zz)q7h6U9i{yZgjSWrYd3QLT zAC&hQk|~nir3A7ceT@YO8UUgg6(mtowt4{sNXZcY0jgK32E;4zUKe%yo7z7qfr*L9 z5*IpqYhl6dW_K_@{zVed@{!E)N=j68QULyn>wD`FjPx80g{ z4}Vbjk+KT`|p(BeM5ZxQSyp!=`2`}!*^q$eIeiND1c7RmUQzksFyuQHPHBJvVMw(v;~_#oWfsa zyyV$^K~zfwJX`@W(GYvYx@w?w2k2@5*fm&s2SCfnx4vYo0E50x+g+WlW*2-H?qatU z>xqe|5ao$mgN*}qaF-H{J#cqlYRLsr*}|k1rNlfky2_%*s2_KkCbgq5-nuNt(Ubp~ zO}GJJ477O^HG@)U74p53!n#5m3AOJcXpdpoXgR(Il8H!Y>Zpqru$iPQEX+}Dl91tO zjW~Dq-;JA-%q0rQ(CEbNwU{^hTwYP`NY=~Rms#p*#^p z_uDPSrUuA_g?_8-Lf-9+tn}Cc*k2lT@xrP41v`?0jA}$G%>KVad2#eT_ltRRA zdl;8k5AtiEHoJ7b~8rNU#rU4)3|GUni~;SP$<}C$kkJeqO|zl{|=uGk3i4 z6+j#^Y2x?o6Tp4ik4YjFc~iDvq%NQ~eNSh@watogk2*GAJh7`;X~JX~2rfYjVjcvD z&GJn-$;2fVxXwvGN`WHuF({_rT*-5dO`C5IlWODpR&1LZtX~KO59S|7g!3}cgsf>1c&y9=bCqrhjdG z?{Zxu`N!+RvlkD`+1o+=_Rq8R0{KUNBqh&Rsl&kpUZH6{wPhl*TK{YlrVlWih2lIY zSE63q*>L`%oc&6$lE|F`b=k>u*hG%m9nZLnR?&4LgQC;1!Z=3&H464DU94p|`9&aa zP<_mgFRh=7GKcv`%ttzQe;L7i>E5r}9lYFJw0y7bNwrvmCd#&m+zxMlLD~MzR{$F$G#808j2WV$~@k3m>jlj1F^Qsp_4i zNmZPK7UK!>aw&3`bz0o%?+z=-cjP zzhWlDeMDx@kx^9m*VE9ObZM3^oM);!)7Oc2M4yFTB+EV|_d`t#SlA{gQE4#glvbtm zgb9zD#A(6?NR9!wa7wC?XX(zyI1>DRWuI#a1<|iD!LsaP|5jU%{LsV8(dTHPQ=H-k z4c1VdqUQ*#H*(|Cc%Lz;CUX0@GB5zk!hq*S+B^5>VhmPK8s-dZR0 zcu8c5n>i2cX(X#PJr1&TQ`lLiC@bhWn_wZl?%sMYRV!Wj6W+?%vX1z}XOEcLxu43T z0+b5jWQbiSicP7^92vje)GqH<70WBq`=WPx0_qm@kPfbJ(`~)9#>7)AQwaYo=Hudg zNm`Tld)ec?XtTLM=gK&-;MStbjrRIgwTC^PiH503W8Oq{dgZ}8i5SY7Q266|T|F^x z5vPbhfk6YllKxMf5B22CSJ+;vs4w|@R%!J-1hcNhh=$h|u3i1FqS$aV;forLq0V?X z?QS=_*s*d5V|TwC+L}1tdmSvk>4A z1yaOxQxKr)z%ma;$=7lHcGM%3`zA^3>Tn*^7Kl_orhO@OX=~BuxGjflg5GT>nAkk6 zu{oj@rEE+=Hb))V8&id_RAQqwcWL#Hsb0Ja%ddyZYpA`WTl(UnX`IH{D?ZiWLzeH^;EtLs+5sFKv;!3**y(Zk~eh;dKjH1jya<}9?iGF zrI8zDcz4~NBa!Li-1egM;;I^elPtykmi(qY{hF`hHg$h1rB0!N$WPXmDYqYDDvf(C z$?>NQb^~1eC6+1N6|+0RdZXpdR(~AdJ+LrlayT;@YN+LxAbxxhH=KWWX~Wl%`jYTo z=DQ6E#>X7F3%YSmMVm(OmTdwH$G&yRp#0Y`*Q$=q%sgwWRFY_}1<;9Vlvzd;M*#UP ztZEEW7_>RPGCKIMs6~~si{N`o@Me-6o-Y(PGED%DDRdOm$EqmRjcDg7U3CiFbW5T# zMQblFV%ndfk9i6c&_9G&_0?zzsu~sXzJ(#m7-4*;Ywe`@Ja1gH)?QJafEXED-T{)5z z1BMyX@h<2}#RH%<&CJXI+v}>`5d@)E%PlDQ21q?g zt&x^`SO@Y@8c635Z1FrmcmmNGUz-mMKhK1h~6B7sa1%5XD zJP0>97(H0%T8LplQ_@}bD)TG=#mo1~Cjlca6#fxHOJYhEC>H2v@aPfY#Z_;+-Qh}s z2A!2GWZC?5>`;Fh{4+Rlm$-8kXxEc35CEht0t|vx7G^gSi{B;NHpaJuXkiq|uhjK- z#t&_;8#(OU%d5*cOV~@b^$gC8_4?Y{y=`6m-M*Gf6NXORMf=47YpU?5M0{wOjJ(|? z7#Lf)7Ic{PD{bU;zffhkBR|_c(5F zyP1ONUjJUy0}BaT){?kp84ToGx~x>Bh}g@v!oVQ$%dDQi_iu&={T>#I$8? 
zhei)x5n0i2Joptiq(uw{Uzl6w_Z!AGQmjny6?y@c1Q=~ow%34`M+yAQWer!^w^oSt zt8ZiZ@BI;1bniRw594gpqsoe{?1da826 zvayGVvKl&1ST;l-vqzD%ME`yy=IM6YA{%eau38#AwN@@@+P06OM9~+NWrNbUggMj7 z1yg{6ArPg=4Q17f;)ect(t&(s2;@1O-A*!Q=w8}fKcOH|pOG*@-ntQXHjguF;0 zRfiCBI)*lL)h%RWw)BLw%25N76+6RQy~v|Z1-B5(hvW&7q>jzFw}7ad2EwckK)9&S z*^y32YwU)Ry|wmUdnXTcnGY!kJ~wi7MI|(1>?%i$)F*7?k_Q%!QZ}dTv$o6ioRoFW z8v3m!zt{5xHbk59_MwW&tmOCf$xXyQZB6CUx-o{edX0yB{f292g5#%8{=)z#nMG<* zn&ui2Z>o!LgbgUu9Yw)Q7;6S(2?(eax{zr&vb(Sd1+#V=01NAnX5mg3(XwySNt0Rg zpBL#ct7~3V(YU#P~P#vx>0Gt5hPb3*i13DVzB^f>ju8O92-Nv(XZbM z%@|hN;^nJ~C>ISg;DH4Bd7=R8SU96Rmt%STa&s&1q98BM8g~i)V(7SHJJU}~AMP*F z^g_7WiFO))`yU!UORb6fD5>rf!JLh{C;{lJJ2sPdFXN+h2O;8hiuLW1CB#Bw{R*Iy|~{sQAqrqf6(i zZN3>VwlGMlxUuGLs2IeZCGf&&8%5Hu6KZz#{wi2kF+ia4=3!uT5HeZ}yNP`F@~m%k ztBgpOt-Yfxl>uQr3@1(qKzk66C}}to95L)*etOZXbWN_}PBV^sBy!Y_tO>c&5xCAu zQNodaK#x?pZGJ!Z8R+SDeB0}bA^LQ|g|hM$kvIJNb?{d?#+2d)WPnS)E*6z(+AEZs zdVOy`Un+KLPM?Z&2ik=fkn>MW{~dn^}He4MZB!NTzgIN!B@fas!&i^ z)2KK@n3U+LcIHAncWvG_Pe;(Thv?u#`sZp|25uf&b0=YmQ1ThO#)RWlZL7hoi>K#! z^4EpE!N%duYlVOXT}hY7h)vU}ugdkUf-CQG>9f>$dP2%6Gj422tQ_0=k`rGMFsuy` zCKt*FF|d_VK_*x5Fv35q!29d?c8e0eL$eU86hv3@Xve`jym;WJmrvCr8_3O>QBH2W z`ixdG6|F9|@}d|#eL)#!6r_W5c4x(0=h#=wT;dF?%WE%cNh3lUHqnp!xWt7*zT zTIIr$Z=p1d?!L?_O!OVZ-$Y_isUG6xfUq+VN%a>*AZcZ+oTB4DC;`*w7j!eg*N0;>7)q8ytK8rB- ziv``gjrzna_h0(oj^rI!g$|p;qA~F{$@OmtuI)e0WCy}$^$?G!}6RU4IXo;WEu2r@;K%UG&Hng->0YG^ z(3!E~2|v_1G0OIyUvF+;Rey|dBgp@2resLxD2J4%KW2cbQ+O^qG)bLOTiE`sYVIuM z!>9kbO#j+Qy^}*4CzWD_4y_9|kiOb361En-sTlph8>i^dpK=EboVt%=l~kmysT(_= z$b=l8S{h)Kcy``jc(BT?ERO<#juZWDG}mL+n|C^-0&mi-edbr%UDT+E@Kxc!kK-X* zz86SL#C3RT`1JFbAGX*D$|yRPgldxT^+<)>Xr}mrm}8sIMNib}1eO|ZG~!Nm9Ix&C zYG%|~!?Op(BvFS^;?{=*jFgH5?OpKIzC|&)m{5nqNtd#a$4i)Q4|>cNQ1Ws<40QPH z8%YBpLO(-G*UZcQn{%Y&`%ZAnf5Euvfui%)Hx>tLMvLm(0{NSp@7-(iB+hN z@WGriAZk>tU2064!-nmvnL^K*Nc6U6NZvM=XgWgC~6qdjeu3DS`2zc>od6{d%ofdwp8d zW7y^j6OxA+2?Avq@0A(3g`>H9L-Xh829lr ze`$3FR#r(#Ny+W;Ff%tP0#M;sJS?>IR%P|5vyQ&B{A1=F zW(ejR-b7B1_4}E3gVj{VYWq+3sn<{Lf7;zCP6@vdK?%SayX#GuL7uMc(os``qK`~P z5SHZn(BbIu!RU|d_(XkBv{1<)yG2t&S>P_ex3^aZRHA}zrC_Kq`4QmH;+Ol>0gme< zj^Av0D_EWZUlY>c(m+YJ&rllQ?CS1rs20JI?gB{@6BuVGivWPQaHiRyWb;~WC4NSJ zS%ja!-<*Q}oSU7&<9-Dcf*zyqG+AOjcZL zm}7^iTB}C9KP*r#>g|#VA8#=m7{WLx;)Zl27$&qJNCs#M80?d}9~5bYh+vYih2bQ% z6SYFt40=?C6bp<5M}bV^2SWA--_b4m=qGR0e>WG`cOXxhp~|u$1NeK()}4psoEl#M z*X1G9r-e0v4<>=>Drj@~Ji9!Q6WG>q{m}9^;N@w3?Z!R+I%lexkKb;5xCGZ0|F(}w&x*uft)Fw`Ch=vlAFEv34&Qf*8f(LjKosN`4d;?Fi zZKW-eZ2Pz^t!-&Uj+nDDXblblFX99G75D8)*Gk%i&f8m7Ix(7aQir3G%ZLzi z(NS>*YMJV(US-A}-mbl$MnN67rc&jzxa+vu%`mctbv@Rva^%AsKWx4Cs2!8vbnW5w zuzs56MGnq$&8@a0Z8jKMl4~)>A)JEeUOJO$T@7SqmKXojT!bpmR30!?pHVmP^q0=& zirvEqBBnEVKFy~JG=8A33)u{L_$lRMa%xuz;OuzRy>R6)r|b@!kA)lQT~T|Vyn%PuL#*0;je_ty#MB#|9)m%fhj0Vub%;V3o$tL+1QM zO1|YVwt430+CP4|Q+|}paTwp&Cu=sT;w9fRR_~o|^ajl9$cTA6;6nqJVStzY{_hAGwyVj`} zAz#?-6KGr?y0H&q+OFBJl-K}6sc%NT*h(1Bk%}vKOfR9yT>6*)H(aefN?)Ve1b3zlMFtP2 zM^UEWzv;XvPKO&ha0)x8WRExu?hFC;%^i!{Ic=-=wLWCu8)duOR2DzoGR~t;0aEKu5Gy=ZR@0?S# zpMYIJ0~R+nBDPl}DuECbe-9Ue~-ZUEFc45mQG?5-U1K?F`qqyvTNKu70de)KW0p$e_D?K5Y21)?tI{ zfN1+Y>$PoZM@&`(^$t~1hNt;8+F~aUjZ;xvje?ZeKMIMmFe1i1JpJ@63Up2=j-*op z*G%tj9DQX}f_F@WFaC!6>Cg0$B)VA4pnYAX0w!gz+suIKZ&gz#*vO{CqT5u=L1_jV zk<&o9HhK)&f+&z=!B zA(UC6v)d4s8qLYTjycBZ0A%}s1`cx90J!5~=xa+Es`_Q!^3Hzo@^3W#kX9X)-+Z5t z4}Qv$TZO!bt=p&u(-NMv{8rzMhbgRdRzu3j&NpAyRg6L?O)m()wr4>J)mu1XGlC*p z>?5p~^QoZ7Qkmlc2}T(Ap1I`KwMz8)P_hq_AXHI7QozUj?oE^78p2G!o5#|}6aRI@ zFI5bQcO!(^F7!CR@1fxVA-+@0f%K?YmWhCxrNCv)E=YbDLogU>T#5;0Zvq!P`wxC1 zqw8Y{_C!>RebW`VBLmdb*Xwb%|L*AGUD46eDJeMVuYqrO%XwflG8Ekg;Ahlmn7aB$&cV+#JSe}OqBJk~EOElyj2B?v47#5RPB 
z33UK3ddwS+JaJnT0_j6UM7*y-uN>k7V(>9UuquuAtDNW|;CuwoGLSGermljr4P=&PoY^i z9R?{EzD;7AxnE#dsdffYCZzYB-xr=pz>FMzPsmu%D>}tD*bU|tzK`i0dyl%ckN+>c zmFP|g?b){wqF~S7Om<_cuq5{FU>qNgOYjK#8L~du-O2eaT|Zb93P-}zh8rGy#U1wm za|y!wn(3}RgkLkGgV$${Pke^-0~zvH-#R{|K0(pVSRXO6(sb_=83coCTr#uNMwP)veyms$~*^9P@k@p+i;4gMXTMjIg^<+I72>3bj_ z4w(hY{g#)Rvu3h;wxa#fpkXA(ed{jp*Pk${ah9Ej=4r|4Ti`4f_RpYw&;XU0o672> zPpLG&1wycX|0BE<-g+^va45kS(09{<)qZq@S6nu!PN`~1wMZI>?~JVQW_8jUj`o`S z@%V?jsZ}KH(=wD;lsX&4n<&EYancg3=^dDn+Rs!0ijaN#10gVSZle;w`9}OfqTO6R z0v0Pcl{#cPX!V66wUcD9^V7!ln$9;TpkaZvZi98w__;CK7?mKZM^0-aGNGD!)djQ$ zVkus&1A#aWG1o2QI`;>*F4ivez8%$|=?8w;jIengcIjf@m-k*bsGLcEn;(jlx74qmFwI`S+ zKH=7zz-3j%Z8fgd9;V4-n(22Jb4w~ZTW|tWOTsoS#+lNfE9AMJN~CJ~JRjx0mHD7N zjpt-==Skm}`V!Vpb3}54A-$y^1O}7^w_%R`DDo#hY(wZ|Lo@K*jah|^D(WOPL9R5j zQ3|!W`IX1tAyT>B{R8*HzSE)vD-ngK_em8bE@isShe=sDSV>35Q?6k}rZ(!BKQV-b zUq5a9NU<5dqpBn?D4CJTvVx(Az(}2e5E%KaVY=2xSG@ZYKoFlCW*E!NPhePI@9!UN zH_weWiTN)~JMUS0>$&ZmaHL(LTCt?78P{Sg=w|pjMUJdgNb9SP<*%>D2AAlKd=+HIpSPx4UE+Kvvu3F_9hjd= z7<{ed^eR}e^YrGrO*F(DvdC^NrW%$~SNdUf1g%F}xp_Lc7X`SL-R3K=ehDts*{3); zbOo1gs&fr>xuja{eW{U&O~#Ee;WmN{jd&s!4e$!`Gmjv)J#^cTE_#d-IT2uo!df?B zU96tDP79t_mBKzSu;Ef7igswK>UP&L;Z)aZ%ke1pHjDZh&?;@#@Ttwmb1H) z{{`06`g179mzGD1yv7^+;&rdZXQP{Lb;i;iRs_``UnLCm##m%zaAU)|NE0>>^f9MV z$G0lbrpxW24-!VPVleXK&~eGPhezB?suEEcWEH2;50+qF>B9aTnMkNLp$;(*PV`k5 z@;O??L*TQ(a7#(Az~UMOeU<;NaZ$lj+ag-xfX5V?hw!dx#Q8Lsbvl?XXftB?QgA;j zxMIrl1dk1skGHF;%bW?8Z7ozpXDM4gtL89UE%8IKc`=9=E?EA(zRW)S2? zxc{-UYF(SppEOF{fefaz2b(FNXC=i3W~^OEVCPNG3Sujj;)^81;5nefR8qH8hehW2FH~zQcQWuXY|1*c4Nk?rmJ{M33Bnmct0TlBDU*Jq!@V*z#PF(HzP-tcay@kSz}3bB|IYe>S4w}-K6{Z!*0dC@&sO`Fy2W7iaFp^7X12;$b|ja;o|zX*7@2a!)WhxTU2nec@ga>2 zGk+5a{JijFz((@3cbJ7!j$~dw^lPF-bR??llcu%~pIhL`5#qgMsgFP3GxefefnZyH@2K>*^&CC{8`$1Pwr4E*McPQj_fJvJgQeI!c@ zB^J`4Mn6=+24w(SV0kNkj_{EsL|gEuM?;W8t_QI3ThzotnCQ`< zXg}1xiPDTN#%GnF$MNd0zhBx8KGWaeNT5Fn`@BEyhBl?(tcb=@`$m!npwecT(OaS* zY5lzSXcS5_CyT9jilGpI**L{SK%Bt*g}@ta^6aX_KlTW{ClFpDi4oUFSx;=qvWivs z1>D!${i&Ux3BRt1a~KDc%0%mt%nE!0Cphh@a~9Qd9#idZTCB^TV&-ZiU<6v+cr*48 z%1}{zVQE!SgTo6rH;hQ;!c_jUXx+;nLgFsYwde-Lc=S^}Ral!5Rob@eV??^wAmH_C zKaAXG* z6=YqceiiIpa5)*M7C3q18=}X=bAVic5F}7#kHhEa2O^X!E3E|uJ3uKX&y*@ZTiADL zZEbCK)@3r2hs$yTLNGYu?q5vQf6M?VD@Da|pbY+gfC_G8TQc(oicj6z$fZTwD&PA) zx$eV9>9Pa3AZRAO1O+n{xvhQJ^gZ zhExI)0#}c7C5k-&*6P2UDe#}GYs@Fsy~8B_L{XuyjB@1wiE0bfo}L z-MSArRLJdDK?8fYdLi1)7c zwZzyBPvFM5Kfb(}+0y~dFTW@r!&*k9)W1VgB#=l@MzR1X_0fGNo1yXuY6=``&8{6B z>cKGt(@3BLX5ibdO5hH;`FSZ1tp=&w&yP%s5~K0mzqSDL5k5=7tiv|O+JqNX+oz8_ zrq>DMHQ(TN&?%I19~a>Z9`S-1H|zi*6hBojwge{1uGmct`~??q4i$W$ z+DUVJW;Q;8#eTE_pq~wDb1vrA z0C%6Q_erWqU&iEzq!$i339?0y4C1I%>k*cd804hLEX=jRuy(uy;YRVa(`>k-b0~{X z_gdhtXt{ADJ_=)?b~RuC-ylLDk$y)0y$ueBx-p4_PfcqhjpLIF1#>LWzBxF-MmRmz zwSb2@2u45egBl6DIU5kJxIr$=wCM*R_o*PSFXr#_hqo;)R`2(2=QVlCuP*ZYRv2?Y z<48qkU~Oj(NoLSHmwzN$rJ?ZjyfrJ}366Q1X#4TVdL(b_A>?Kht3fm?A(pKC zA^OlcD33;Zat?Ai27}})Hdx{?olo>#)|~Z?n`QZN zHw%!N!cH;)JS)T?_?jhY5khOOyziK-AG6_<&@n`u%0zR@nNUv{K&^&+^B+8p+*sk` z^^~7H9ck&>t&~*s`!Jw03E?vOEo$}5Qrk*q?`FBqzPlH68hHE zx@(DyVB&y;WTb|+hc(>J@!>?Y%ke~T-J z23sfd>>XdJybK^0mccrH5b9nLyU|XF(-`YJXIohg5aNcK75U>5?K4l4Z}iq z@c*z^``wBad>y_wS?1e(MRV7pvw%D*5*1w!Cc`czo?R=p?beyhS?O$gU*tHEAd*%! 
zkOl*Nr3OwMX2__W#HhId+Pm$T<5HAR-b`G)NnIV~BFXvL^j%K}nt!yYZHY-brX|N# z>p>)7?WZM@Gqtu)wWUKarOMxTPRzcfl~>28r?_ciBXj?n=eJ2~cEaC-Lj8`f5=VnC ztlUO&cJ0MCNpBS~jm)!rtGVR18%=Rph>x0TEW&1}heF6cuF*h|f9U;Z+v<%I&wJS- z)2^PG8zt_K!}mqL!fTzEMGFJ#&oL)^*o%@A2iu& z%zn(9e_^}CP(F7}S{Pe_*hhrieK_EBL`0G%>=RuEG4doDhjk9hJiA97gL03fkGSLL z5$My&ts3lE1{i0smvJGR)n`a@?RkyS1ms3I>_g%dXJB0VJ1CBkzi}_JrPcjfFm;B6 z#7pJp;L(*x8Kmv>xpzyB9`?PxrrNoYkwM-KS1kOz~D(LV4}X2!w}XOc68bIc&xa$lyy`__>@E7*O|6) zOn=khzmAttyF5=!YPq^~ZgYDM-i&5J{z<()8ut^0)3Pgugvv6r@<1a7xs;zN4G_`| zx6C|zvVD$RIR74!YB+GFb)wkZcks%G_ORzxiPyxqj1y$DF6}0eFHjnNKh-F*_8Zhj z!NVU~ddcVMJbqEo%poy#;r)?kJNZP8sNriq4B7od57ZGyttyovRN-W{P} zP%FJ{lm3w*BL#n5+mAQ}>l)(vV?qVPyFKSIvGnbfMcoM%&8LIr*t%7mE{7%Y%4nM< zP~rH5sgPcc+bLWE$H0v>J9kedNpJWTA~kbz&Vu_KGz!J7i=GP&y=u28!d4b$?~#N| zA(e(ie1Tfm`osFEas65S`)V}Ls8f857r)q3W&c~p>dr`!re{M{K=xtXN|K!_Isxa! z>RGPQL0gU@haO9*swel0$LE!y>XWPr%L*ZASZZ)Va|lyzivIHGC%doJ3C0hv7@Ocb zSLF{_SM^g!9vL}KeL0f3-kt zPc@}Rk8U4!_`Zvg674q-gCD8MO5%RIht0lW%*~-;SSo#V^D=v3e!mPy6`-Pc+l^#T z^(X&A9dTaX6rG52yM@EBn+&2%D1+|*5%Prsw@HXJQS};~loS7ez&}$_vWT8gc3U2F z1uecV>pUrn2feUG32NY`q*P{rGQ3Vqhz{rjfBFa#f7{w+`7nyY-nqdQ_(g?i!dxBk zdWK?=V~vsy!!cn&HA{fxlZPXy2%AB%iEb0V#SNbm;CDs2G-k;z9%q)JOGtTNgkVjt zhfMcf>C-@nh8hWtgo>(X3Ld$5kGOP(ir}0TES5|(762vulWN5r%qi7Fj?fO}Mq7?RiAyX&#bL=4;{t2kR_nRTg9d4I{^ff^MV+1KB|9}V}b#<8#nB>f63j)q)o%!mXaK8MD*o{@3wHP8S41sML3daNr%gn1-q$eZBJ9by_aid2~4 z$=-k&T^~F%@pN|RW)KLpeYkUPtVLjd4w6CGljeUx7!c$yfjPDOy~^0N=?q8=&|rS_d2(BXqd{7g9yDXAcB;e9Z6(& z1#=a%drGgcss*#F&}3^SIL}Vo_ZM*kQeWl$-S$WCdHin{-Nu;@e-(6`IQ9hRK}#2L z%bCX_^pLuh)6$D2cqehv8MQ7*qy{$`o3^@$Rxr&s4fK6Zh#i*InWKa$F=Ism%m7s> zDP{CQoI*HaE_g>gaWaf~qY@_4zl3B>KkTUovk*>xeP8-$=e;J19Y8TU%!Kp*lPzSU zKx$HfCFD7pEx=H=6s5b5PQA&xb5T}Kiu`#CwdkMQUT!>!Ut7WS+PGOy-jOq#x(ioO z7Y=2cJyW~2!WVawYM+Q{*FgdXmu%Yj`dfrJ5j!!XW~`0NP|Eh@_2X%Q@oH~ZU+E+u zs)m0>=o1+K+dSCzS zcx)%~=1TC^xA@otZ|8Y>1}-ig1ho~@71q8V_&@hl-_y+*h%nm7xW6r+ye<4ulm1@j zss_|t;whOzrvLTg!Cq2zVh3Z`f1b%;yZA{Tul_@pY0L}<6gXwo1vViBr$0f!69TU8gz?ZvoZ7ZA+M~d8?;SN|q<;+8n-95R=9Z^AUPrqui}%N> z&9Czj+*R)L+*eYk41HZ}s}*wM3spHaYIbUhHppsTxc!Q9>A>&~dh7LGb_vi}!~ZDBB}UGj)AZ)4vvPlfGb$ zqF&mxb7Ymgiq3iE!fOX6n%r;DcO>U5d@W!i-pC;LMR#)o+fpCN9`lN}xMyNj z)=Q#Q!hHSGXCez>0J1YnA>AyTD{9@3D#xMxtq4*dcKLQ+#`1S_~r(NB(Fslyg@wsZ2hfQC;QXLHkc35awe1R6J}f>4#C zt_KxN1$detAb1MuzkYNd6GGNhM6%u6lN3&7Q+E7_N7ATqcW<9v zJg_Z1FY!8-=)A~^i>vW$?%`wKHgV}Kf|@qEzwHoix1fzXBx>u@$M^5g{TIJ-(LDEn zq+c1~HbA!JEZ(|2xnqn}LSxD+b9&EH8*3~FmxS@HsO2Pa z+ES8TuE8I{UzV&W{<}z5Oe=&FkgAo{RSTW;!s=B2|)CsSezUY6Qw?jXxZ@ zAA1k(%;28qO^cmnC7+3vdm;i8Kdpqis<@&PDstqhkd}`C}D|m(o&mvBx#5!d#9?eQ=5UIr^}0DYlAO^vIG*o zB%)s*?i^lRF2Cjca<`Un;J}r*yY&a`$(1`|4Kh^)2&aY=xassIsdfo>ErAlV$DL}h zF=>7^mMPq`E;MQv6NG-QpxY$R&0|jdMlPA!bY`ucJ}M`iDCVAjhECl&;|s}y5xt!8 z_&gWCb6%mP$cl-@Km(gcn)V674%yTpY{pW|;ZTJIu$V>{>yMhm>GKXc=ERv6l=(>O zdb6Jbzd$$vS`)#XG5{dU1C^hs=;)1&jfVPq=a~7nZ&prDPU_fQot>Ab{40TI9coW3 z3GPS0Ak_EuNx-Dz+H6j}DVKw(jzr7M%oOmxHvh8IFfH=U!vk0(&22)IKbRmE^k8OX zotm0L)oF$`$QKHD^#bZRiHV?xg=05>jpW6Axda%1E;B#r6?}Z2PZocXiTIU!K=XBY zU76~oQc+SmZgszPq{yNKAbAo!d;soXYXzBg-#j=R`f@fr1S|iv(x3&^6M7maE*&^Y z3jyj-RpiMWP|9AWXe0vIz=M<&H6tAmja*)r#_Jq>d@TSQe|u~?Pjm!sD0~3!IW83t zjl1cD@r1I5t#s@*UOrd!OEpGYfS3hDxMYYRV2lDDByg1?4z~;l2mtb+j~ar$_h-QH z=+zAv2FY2e#bhAtKU4E^XCRIBYrD_g2_S|6R#KhA8t*FuFop{8V0-W<1<8k5ce{(c z+r)2=Hbixh^bYw{p2$eN zH(3@5m#^*XLbLMPlI@pYTE76O4>za*W9HAcjMxkGZ&-Ry+l6urGP3wBm^ah_By^lt z8k@Cd7E&if7GP48#oA`Io~BmZnWwWOGl$?GsxF+G0)#pw5kWox8U}%s^N;#EpnXBL zco34k7BsZ9u+wzgEUlJ6O+Nsp2ZkR(w-rJK+#*g2vsxlf1QUa;rVrT%D-)IHf@ET1 z(yG@Pdm22VNTLWSjNyp&9_lGB3_3y#;%$ZMReRJmtBkUZ`n5~Jcw92JOZJPF1sn?~ 
z(Fzd$>swzB4^Dd@%eE8YHXWmN5YqdFaw$wtdRkgqRu&RAsu+bP_kd6zve)?I^;HTL z(_*FY5;eNrQw-vQ?jFmd(fY5tz2P_}64^%<`IliOVzVl*{*uCXNhthiJ6`F60 zLQ>%swlyblOjd~>dFe1lMUx~rQlD{KO9`61#dJ(Z334oY5qcmXm!o zexQN9>(`~8qI|`jM92_5m2~CHTi8BGZ^g7>4f)gkWq#+NK64t6+Rup8V>GYlC{krD z-;U=F%{AL;Y3Uqe1qb_z-xi$=#WV2Bh5IM{4x`=psEJ9bjU42%f8(8`I$m4H=7Agu zZJFeTadTg4s)X%K<83m!vRjEFUlN@qM$RPr)lp2^z95@AdDi-}FP#69Y4+vr$1N5P zD%OC#NFw|8CsCC8rYyMl2C~0|{aBx6fR!%G;`XQm-vw3j^?P*0QslO1R#WEqY^L2} z$*DV!5gkGGpn^`}V2uI3CsC6Zto^$y#v32f3&uweb{qYW+KPTfISpD=LPt2`*MStr zD_3o>i`b8KCN=r8u}q~go#SZWE%IWa2?@U$%(Zs&C_{gpToE;#suPc3t?SJqf^9Pn9Ue**U&=B9tVh8nd!mEYRke)Km)5^yNpSZ6^PN|t2gBxd3y z8UNN5fv2EF-iHP2t?l8#{Lh+_l2!u#iW}{+eBy>99))unwdM<+PCaLs`6a=HFU5jC^})INw<}GMd>z}>_Y5NSPZS|%(L@i5VhLX zR8S%LE_MH`q#@sv$8o%^GYO`Tg?VdD#gz}f5gf(kMSpPk{?7826p5rCcV<;$M|Pye zD`qS?KNeSfn}9pr?>SjKc!?8ElTE?#){-Hrhx&0#-licK-j4x!*yJLmw6LP9w#MI> zDFmTv!!D1r!M9jVy*8PBZkS=KLrnL8)b~cQIpja^qq^1XYxcOu67yrVMP5yoMNJe# z4w-r#eZ3!RKpdkHhQ__L-zeT<6rPMW@)hTUTZOfqQIu#}U+&?~Cdg^kvCp7%A6edF z1|4NNoI91DO>-YiYv#+cwbuCCdgXj1cc(#a@oYTdyqV4$d#tci z6|ZBd>=o=f*|4QRj;^I;ET5j&_ZMXzQx^!n0d4vpA@a?pUvu{l&P>Rk-$vu+GGHCK z2z>-7xPPzm4@@^sEqi2&GuO=%+aLCDFYGwl#OhIaNJj-!Fgr%Kd{7QM7WGV;TREr} zO&+X2(-8{hTJP};%Vr!_SA9>1A${&9efDXcM*cg*nd9)t!D||qL;HCjVKw(}*l7qu z)S+5LfLXaC<+Sp0q|RE0k+p4Ld+46(xV{l{Ij^|+vhVSY`J@Eh2B%e)BeKp}!5jB_ zHLoe$YD88)$}yBq3=zd2`idWaTRzF}*$;7l|JS$IKqEM%oylb9`TR%ey4FRSK8p1~ z8CMu7xA8!kBp2sdh~f6l#pmoQ)75v-B8iJ-IVGxr@oqx&J}Xd=x2~*cIA^o!zOmjx zU7m2tDOTZ~T)!NQcW@3{SE*NU09U`ym3hXL6FX-QpP0vD`?j$iWS8-7l}z%_ohQfU z?;n{#3`D3}>Ao#`l$PNw+V2+UEER=)okM^2r0=9%8Y8m4*q~)mH6unUo|l~V?V_g^ zN5o1W-QYh>uIhdGd2cVaFA1)Fax+iInH^KbDzx&k{TKM>;>n6&>)zEzfOcD5{b&-h zkAIyE$NI%SWQ#S~kpa^ux_{KItb$d5EGa%-Z<|qtH_}W0Gxde5-8=*yf)w1mf0HL{ zhZSRC5e;19@z;RpuA%p3eXU#4hM^s30Fh+H!rvJ^@k-P=^z3uF{5b)Fk?N1+qQJNE zzBACzP=xmy;Z;4PPur?aXyr8_YBd|CJ~NvO6Vz$_vILcPf&8$Eu9~gC{8w8{1-lDtSXL@ESM%`IS&*w%CUZPszX7_XB0~ z8jlHtQnKSGXY|Y_>inJ3%8P{$9^qSJBcjbEm*Mn8{`8~J-W}^iAuhN@D6np0#51vX z!n1L8z3rl_BBz^-xa-S^M-k}oz$3GYG*hp0CYM6LfZL}CCGY&ILn~=TaXh@<-d9-k z2gaq-mfXb3Cnv_+Q+iin{-i^ZY(tAE>#2N!A4H!f=}8e5Gn%sDc6OXIkc}oV_E&RN zphY%zP_yVS!P6`Z+;oTZ)VqN=*YcPO9w6nb?B3dFkV^?lJe#4900Q#UE_-W}i4dA| zW2&F09HF)Kddku1XioHpWA_K0^rY(Tbm0390MYb$x~7R65p+8W+8bliseVGBjq|?h`I&np?2DV<@v1N``hFEJItfBKWC2dELl_T$^1JGxp`qcwAKdUl z8|{k~^1ZhQDkb9L;spf-fDKtELciG>_#qNEsF!+obHl{Q_@~Ra<>cx`a1!Nhl5cDK zacFT-E7gKnV{Zk$Q5^st4XA%21>~9txkPKre`+g^b@FPbN&6>_ zPMDtPj^$+FzcssU*5_}5cjbrX@szhboY(%4 z*y%Xxd;1+-O`h}qr8%68$<}L;{|cc+eu`df8TLB$n3Bn32M%Ta?H4p#Kr1!uD}HbO zs?{eOzCMx=4&Q_NDN-z__{7s0G7^11Wq4X_0rE2nIZKc_MCErT=oYETYh)Nt=$`*^ zbe1u3HEIyX-QA(MySux)7pJ&;afjko+}(=1ySuvH@nGX-g#!Q z2?-Im+6SVdqJZV4w=OVhBop5K3`93*B%JY&ANd1;F#B{Sh5JVRLe#>-Bs}4t&>d(W z8F*M47_ste1H~|3Fn&Pr#ET%x)|I^AI33|Q3xKfG*op=jZ{=J?`yt3e5t_gn*`jg0 zEsILQKw$I87LVXINSug4{nNm*)$~s_UEQB4DH^J(1>#i2jL|>fo!ROhEF41GelQSP zlh}}$PjJ@qw^byQrv*ZoBi1E_g`2M!9~k zWn^JtVP+l`X#?zMFq+^?2q!&(QZN}Q;x&U$oJprlU2d1-vm|;HNF#_-q#&B0^orZ6 z7e-bbJVs%n_1_Q`5O=Xv=IRd41%9iWxD269$5^D1cGcLSP zl4?Hx;orqb)Y?-?CmD!OBDj@a9P{bUWzk+}4@VnCt-57{WW0ki^>hUSXp2P~;aK;W z=BM&_<_+0#adCJI2Ajr0AK4}u9OdkG{}5-@`YtscFLKc)mE&31Xw*kTk_^9Y{jIY# ziSsqPvloslIT`o*^7C@!+8a*~L7U^XFD`@(Bwok$qcLk$HY^cvxWL9)>)VLQ#}*07 zAK1?C87JRcvQBe2I`&ymi0Twc z8GbesccI#-yxPunas2jQ7t(j3Tl_A*qF9}DTF~R(l=q?j(~590K)!-ED_oXQf}BK? 
zt|de7NAtgy?KIVw5Y$jon&xS6N+OdHY}uvoI)jo=knx6qlu=^9x~%`uzNAC|W(Td< z4Jor1qE=@Z-Hvk!DUEeFUFL^;R{kQls*Q>&ZmHdmZGo&i=D(|KD00b&0s`qYbm8z1 zLA)_TszPe0Sv}uO9F7wCM+@)SC)Tc*+Md!4V5jhvt|QW3(;VuwoBKRJUI?H8x%kjM z<3=%wx;C+;S!`u}5&GmU$~tkcJK(2FAfs zWG1D!@g{3_mwf9$a%?l}T0QMvb#|t@yAZs5E3IUAZ)_O=l|SHz{LG$!RIzTv@hM(B zAaijV56|dSa~v_XGekQXMw;Y02Xz$G`a<b7QHb#0Nhrru1XTfPu>9rL_IGcSd-UW!u)1U+&U#cD#Sv3SpKNJwj3!G+|e> z2az!Ux8YIHA$dN6AfNXrd0L)#tAEKUJlJ63fr6+`V8?hV z?spTqQ##irg#*o&f{gxa0&1JVnO)P=SGN+otB6?_NW8jbZQWZZjuAZ_)P`pt=1dCl za>?Pc%TgkqY^XPcAsO;Ap|NN7sFRRiyGEN9EJP1D*H*+noMr zZR>k?(e(}LQ!AL^T5+|YV+ zRGe&Q|NyN>AFx6nT`?#Xqf0?WVk&=@)lxa^clT$;&Hm9;Ghw+-G( z^+)9tI0p~Nt`%Wt+;I<1LSR1I+s^X$0v?oHzB|QlQ&dp6S{iTS@Rr%3za>1TkZ47S z5Fh=kpgcS;qK0&EX%AanNmSu#Gnhf3a272`&vJDVz!XQO{bozq+|Y3@YZs}{`Ayzz zW2-i8A()$4+n=K+J6IN5ap2@Zs9TDwP7PX*K3|5or5GFWjJW%xukF5Q5cSYn0J|_8 zj;Z*jamYqEw=a`J*Y{H3v3KKYk5PG9t##4NJ6UT}iT(idJeJq_W~%Jomn-BMIU>j}4QjKGW3(#LWsg@hvg8SzN2RP3eL5T^GEF2^<0 zC&|FgSFiHAqKCdggAe!cg3^~(wy2T4(a*H)HQ&D@C{&$aAP!I@%)azIa}z}uC+fya zb~Dz5^L8Z!GU=C=bO+oQ^+oCk$KE`{b#+V|19#=B#;-xv4}b&{0Zh8nnQY!>cUM>aMvLgIFK{|TPC%G4))bFO zCLiFu|DYmh4!v)**y#Yi-PQFqv=|&B;?&fX-Cl1GZ4BW0X^w$_TF(80J>a4Zhz?+E z5|DW~d6DHR`49@Q32-P5wK2Y^uCEu>^_$H$=5}^lz<+>lAYeo23@|p^{ksKramSPZ z2b5UAa~PZ~zCgg^;>mc?!LD0sr(cLCQxPa1O5}z2f@zC}^?U#O3|zDKN&ahlzkU7< z(CVXNq+qzbBx5c@@z1S*0RkW{B@xbtBKk46vVi}hcg$(AIEP-m^aveLD ze(I1-vjA}hs~Z)xTBhG(OZHm1^~=$9=h%j;{2G2Mlw$jp;nZpO6-RHdXAn4|zb=*u zWtTDKq;K%J>D@IBpobpWj)1d5JpBMvIV37njjX1K?!IfqApYBV&^zP$dh53Z2ztn6 zFf;PI02^t<8*4*DqJEZsD>WgjFA7+?!$AqmL@2*d1e<-E!5AUGWAtYf5Y&YlpuodN z$H$Lufx2S6z)Rr%(}QM6SI~bc#>b%Ksm86Le!;66_Lt)(Km?K&_<@YbM)zOD*r4>k z+=UGYfn%Vw4_8pLl65qW8ffC9n^3PRQ=skiapko{MT3!K3i>ooI)S?)>Yg7aeqF(@ z48z~=2YX`&Ea5jNF){FYDQAkRg*y7`FK_9aG`C$lqz3$}f|E7!DK+O*RZ>V#OZvkl zMG_YE3tE_y)Pf$~oh&wASK6!8w49`bf&x)VA{#LObPHE~fhM0O`H&-@V&^5jkfnO;++kkgVwWO*YLTw(rgU-kWjfI~p{GGr6@E_~_<(?y_%dGJYeV zB%Xi#{r#0Aud^7!Z#7!TT-=&?fzSF>xpQZY=_zAySOXz&Gir9dI8=5!7EK){Sik?Kt{(OqSHB)G_9g|L4jH!QYF%ArN8$;>FAwF*!nB{9zs$?CWW<;npJIqSLeV!& zJX6zWf5}TL4G6mrAvG6u#?t2F$oUp8|E-ncZJfpd14e=8_d^~tHX!TV1fxZ5epyqk z_x*&NHo!!f2|NMwlw$gZr-NRJdgQ@Ur_=}mwV@#)9_WH6wW>vzH+q3tUUpr zXMI_9d7<~pN_+M;Qrf{T$6&{kERS4p+IQ5H&U2)|^+G&M`Ly!O$=6cLcjL_ax#T)n z0@TZMbu1@=_B4szihgv$>NKPmTEvvDqakS8>-SXdJlEv5MS9tDJnQK?JEBcPW?RbN zQjb%awhX-&mVVR;B^YIiTy6d$+?mLHGmWg`%PVboDr5Ma7SI06K z;?i zeI>hNLgyJAcyV2es*d|{`Qwl_Vn0XIVVD?Uakrw<-wdT^al;9Q;s++^wFp#6Tek+k zBz67`*)of~ibxJPZ>hq7jjCsR=O#iXp1;vStTzQRY9pC zp-=m>+XU|IpRC6xq5n=n>t#q;_Ni6&BNja^`i`l& zsS|fogyG-2e%-!M59ABToiO(G&p3Bv{woibhY@drI4OlMQx{v`FA~DD6wEv3mRqPp zc=Y8gUlK>ne)U|e`&*|FqIg8qq7d>7-D;+EAs9Lr_N?b=(;TX|z$I1D6i-N6vWHV4 zVz_iJYn0&6h9VGpmB8id-%VHy5TImgi-;8W+~p@e(NK#Mym#6j#LxXV`&aw9(WX}hhA?+ zu9L`DV};z7=%?k}pmpk~%dnqQ*faSK++L~9X!ntMa23(BI03N}G2aTsp;i;>u@x zt}G({lz50oDN+5&6l+>HUSC+a9OL8C@!9$zb2c!UymwoRM|{067L2%ULKvNG$@02X z%&WWjzWxa=@HPPB^6PMmSC5;3%##}9p36)I8K{$T{GGkIOB?@csqoKQV~_;NWA6*% zHD3Ir4;=UWtXxMQ`#(pWdL4tV9=_~<0GSlK14X3b)q2o3;||(ZYD(S)x2|Mj?bsEg zH}-O!kT@zrW*LR4d0vyIC_^!G+hjKE#Vr*o5MLpUU znKmF|^p53!Jbt@XF5ilMxeZL8%qVs#K;+^(mlKlKUy|etEa#d{zM)AeW3ZL9PH%xg zCZbcKirDPtecw2dFA}CNro}EnrD|&~Zqp&P0yO#Q~H%4tQrBD&Hz9mL;BhJeXc3|bk((n?I0QTS&+!&7Qo zA5+!ltuXqTmnSRnNZef1@cGG0$fiKE9)HN~P%%%)GCi538#Ih0Sz}j>zLne?mEk6g z^ED$O4Y*R*8+ZRCI_la#wwBWM-ts76Zx8=8;#q(=i=)|CRSY6p zVQ%0K>r<*RDa#r@_hlpP@z^URL+X`8_!1@_+NZaTL^-nH@V(A%vdPXNoLQ6UL@Zo< zc_G~92k&=fNKWQ>eH8JkKl)Frqo02LQqNP+bn_}A?@*pKYHDhUOyix~ogQ}c1!BHE zc}ML)=Qhz)#SJ+?+z{^KezzRB(+)SRc^2D$3fUT9BUXjpY<0N2xQH2&9&d#-L}#t? 
zmqK*Jks3SxyH+0=h(_wHcX`!MD!jf!W+nPY0k&bYaOZdn=|QMET7G%_7=-Y`#hkZ?In%uw;q8B+VH6RU1uBwO&q0kfT6W{gn`mEQk-@ggCl6h!Ktq zYdqoy5S)W)3;G(|TUqG?#ywEKFAe4t%pVjG1dDiQ5rhq?-QPZ7+YB~bSmh074vvFx zryE2QdI<#sBP}frVFuZbrusDq9@3E&WQSyrBbvB8EyJqoyb(SDHH&m(o6YSI5Wr`Xey4>lqeSRoMc{9LU20-rn5IO!yjjHTX*MWF0=A4dq$%XnGtmmsM&O zx!z2+eO!+yO(BXX4j6gQb0IN@uj|RNb^M%Wx0GeX{%-mY+RPQNRXgMX19|#gGe*Fy z2J6rbWdib_ASpbu@%THH2jXtaX@l;Qq@!Y&ks~lCHRLdG61CtY`s$onTkC?yplYz) zA`G2nVl98nE5zq7mH5-rX;h||H+eB_yi1oJNFJ6hMN_UKE_hBcbzr!g z8;zUpjU_QJr8Rg6n`qz5m{-tRWGE%%H7@oR^NlMbRk;7#=74FPzMA4K7zj+C)0V_jXYXA-IV`;+%%u=R+K@_26!K*| zU@gbpOMfY@sdgxoH0V%Ro`L;|!7og68)6uP&HmSs-qyl$ejz(89pRnz_&v(*Cg!YB z3QG}hUyPOtRBIU1_5+doh?pi)QyHF$cPQsB2L6krt>(E(^0Q)T;#5U{Tm60>9&w zN=_a8xH%nI7Ns64?CzKQ(1?lDRrsKY3g*6aOpzWYEOS1kR>I$2ZW%-F!!;Pbgj!tP?ZO#UR1%cfsH)U?=D=J%mUs>u*d}r6 zA=GAUozb847TJ(Y?=YJ-OXxgEx7cJNdRZM!W!B6c*7T=mVS-W|>r0-YeU1(}h8H!t z7mgfZ<}B`TyQ{;ODb(oI7oghKlRA%w&3#7>4J=&VZx0@CtgzRk`Y*-ToE_v}Yt9f_ z4*y1w{PqpIJ}2?-{<`B-tscMTp?_Vuw>|&cUVG-!zbKDIwr_*wuVPn0xtoEIL25ul z;rX|c0P~P{qGEU2<+4R-U2OUUm*tmJ5V?VAbhfER;B~&1|ymvrN|DWS9 z6(Rd%zShcd=DRk&17_k!3=yseB|Pnu(Xe0BzoQ?nDc=*a=|5gC`dO?;ck+`u3zPoA zv1C&jXH_euxuJKu!1ThHMey13G(XX!HX@hXRHj6|#TXG+bghiIBnBr)O@Hvez^?wr+*k&hD%c;a*acbcS8?oLctsAX_h5&Rgc}EmM#jMc0xH-ig6GGibJz(aDGkj zkgPA)l|c+M6M;|_wG5V?%SFcX_qyECXG<2d-CyU|#hb2b#0ELBKU^8akDh!9B zIopx*wcbgyBE0-KQw}b!nD|n~!NaJQo;CgaeZ2=6c)<9cW`ek%e)Zf4rq(o>HzEqJ z!lnl(p`9;K4R2CKx!CrmZ$GCZtq|ttKZp#*)WseGHNH7r%0o*zW{jot*PKa*sPChZ zr|vhBJDML%v1MA-sewJd4W)H~n8Jt`n0BNN4viZBKK6n+T__VD-Fj_9G!=MlzMAJMtyO9rq7 zi7n{rvYelp32%`Az{wEPuHa6uGgbRWy?-yNfoER0^M7yuuO0`8K>`2@Iy$BPQ;Yb4&@1j@}x000|EHacvz*6KEx0herVUXc&b(Pu*{>Hv=2 zxaB|yG)$-|z)J50ehMLBE^-gxx$ND&9l<0J3jm&75kPJq@vruD`aDtxc>RmN2AJ+K z>^tk2az`K@$)HoS-Rz^Wl2e5)B%_${Q<(mnX51zh7zQ{iR2ZwtI|843)P!8h==DXL zz@=-em(;CPKC^!BcRoktz`(l&KotW?Qn~Knd=|s@Uw|ggRIskAZenI(te2ajCQBT6 zgnXd@3{@x(4Bgp~L0PT6<|61tjDva^S6B9Z>~{9Lpns4r)IbyyW)zV#RsjAC)>$*? z08%2zVyQnzEnF6qw!W^rlGw`6WMUti4RMeh<3veG3G6;bMnoL8IdhGZh7m$NV^Vh4 zsTITg+zzopIxE)<#^|S;XWL%~`vyr7emvVQ?1<2boJ{-1f5R#)3A};e;GsEd1v%<8 zO3zbTxhi18s!I}mkw&6qsA((q3-mk^BLHz^CK+5xewAp5< z0A*fBGaV6hvg<|lmcDQ#T^&Nfm6j0G1_lQ4uWEe+ubOhLcnqWar1EIVp9yYP-EyJ? 
z>6>(PhlQuepp)LBjhvcZ(xm;s93Ua-kBbhOym1p>z%Zp(5YU$`k1HM;PR0m`{_Q5+ z8B~pGS9)?*X~kan*eQA8KRcG$uk4HvSb<|8c2^U0fUSmoGSSk45fa&zV(c^BV>!8$ z?3#Ufx~WC%`52s04aQE~d}tKZIi^Hgin=@)f!L*J3CJ~x_<!Kab|_YHJ&SjNjo=)Dy=^qyTy6PN8`ys;V8O&?HNCZI~?%!;<;q< z^L#j}J}bFqrhc{j`&S(Ap1c=drWsS@HblAdxU(8~Vx2GwY8x6F2k}KV3a{$gd*z68 zTds36GQZ7m5yvS*BnxG}Uo?~-u3;btuYU{+n=7(e@tjAAoLROhI%G4#Qi4ar`OYS& zXTw6iGhZn99V}#(i{S2SFq^(6d0zM!I$)a~eRIwq`{KgAlPB|L0Qwi&9nhu!ck85a zFW$0;Py5c^eg8;)u0F+Kwu24#t$oULyVTjSl$NWMM+Wp6H^pgZ^IKwnWH0f??i+52 zI|{k3ynsk4Q^0;48dY(;i!9U8KI#|OrNX3_8Q(EG>79@lW65Yt`Z(>tqOZJzrCHB`8ST!S#g-71IuC3#ki<$~o=2%MX_W`U2i?n%wBn94#Z zCw%;GB&3SpU8*6X%__1n!G*w@@kte@UFA;hVoDb%Eja>8<0@%qWx>eh9nTL{U{AU(zVfk&qtQ=Md|42@P6!iw?K1dipdz)1&z$Fys`a zS0h)@4XEHUy~U>P{E!D7Vb9Gxwl?C`jfSx|czPwq#JvWjoor&t_WD@tE`}__P97Pays_qzjvD@LfyVfsTdQKRf=F^D3X<*1IVXSb2KjYnNJ4p2G4IF& z>&TT>Tn3)(7?Kj>Z)X!Fv5WB)KX?=nUcQ>e_s}0wwvt}5cWKJ$ zXSGsY{B1p^GN=0%dY!!kUON?^S#+ekm*}{KoW3oCk&?UN3H9L_nbsizYQ} zI%VA>M_u(P$fw(!MW=LoC?LexfF5B8+B98`M`gi2+NE=T{6;Znrf1L=m4-S%g#E^| zYUcp$-96M3hsGXp^;$`ldTLSCxIgxltMSzPSDaotJDy{=^w}7k9HU4O?&PysUh%pS zizb~J!B9&f2hhn18E&ZgPdmIfWNzZ|a`V&N`X7T_9yDc)CCn0*6tt}F1?8?1&&LVXE z|5~{^v@aOCZZpL{#L?Z$hQU?&)-R2$3-wL020f29Lhf`!bTi+c@Fh$jf3&fM=`H}~ zP}#?(Ty|JrY-Y`#yhq&5_WWzD{mW^#WnVzo&6>bvpb*!?T4`KehLpSX6@^FI{sV1?LNe-fE!T!`Zit<`b2 zC$Qkz3PtFCj2vTuAsmPd+z-)-C(Aa3h;go|poT%5dr-xHWXrr#n$x*hg4Le>bwmtU z-lfbJhEd&7!g=PKC~8KFqgsYd$u!0i%vAhRHj%3HhzrvVu|u5j7eYJ%jX!oi1Ma#& zyWb|z%J9J{R(2qfbrMyhgBiwFpebljsnmUZhc=JmQ%fNb%V6>tyuB=jn&U!DOQ9dy zg&a?F1R*?obV)i3$cvVfia@3!XsFJACu?I0l!5)5I)hF!yxGpt1FOe#u zTe$%@4vc^E-6D-E-J58GSZd)q#d1@h4mN4Y4@~+!+3n^2wDAP#vgbrm`vIsX5$XP*e32QNxsp=3+M{a7^7&yGPlY;Iu_(cXR zW4P@wBnk)YKG%7VW1!9U~2dfr~A1)@4^S(#%X4|{B)im;qlA=5i_!Qa|5)vX-#TvIn zav0>^Ckc`jeIPdJ+ee|Kp;@caLR5`yZ*K>Iyunn0ur<0oufxKi?7Z~A)bC=0Qosyg z48Mw2LOo%8Lq;Q%B}6_CzPp^~@y6vrq-H9hhtP!W*4Wo2>j=pUl85txxez_NI3Qc# z;JvImq$WEKaz~T~FNRq}ArUZ9rSGEx(w5#VAfq>*7U0_LF1_lF2p7K0$s@W8#vUzC z7wo{LgWioo3rgT7 zIzB95C88+bv{XuP>`c+0<~QYF5YD3S{-o_5iT2yv7ti;IgljAl#?L-!=f;kxu7xtQ z3o$tcC+d**C!_MmCmZOivTVnv?r!AlTAn3<$j$6jhWlP+@nmOqBbPKKeZtPjQOdt5 zVy|Fosrf9BOS?@gtUFCH%M8Xzk4F03cyTDUEh@6$@U*AY8QxjmpgMOhO?=cabgId? zm6px0`JMlW=A(Cp#rsGN=Cd(Txm-AL{!j}ZYk~(?83XNI9(|vV7qZw_p?eEZ=69Ie zwx!v-Jn`mJ+>RPvk>!Xa7tz9c(~IWxqjJd~u9wD`xnjIF=|~~xzoaxoFzIuw5;&r+ z5#5xsV5$6pf{@_IpIk<6!^~hDz3|T3b+ZJo)<<;p=k#znf3>uGBkk$2we=5d2Y|+O z5X@d{Q4*nLGV)FYx@Bs^uhBpM6IyA{FYKu=l=zh#Y#!)TnpU0+by4Wd$00{&TZu(A z4mp_SXez%3Rw@**qhWJU-HejbwaWUUk{)}3A1OJ$Bau4045H#tl|$9$U(<;w*~HC$ zpP4F}x;K{un=stotm7TvWtG0!TK}nnhMh2Zu_O+6w5#2~>pFl8* zYtIB^8!KD;LrPRk^uQ^*yyd`WJR*4wcDH#03)>nouA!dz1D%fhFP6(=)!m^?N)Cud znQ)PY>5;AkZoUeu|Cix@){O{3K-! zPC0DVor1rLHSU)alixjD6INz~)V$V5BKW4vH9(yyJIFt8a(wP{T$^&7}Abd6Dhi2-BGqQOS zCDQM(=wwhd<7k7$^I6=wJ8~eEe$$?kAw?{O!>~b*f8P&PY4YBmR48pqQa2XlICOt@5O6=>yEnSpN-5}+Qi9mNGwdFhbufE*Dqc()tglrPldE|W4|`gHb{Y53T#E5~ zi6t5dHs)!$C^}9aBNG6M!A**jM^_KcN<X|AiqrEEc*ek@ zi0^_MF1ac0lq=Q&?L({)`NzfONXW~wG(=Yie=I$IJ0QP}ZrS1dz4$YR*LzOzoqEAM zqwmzpEt10YFNU%14Vj^`rxBKe;Xzp@1!9W(AS+>EKG~BQC<&%A=%`%MyUrG4C0HMU zN7|5z=Ep*`+jR-m?q*ZmbzSR=Ae!T+vwDx;M(@~nEnCE-I?3LA81X6 zB}q#VOWyx(sZH=g`#-$Yu4jHc% z75cE|QA|MZ`otobOcXqI?!m8n*nr~l^D?rwZ&&ZbU+<~5cV+jA@NMZ~ITqP|33~R# z$XwSCaS@^-iEG!aB%u%?uq)oNu6jAll<;vodXxJdIV7PJ@>=&}*JRGcMS88RuE``W zgg|!s8J#Qe({xG&R(kj5805i`y|m)c`;^p6e6g^mJUsAjujfX)aHC7}yB^`_D=~cl zH@S&|LRVeXYgzHp1A#PYn7aFRi~CX**$y@8j;=~vpH#q!)rgMTu9PxS4H)S&b|Rvl z-~PJZn}MQs8T0CC=N%aWT)#oQTFlFbz@b2R4D2XU{9S#TeE%{yt80YB zTm3V$6fKS>w_b#*XSon0Y9}s6cJ572>y4ozl2(T$elA5SaqdRqUwY8R&4Pf9hFl;k zMH&)NY<)g? 
zhD(ldCB`?PhWiGwP-^@@^s<5p52B~NS#Rk=0f^c!=w1J6Dk=eb4(#MQ}#mUNz5Kn@|TNlr+d+GGFleSKqLZRt@U!mW?uOh zSLJjOVRX@-;82xX@{?1zEEqdNOX%5g-abn#P}iHaL2F=@kLI9ATxHzXFShzS|lrPSbp1h;WqQ1PK|>Z5#w}G*|q5D_;B#B ze^x_ni!J#1-jn7r>iO^s>4wyr@uBCfozb0?7w}}C48@;zEQl|a!123rg=4u3sKXB2 z#N|I5HeUO*(;4+bLfwhDL;~JGzPh|p7jynY3S7i;ekD&BxWanb+~ z_?!B&pbY30n1BItgfT^uQc`^U{I-CWjL8F}V}B-aSPr_7f%fTFfkEhoTLFAV9{`Pc zd;6ZNoRORiK~CKQX(IMWlK4=MYWS_@Q_K1YcrcJRe0(`d5Ygv}igW|rnX(f1I3ElG zYVa}DZd+x+wSs04RpRxJS?o$Yl0D}jsx3mWb)ct zr87`r5@~UmY^54epgvVJ+;x8yUh3_>>s~Iaq%%_2Z?T82YeHKNy^kXJ7kV!{B#lhS zWBYyjT~|wLWl#$7zx5tqj9h%P7?8mEd~PtLaHIr*v>-b-pk;`PFr7$NNHs}yq1(gl zZNuYgolQun$8N3edb1@|Q_TA#xDmoW1(1mSv|vU64b+a>e+EVdhsAG_FHS~wkqZIf z^U(G-VwRBXw4!{PI>w@WqES$eOo`lZ5vWd5($a^kcih4V6^UkMX8pr3D9DP~XM`ry z1=u0VqobpTkOLNn$lDBw)ZnVchpc40A8q*qZ zyUCJwIM_@5#X~}Lv@Ab&{W`gJA)FHdlBO=C4G{q$XfMPX297nTO;>^n+nfPqO#msN z1NdzzO!Fots~3Ns&`trz18embVi)4Ivi1wXdXT0(7^uP{h_(2X#+8NvntU3yB*W@! zXF9hoPGYKGw8;RxWQK{ao_ni)%)fd6;Iz%ePA=}YM1w5VXfcwLHpTM(5erH!)gVjO zo_zMim>-RFw^aGsLmA!bl+)yBk)9o*jQ1>>JiR{;0+|)vRAmGCco;dat@T}b4&XEi?*dL79BTp`X95yy_B*^PWONukB-|9Vle-J!t;tvEZJiwYPJS&5 zel2%5ax|!2bP*W8EQ9fIuDH;{W#r&B`BhO5guT0gHt!ApmwTj+DDE#tJvbji75_0qc$X0#BndX|JJkT&;lRJ#~l6?Q$x!W-&EHH1-7XFhP-p4O1&UsOR zS7+$urlh2LrzpOQ?JG~y&RG?msXI2T5@gj2#vv1VbxjQ%4Os zzZTZYI))?3aY|quQVe>(KaCUNheEc969$+jHa2yd>kMmOjLhV5kMP^nWqg?V`*Pf7a56!!g%$H?r)K|A`fBPfUwf-2GJhZ)m&zOXLYeWw&t78Ps6j<)H?DT40*~V zz8&Rg|JHB6ifyC=l+Oan7J>J)`vMAjxSFshEWtV|21rp%)}jNcc0S>VXRM>%2|r^r zQA!|QBg;7~QAC+V8Ryk#1(`+L%JA0U(bu}ZJf+%5nUiVZ9+Bu-z_tr{Icf^R-t+bH zMe-t}^AhG;SR6^GjqqQXD5fNkeRTeDWo_z+nKs@qT#s%c?2yWQBa4w53o6l{-W2V1 zeciCRgRPWnmghUct%3WID-fZBQH+nPCsSeL3_O;U-T^&T+BL54 zctG%vYdz$%mfG1|aG2u06EWH~9Mh{k(?zc!J>>1!Sq$k>F2^)IwoW*XA=oD*pRnO< zu>!@Zj*&NY--kP~$5?@1YmRkmTqG;Q)#TfI8I+WXMsyF}fu6nX<-M&jNXLk%%Vr(l zF^e4XRyJ2b+UJ7g$rZntri$DNq19Wh5BD-mDREB^TXe(VLu*3(3~-jDRz!>|J{~ESnoN1ez%&z z8I$moju0iE;;U*=#<4=mRK}~mm(bN-@pDl&jD*5HPMH2YIP3d%^a)Fhcr=5?nT59M zRRw{+QAtT|gZruVBGmj)Wrow@UI9EC)(~B(19MSPm5cjc0FJLB+<+(jYTt;#Y0KqL zg>_Ev7K&6hac~9;4#`&x1rUuS0Ovt7-lt<@3G;cCq4EFRZvnpWgSXTay{YIZ-tvCi zrDhTHn)GyAtvUU*-Y(?Fi<*;+!^0H9shwA|ck2<&$!gKS%Il`;REBOZtR=)uX5ifv;hyXYS(Z71b z4t6p!FjzW(o@tAJuU!ZNGK+`23;!or{8h!26rO;fB*8*jy?0q*I!$E^?IxAYqxi>9|%Yy0&sIxMfgQKVi~8iA((T( z-}7?dTuv(BSpyt+xw*L|B9OoX(@?_&R}04We7t}P@gzU$R+8nTe zV8fuKa18{QMl$0o3aR1%pWk^Ynk>+@XO9X<8ww%80H(`s9cWLXxzCb~c;A zM8oz%rX{^`!K6*RDqzTvcH^U?!H@Qik1gKh0U#y-oeb+&Gqfd|Y)a7I5CvUbU1w)! 
zpiajT-!M?i+@L>pA5E3C4Xlyk7hHs)f-0t_g7>~N-2U9Y%^0D#Y9b?iC-OugkdTBg z)5^sPaNOzu!@R++u0<5cBMUnkR1e0(xP%uXu({CBmAH2{aklAX!>L}Nq+@_ zy`g@ChsVeJN1)#QHvld5_w*?bnQd2E_G5~IB$ zfj(`$I7~;2>SF35=VIeyBc~@PrY063ChSv+ z^@)wbLKM$4$uOlLuW;8VMJ3g)9NrOf>`%gFX0bA^RxFd2z!I2^+G#&HzCcks20g+b z%c+=QJNf{8G}R4j?s3+QurbAm-}v{L$lOEL3Y7tB`cgF@R-{82eJGvm0x7~Aw%_24 zNp>{;kg3&4=h?}I^~T)+e%iL67qr*~4^JruGXb>|RTimS^jgqGCY(_fDay&2eaIwX zBTjyirEoKX>U~+JV4H3rOU)6)9MbNWiQ?qR{kN?6pK$zEY*<`W2{?b<0du5XJlpn( z>PPg(?`o^ROWbEalXhkSg5#RGWGDD?{Hkq7aWBJ91n>C8DE+Y|YjX*vIRP7`@erv0 zj~d1XU zzS^3Ipw1mttrJ9ByL~(gIsgP|Syd5Uhf3-;Kp$p?v57N!cJZ{$)gn;V(N&u(SV!ul z{zW%cjxY!GB>W7TP;;?_O(E(4lxvS{$msw{m?-=Dl3;&NBLfgq;!m~4XbwAezquBF zfI7$u@1-#NJ5uaeiuK2{w*R_~ zLyHlK^keI-dyfZIBrE;v7%OWcH7d})lgPO-Dy)?Hri@}qd^JUY=VntyZDv`#c^5HZ zl)Z6c)sdF*s&!M9*;g!WN;8(s+97Zj-JOGSOm;4&6yB0vKciDR8L@R| zOIVu&;l}jwA?rG3*L(7^?1MIIjuJK%42VjVy}=X0cBs!SQaxL$Ilm5lttrbtsHz=v z*+KWBoO~H60XeA?kH(0ri1DhAaqg*!q+&PXnNrf!qz;dC^Ww^tzj2|uK^os)xfaFm z2&U(gKN$Ew8(i|Xh;4J`f-2*zYP2ur?zFhg3L_Yj!q}4MV-M3o_N-Is;|}Q&>*)j~ zn#u1eHKj|*DSjdz)|XchY#w!@JxVXKH)8A3a_92rEtb zbg_gM5E_tX(IigdLI`Devgb?!I$gy?X%GLLR1mhk|IIirV&eK*`|-x9XUSKWf+^@Z zLvXi57}YG4d-P@|yW!a2pEsfK9mlw1h-Q3Ajvi@eU$J|s1JW;?K1MV%Wq<9xW(S+t>Di%Q^>Pf6yn+hO>*P?%Km&lPioJ4xst7v6hAv{|c|`<%L=>Q9znbHY#fj9)~demy$e~ zumn$6H);8f9R2@iaJ@F*O>geD@Kkuo`A4@@i5U((}L~d0#X9mB7G{JbgGGKJ0OY+-F~r1e>*W#iMgl*lNBP$`pXpV zVXdppNTJB_o`(K|*LWM`MD&U5XbcjpCT&Cj zGhH6=zq}5{Pxd3eWXQ}}J{A!p7KoO9xS1R(?@Um<(29R{y^BHWje5Qg<`?d&I^lDgFRuth$3*37X3#$ z5BPPDRNH};2Dapg+@sIkz88;|Gc(%1Ei5f1D>uCJ{WlMdr+)jOfg`1#5Um&z*`jzt zq${IJvdXAYY);v;iY;opn@5DAC6K z^-cU7&QR1f*UBZ0+q8kBAmf|;kh0GNnBA;ZuhEzYrsyCAyhkRk^8xd3>(gZg7&Znx z^T|Ac7@i+}ivM4|uCK4}?CK(I7B$BVI|ehLyo!zQ|NHkJDoj8z#R$!>Xn{W}V~rz% zQ3q`kX|VpmM!oC##DQ)Pl9(TO1(`z_A53qC@EvjDh9qLJ z7FJOgfn)mXu&k02AkXq}2p@O2h>r)hyM{(^;eSaJX9k9nxHF(Brew?k<%;BmlfPFl7kz*I$6$@F~I9GeCh2PXG`^;mIfK zZLYSSH5+(6?g0_cUC<9Fz~^#tagmgSUKJ*%huHi-b;*ZnIecJ1-0eg}Tmn=z7)mIC zjV+LfDMbot{0ogDlVoS63`Bl}P{2B(69k1E{S)C1Ha;wELJOZr6x{KTk3+H!%Vq>i zJYd=s_HnQeo(ysygawUh^b7l{qqL}q1Rc7WT(JPf?q_Z=VQ(y*6~77Fh_whBvX4AD zMcD2SgV`%X;YJvUKyft`QBVd&q~kAdls@cesD>nUv#AIWhLOd1Mo{DzGR9hA$_#F^ zI;|=VdOZR_691};VOe19kvGqoRmV=9XXAVl|40ZNse1f0Lgb;Do6pv5%i-_=NJ5gVy{ZddB(H4vhoi&sr~(Z z_#UWx-G3BU#$Ix&Y5RD8YcT@3-Wx!RyjDeUoIP0NjuFEV;l1J;kuv$O)AUrL`x%GzgUwsX>*)KVUADf*zL`xheT%*3Ef~FC3F55!OKKmK{sS)J;t#g&Q>|#Ckip7MyEk8 zPdVF>v-nZ|C7#(Gsb z8-F-hrp4#X{$ZNQCU96hI5AzGs-Q9-a{q*Z&SvY}Lna#H${X@9aN#3vE6k@YuP(PN zD`)QfB~0$^<=VfT$5f1PqRS#DSz zR^jaOoJ^PzU^6MRmeqYRM%NxW+(&PhgZ`G5 znb%jx9wGn2Jq^!y$z-2;Nd}A}69b=qg%-Xcjxmr9V-yzIfpY=9!qoP67r!qPD%(`o zCrTY16W1PAvaM(#PUCV7tkZS45(;gVai>jrRZdugi4vJjpOesZ!u6QaJ*JFxN+LZlt)?AeJ++e{J(irSHzx3jp z`z(a*U*HNKa}IZ>s)Lvtj58;ZlrtFNvSN$6$|`K#(mm;|Oe+5c<+vS+G?_&fl%1Ay zvsUPs?im^G4`+SYmws2 zX4|tpW+}THS);+Cs0z1g4y*Dhhz<-PtgjS;rrtE39V;I1K0?%F=uH zG`sJjJ-s#+fke8y-z3PZ($so^jjucpS_ub_-tHZENbKjtoRh}HwX%XV`OPy@9vc#` zH57>37-rb@m&kRGfE5Nc58WX&nl(vX_ zmYdYIyvOo$)py@rLUp8*ETta3)=eBVxFrTLVnXJp&E)c2`If zu*0^FcHTDTUknqjI=TOtixAoiBWXrx@iz)FS~c?i&7i+2J&i0B|C0Kb4ykX2D;%Tg z?}Xyq(88owSz5Ex@AMdbm|VkP4I3~A6SF82hNJb67O^8reN51Qq=-t>Y~q7aGb|Qt zL5U1$Q0$wM&LZ?VIy1F5AS2}=D`+T03mgg0p`9b8CTasG{pyzcwutPwj$ z*7lha*J*}%LZ2jEo|@nJ)s)R?s2OIXiFu-k%gS@~5l(mzb9a>onvk+hIZQ_dNymBW z5m5ixU@6XN*^IET%v3B!X2+@sN+m|L$FlA?z!Ya`-yorcV$4RpXo@vu%pnq`*aGb$%0~qu z_!FdRl%i}`@iQ`#u3uWxm2)DzqN3E6#u^In zCWXb%E-!oBPZ!fkVfD5et&>;m!mUtMqt8VCD@y4+Q!$&`dU_vRCm7IWSvbOO22kWr zz%cf-bfZo<30`oA>2N#}5kC(N&C&bgR)qdE2HaLE6G@8klw;5ft2P`eDUIvi2r7UF zgw+J`Jfvd$_v;44v|?WVBj;fdh4Jc2+Wh$p7Q*StpfdRHNyMu@zN6 
z`CnyC_e%vJnlkD%^7uYf`LEf4g+Jd8g@Cfme?28%H%K;TOZ@TgJcm$-Ox^$qvB=x# z1{wN3vgOw{vcteU&Ny}S1xgo-({}T?y=(fUewPm?!4cs41O9$!vxw^DR!t1e;kmuf{)h8>L5iBG32b*TUifoNaDf2Xr^hO;Oh`1I-y(xubh;jPlgQ& z-BG!qqdBdn|1&pW(T3YJ_El{*qAW9r87EjKoFPpi8M+S5A}25BYfcglWs*A@4kcq* zro(}mx9iP^Gj~tC{z_>K6(~Xzj@A(!fKc+4ls{NHrXZ|Rx{0Q*_J}?d9~ByO(o2{) z0)CPb+vasW_~Qctp{4{i_yaAc44Kd{OrpqoElVw>!TEoT$)$^CaI_Z^@C60`7$eoR zR)VFW`lJ>X7C7=FuXX=_D)QLx6pbchtcSdaDADP1`K&N!8Ri}539t$HY*PyPSoy(T zwg#UjC?s503K^70CF~C;Oe7p@9!Py+e0ne4JGgthFO1Ux{)kkBauU>e#NV+o5R@iq z3r(En5Av|NfU`5lOV?8;$qnY)E!29EkTg}(3Ru((JyeQ7+n*3_GHG& zoQx3F37S!%mZ>b?WSV}ammEg+8a11TEuCuI(eP#myU1(zT_&e_KeG9Xm?_ROTUwV3 z&MaHjhJ8aufgBu|m+v-`BSxB%OCr5c@y(L3Ns|vp@Zq4vdEUSqX@+QL&_Blle})M4 z`PO~p`M(9YJigw_c-dKRp$l}!o5ZT`jIP{h_uR>Mnpu@SM~83wC`*-$oZR1!9q&1E zA2s#iv3ri}zh6peGZA~2b>khpcsX3Pl^|bUtxn{)mFGD%H2AG%MvPv{h*l3|^la(h zta+Y(ItRR@oAz(IruZlubJMK~1v|UAM5Ty?@(f)Pinc_qsb%qL2YA*CT*lsZYU&(D zC=e1e+6Qv|REg}jZ+HC+%XX_GrC}rpdlL5>SmPCQwDVAM8`I z`B8$w+}2gPKD4pCs_Jk`PW_aL1~!95x3f0F!!RWf8rk?-_vx!JRwc41UlYy4OFfy9 zt;mX=H9xo2Wb#9I5XX`3Cm(Bu36-foopJhy15n4QGWDZtc&C|xitc49FPSxK;{h=# zvZp_|fMMmcRJ9$`;c1lBO6&jRV@*C3b0RtwV$CW;KeJqIZj@Iw=zYZ>_EJ?GIVdr| zG9ZxdDm5loo6R15Yi3ilqDpYEA3t_r!ap!3ypXBOnk-40EZOO<#(dXj1P(*+CF31> z2_)9X+vLEZZ%hyED4`KkJo}z|Qp|Tegf)DGRlM8s&fnz}gK9|>*dq3*H`7L{3GF3B zNb(T)S>YT>5#cJkrGjz$#=qmdT~E_!m5~|v$dhbaykFCF?#^7mTbJtecXw{|4w4TK=z14cpgi|$! z*y5#MM$&C!s=rwj-tdA`Uv>9@z3hAUrCz0_&Z&!ysTvHWWm7U=F+wc7K}@1m@_;Nt zcftp$?8gOAf(l$eZm#@Po@v4?f+gD5_~tTIO4(%j6Q*FEaCEHO9aKwe)t}9RAQ2qC z6$*hh+voG!V(RZcDJaHW9VOIKVKscHU;%_b+d|!yL?9-e@ zyKSN$yYg>dUW`BO%6$2j<|d${xvIZptgkBi2uI5rr2@ zaqZm2yVn}sV^MSoWobY+uJ@(OIGlb|Q7Xhus_$NX@`N^NxyPcf2CwfH9n!dhM!ezI z?{4zA7eo)~uo)(LnEnTO>g$+*yj{u@I<#Iysqbh+Oq1%!!4l;2b+_wv!{~Z;mm_MS zwmNOmLOWy!$K`Jpd9n=u!kDUvpgE_rgL~smA?oC%;)RHeamp~8IUoHR5{FR|b%~DM z0u{&OZX~-JJ0o%hV|M~6){4a!zB*xi`&H7oTd_h17MFrz)VDvOxmrLmXxhrX99$aV z(f*qlX1n4Z$jkEK;W6%+4B?Of8%%!{hS-7?3@?%`j``^g`cp`j z9|B&@wCP6jJwdvsaCu`5oN$ur?U!^0U16E0U^ohv3i+km#B9;&SKglX=|Cq&IDbXe$pSJ&uZoLwopoHM z6bq}j&a|s4@^9@|y0Wz(&O%NxwT~O?-h3x+3CRsOqcpKk+Wb77m>>~KI7AVf0ddom z9NfI}ZVZ(A9MtG4#4-5#HO+eCz`#+$dP6nDRlI@eS~9A)*@EW9YICwd-H+?H=Zd>L z`(xH)e)yo6MR6!&-#{`h6A(YsxcEz04&#M-TfYre@@6_)(bH#2C~Qs|OMZD#1D=5+ zAIZ))ivug(AF!&C?OD>7DEBhV$8Hc4HZWNpd*2Rq(o?oRpXbtf2_L-VG(1OR4Ag1D zVr%oEN>cg>ifOA?X{ns!WP%X_T(gSAn88Q)1icszyblO}2+%my(O`*1h!4wMqLy>7 z0%Bd0M4i9MR=}Amz5hdTYVx#9VyG?r!=%B)z&V@Vxk&>gMp@J%f_)+)73b|HNu;;j4?qhgkoo zC-yww`&q+O9=~@m^5DaAACQqGeM3Y*0CFzRz}Si{DI&_e<9h4N@UT?PQfYB9qh9M3 z;B5kM?}lJ#qpVngvEnpZB}Od>g?MF#$N#4#KVC1v#>9SCU&Pb-r=-Nifz1}|WM>j- zI9OP%HkV?uAeAvj*l(p*_E)-E-fvH=t2AO?c*H$Cnk5T?rR^Y7THmhN;y238uf8Hx zdzY8rf)0V|%CrQYxjMiFSJu-*1apX+{ohD#vyfORSp=oj7`+21A?2u;m>5y+3eG%u zz9T@p%MW-d7udX6&^1&*&W$B&l&%)mfkG2PX%*)G(i@Fs*E^3%gNun7HX^C#rzfd4 z6f^?i$mDx_pIYO%T)RCKhyVY%M3;T|_8`M z?(ChX2-^zY7V7&NnNI>+_#X-UUNF?pJCkpVcLTA>+;@P~_6@)=ndfc5{+PTbdO zz=K6qW_x@d7ObYNIGEFZ>hJk5u##M=CjLeqC(=h}K@!XuAfa5BW<62bN*&8DEaJ{ByGpJ2|jA26LDw)~2dW)OPQ zHYkoKm8x3=OrctwrLTx*jmj5bHx%uvi@T06tj8|pI##Xp$7Dx(Aj}YO$Pqu6Ki<4q z6g&$T3!F<&W{lEC`4v$TcU)^06fwW>Zb;8>ofx+->bzPPd9kK+EdS_9HGZ+^99}9E zaCmg>o)z^ER5@y}FJCf2x~!bPL_78gekP8X%V)B6)=PUpyUySXtn7GmQ{VHv_4OB+ zeRK(Z`I)C@wfitPv$GYr>?$PWRW9w_W6Tg%fq)8)3jS&HI+qi9F`jhHSG-G#&?}i=>vBHf@!=4-;YJ5rkqX}W{%h09w zw zvT$6*QX^t`Gk(R13UTE4WBtY{CP?U1(CPVBnL?Dyvyzh{c!()A%#W8RD$2NK`d9{a zY<+Cgg%nyB+oUb7>_fdgFd6NGf3>G+ZCM7(OXiF93$qv%{u9n0{9!X`sGv!PaU?zntA zw}rMwfe`XPHq3~q!~>2;Vwc)(pOG$9}tN2 zHi5pvRM@m*<2i95JV6(twnH+3YDKn>@^#WF==(l?nYh!n7y zl6P2iOP$R`*u3W!q)@E&3Rs@7#TQp)N#+uzrQ)H8zpf)*$CaKIDX^&?TE_4J)&%r;e?-(;6I6s;>EGS18x}XXUKw8?6M71 
z8e!?ALLiJhzbE5H*maWST){-}4&JcC%d?L>+2y(Y(5UVqp!aGo5Nk>vtnw%S zbdbJiN%$lvVkCjFzdxB)*x*Tc_f2HD{l-q_cz(T5xoRO4$NUl6V_kB0h|ut}0I%&m zW2!zX>gA<@PXdws7X$SUQ);7>gr)O4P6Tfd$RS7DP(@WG;PrTzQ{N=ZMK6iuZU<87 zftYxIySBYMpgg`OZQB#bG3hdiI_3$M+Dtue2UZYQAI{2v24lRmT1VDb4WXK zk#o)k62FU;*+0j!EQ#W@QQk6)?F?!Vq!QW=yxpIuev-gvOUu^^M496wS&pALgPEjQvZ+LFK%XRK= z1u;%(lPMV(v#A*0!PckcR*1Dej=66W4A;`+!%Yqg^KmTb*!UC==%J4 z&CAOJ7C8T;6l~cel4%7zZNuPH0$Ip@O#ploBu0O4*c0%+x%QZse28$$Xtz+Vu2;#^ z*atdG!X(w+Ya~$#%0U|Bh8e|4ftYLCmzatfJ7hL&w>vIT$o@z0<7FzB4@UpvAlvc( zG$8|isTe$*9;CJ^E$laeA85v1Q!Tf3S^J>d^iHSiB z$dClnb+GEjsB?c}Ne6lHcu8q+VrXE3@iYDl%RsDGo~D5H2qGd4f%0aCrRQt^@1d8UVL9*TPZV6-s6Vh5kzNQc)<1zT>mzOHMXG2JJ3~ow(nnvXr7ZGKsZPdX(R`nA^c&u2(P0?jz#QF`|BE3HVjuO_f zWMs>~jwGeMQIr6)q6n5ZiXYo_8M~JXLKQ!^&_;-UkZlGhyAnSbBwS0d<4(Df$#Lh2 z#N`dzNg9rtA*Qj8M_BR9O)`?JV1kq^!{l(-*b_(hG}z^ijv3CaSW5*9u)p!i49~w6 zTZHqi5FMVDFQMOea3o8+ZpOS8bR5rNt%m4Ch~>OA#{1ckS#Gqt=2bLI?pZ`%E58@pVUk=>okm^HQN+g>b^#So{KKW4X zv%t=59>p2!XpDnxIUdDnF{lD@RHZR=Cf?SXZ!6LLIk3sJzkMU@*j5;^zL*=#_*-C~ z%fN-dq7^c$ojWbUGX=rtyJqZNy<(MIPzqxDO}q;zST6&(@En*3Ep}((mr-p1W`s>k$dnx;wMs`36B0m->O4hRd`Aybe-kHlPG{hqq&P5BO zv)rroAAW?lmL#hg^ro?48Mr4KDoo?oWYT|h#U1s=gWVZ@=!~vH4g$uu4Lh0lRv(~d z<_##1%VRyFsjFg*At2>xo_Q7tv0;t>uPYrvz^z@?o7)u^1OLL{rnUXw^RqWGFvC=? zyRIgLED2|W)9>c9t(Ercud`sPwPG=0oJ8Ede4wi3d`r1fsuv7*B70%nE|9#~oXt?h zQ+BR0|9G2@hEx?1+x-daEsefAV&*z?=9Aiym0o47$J#dr_`j#5E!QO~yw}|>CIW4q zFl|N|5$*&EZS@CBPd(&V_=-KB6r zA;SlC-eVUYaR9bosWJLOV5;JlO|3h5kCV)^#UM#3gI{ z@X6P%p$Vp=FJ0}pHSy^GJisY(Gw{;an8GNw0qGT>xA>NAB>^aw6O8x>((*!m(XD44~NoXIuCBdgeSJIkEPx;dm#i}`o8=`z;2!N_fwx+s0Y$?u(r z@@J4md1-`q>nJ!vQ3va+wV?XT&Jyi?EH~d@zVX>Gk0E9NHXF;nnZ0}#0)2xR(k#Bm z-cPgn+kECF26#uXwKYxUx~*TYP5C5 zmZXUCR&kjee1<8{B{gpw3vV02v9eoz^*LtAh;^bpf8K>%Blbdd#)Eb~Kb_eC34c$7 zDPm-Io?H|cTaQc3X@1N52bBIWw*+E?Q=g*Lbj-eC>}xE_OleSV*w~F3_Vz9X`vHo2 zMOX|fkfC3QLmrySwl49874%pUV`3)z&!IUM6iBs)DK`b_{9F8<3p-{uwK;z6w7d{E zxs(vZ=z=X2=i#SH-=~B-!h#O_%@S|0O)wna^n}oQ$BRzBWo+wOt=6eM@nKc@Qbh7g zR{+0(+a74%$c%4fa78g?DDkJA&~d|;i^E-cT~qlloc}@ji&!RmEnH&_?IRKR*&oI6 z$#GwoiAYFG#oNVm`U~`(TX>dAv5OJw_&+pH*Bv70rGi|2fp543J^0rpaWgJr9@w`= zdJ~?(P55|6eReqnEt6?K20)I4z3_weWK_7NlhpXw^IOu9r`AM-VGC zGVrKb&PlDu9WZOAKj^QImSp`$dDJtH9kUR;?4{_F(ClVa%pFxCD*=bUIc~T-5NfI0 z;C6-_`t8H_V=NTvT!C?F9)U;A;GHz*(f3lRSih4rCu<+v@En^hh}sE2aHd(x3I z1WGuThSuucYJ(bc;$}6oU_eDLhr`MLla%CBKV-Y?PL*Zt8fVeYYA}wE7ZCh0-WbU; z(!ewpa58oG0y{VI+ws(mF#1`!bL5KHKk|V)9l%bDgnsO&8(u8enT{q=%32`xA=eU$ zwgCqUBCxx>Ba%EyF~&9!1+Zo=)>@bc@*Er-YFQZ=5KUkr@kqEhvL~3Bn0R@4QT&Bk zwAt6liE6~zf5zM#%@hKIfPlGs#+|)&ok%W4;&uJ_SYz1JZIX5efOQ-;+BsQRP_nLp zjGQLbmv=i|3|{Rp6qU-o)h1i8DoRhG=1fn&x(!&YDOw~O0RcfwObnYr%L!opQQQ4L zdT3-v>JVHjz{-mH-R1daU*|_cSJlMWe<=dkRsdR&1)1eZIm*A%D5d-PARBO0Ns&v; z)V6PqOY@0VJ&vBk4`KSh9W;&Drq3f|>aprL0t?H1Us-`yLq*LXoNIM9g?&C<{2gNj z&1QfSXth=TUId-oOlgOKXj#I0& z|7h=4Ml?nrNJNnqk%wS+_~}$oBul%FxgI~P@&k46lOz@b0V7=YABDIwRF4EqF&;(q zaW%=;5H0vVs>s`aOd~oXkZ?g5>x8c{YPjM#7&Vk-UnU9Eta`(ZOOjNe!R+&6za`Ae z!^&vJv7q2UeSU)xu>F3%V6@0PV<^|i=?B;ay=`&}G(hMxq*BZ&JG!7V!{K-l36OI) zzb`81Y~%7`2qnQdi4<)UiAH_{M@L7a3};x}Qj2!(8_o7wFU(==N!7K7r>RXrB1*TI>xs8dbH{`ka z4YWQ*$qa=F5rHT3+tg6sDRzdhp06T)`!kGUNVDS<=37 zC-nEB%)X=FO1b>rbUJM9=a*KkYKCl>pb$E|SR({Zzgc+qGEYT+Ok$aL2n^lG#@E$1 zvc!gH%0;)B4dYQaavcEO4M5Uj7hqiJMo%7x>UDvJ{K$~m1C+XyHP(f7WWkaF6xAA> z-BvSIXJ+gy^pt%$IBMDhG^%`+3=(Y0u|dMUU@LDuQ(ksaqE&e!xUGX1P}3Sg&tKA5 zb+!yrMu-ioW>mU5#QR#Zi!ZNio#uXWyP?3rbspPm1Z;CZ;hb?P6`T+j* zx9H#Bw)@}zH!vLx8ktLs9No70;>JoMrpXWL{U~X+CiL(u$L)>Bqqz_6J!pxC_WZV; z086%mFO@PY`uk{DuDnE)3kY8wsE4Nca~EH!SnziQ4)APeD`; zuH`uHzW%kO_+Zu$=G=+SDsRGla&|ZzWm$UZCbAs` 
z(L&bQ?w#tBvrc2t;b~3tb!BJw;b|wzrxlSmzO=jDvXg#qTw{|T?=4DHhfO6_fz>)F zN>H|*Sz(z4tsC081NAe;)ypB<6yD-FG?9)tVfW5?_*ogg6*N+nq~vzYI|JI{K62!Q zSF?6VPB@g2=WzLwPwp34YlRSjddc13a@Jr4=0jMH%r1IN(v*W6vhh%Q$m`v3WM93e zF`jaqk5{@n80y%>Q1hhLLBrSP%I~QPZZY21jJb{M(ykTY>Q(jRUbQ@ER}O6=i%v(F zQ|#vxz_PTFVc02Jp*lDDHUaN>tsHtW$(NSj)6oOgPJZUs)|3jElHwCXiUo zTvE5j;>@o^lY^{-m`a$5Y%73OJ>4u&24l@Z+Q_Nl9B_^=@l2?By`M9}0-R62Ne zMph&Ek14g#i&Tly!{jS1x_*&TlAp!XXc*q+35kaqLhd?e9M6I*vpp2^g&TT}r)MBb z%R;ib{bMM8b5^k)8N=qNY2%s0OVAA_R9ZAl{A zj56rDee{JJyrJ3+h_CRBfn0xkD+=D)qwuzCOIt<93~j30FblJHnA=yPqB|MVPX96; zgYRCHph)Gsw|`}~=G`C;PPjKw?-KCUN%cDc|Nrn~aVJX)vjI&;nCc&Aa4yt;?{>qT z^Z?E*^3q#t>xz(NK%choEq#t(^y5t_uM^nv3cvFf4KW7C)9Imyqt|^w4~Q~* z;Ux(1IjKhw>F0KCgF0RY6l_r2_b(Ip@xB&FbYKbg-b!Fe!3 zV8{>-ot*s31lW(kwUdK`P&?B=Hjk#`4{B~sj$|C(JpfPEk!_g59f3&wmCL~pt&gf= z^=lGf%MkH6{R77Ol%eB!jQ;$0getCewP09+{LbfP3EXLkiHT59P*TfZ$=gf@;UiO7 z(jv}*7w!b~#W4l{Oi-bd<}dGMz9T3|D}bIev<=)=Ju8_GVytR78jZo{mBIP$*OoD( zt~c8AmScEKF&8MgSy}KxC>Hy|f8s;oF?4~u~y2E zQ~!8n@D$~Fw)}s=3Vm4V?0e&BlczytSvAcG$c_K%DI=8i|3}+K2R8u|B_-4|W7%ai z8AT+;AMfBrX{(x89_X~*udo_MiYT*=D8tGrmMp)a7eFW%!u#lZ{lHOGMbH6#d1Xq9 z0JR9>fL6#-PQXqMF_NNzK0U&k2mDwXVpPmpW)d<7rFdT&Saxo2xJA-I6GuT+%d52piqNr4kmLE z7;|iOoqUn@hfKge%yvOB0ND^EBNG+e|3?!jOw>w%@HI5foYCGQAz<%;0y}Y=uo_tg zV=Z#(t_UU<0+#Y0+&6uTI;aXdCMF`G0De2E9|-XM5^zvK?Ck91xSIv0=%VBJ@WsjY zc-rEuw9OJmL4il5(mL-|q`VfuhwZ)d|v~sF2 zrXt#Do4QixOhK-cpq*IP%TN-zpf28KH9;E8hP9u-s6sG>MM*8}STk?yV@@!x%9g!P zd-MfzF5h4GLFaQVn%aSD0hG6~`Sb7trZ8Xm6F$JWHJGxm_y9$Ac+?Ar<){LR_fjE(TG zc$DN_RnH&uHf?|ahwhz`upY&#tJ99V+qsYYG2V$|Ai`m8VFh#>WSA~Y=p>Pn71jDf zvtEVmS%+e9Ega6H2@JDU+E!Z60eqfBsM zJnTcIT^la<^NHLqY|U|x8w>Z+(!sykiF+pRVnFCGqeG~o;N4&1e7I(Jw@6zb?Jg-I zQN`nclf>z2W6${;(W*Q2q&4=Hek+n@i&)i*HeemR+$mwan2JQd$lkf%1f5pyPJbAx zQ`0onv+gne&h7D!;KL4d@s%!4rr|-B$2y8eEoKKA#=1;6`3DJff>8lIpTfzb{M}`y zco^9ek$7ams=~T>oEO8oaYDmEp=3KhqlBy}Zgw%mb_D9Sz2^Py{&5TT9S&U?8l3j8 zbc+#;I9zZ6oSQ<~U_&cNLkYWxo1%`A^LkH*C3EP?C&5Ro6}&D}MN(m%a|f<@ zQLD8GFU0t(Ch`wOz)Mh-(r;NU@1!sc*F})o!r4UX1t+lx)j^Htr z@pg($ciM|;mCNkK^^(5g-Ls2NseAm}WJ$^SOdeL3M zq^A3!dW^oC#O-slnlIzI9>W;+VCQ|GX)-TV5+K z)k@D4uEx}^$4+pD;DQ(AdTN>su@4aCKVVN}e$#B`Zf_#NwJb7RI2F#SjEZZGm3!I@ zh(RAq)_dnvwqptI+KrLcql&irAbKDeWw zQlGd$`5x#GKC-L~{^bH7Z-C6O23v72avyN*zcW$k zK74_R=dLu%cTVn?uZve~=mJ~t{6)l6uR;vhy61&_(`uiRk<~`^vdZZ*-np9$DVg@Ftvm-@Jc7JvqvNpyy2 z5vOLmy*)F{Ku{#;SIm8iSweTE_rRmk+7b^;zfllIsj%Q60AnNA|89xRhkaYL{vB8 zb(xqNMAUMKxisywhC(Ku6GGbXh9<@XA4shS^%m?f#v~4fW=NsDuN%k}o-WPO683M}ZwqilYXO4q)k)RpO2%5ksi)f-Bv#CC-!CpMP;f2)LI_YiEwj9F*_pcJD%ZMm_`k4+ z;Y(gT4vY%}0vZ5}`9|LgRS`TrWW*w@c5}AEzPxXa0>M?_g-~$`ERZB@1-zaElXQ#K z`UuZiDpN|5C$HJg1sB4NXk4Eup$2pBH3xy5_`ZD1YkcVA|I3cfdm~tx25a@ zbcM`^uWb@v2r&Lf(K$xPxqv~qZEV}N&Bk^%wr#tyZQFLzsIhH3jnkw-52W?1kZUndOm!{D>bu6phON&?XiE?-8-@;56^O>5w$Z%EMFi*Ji9wCGX zlY$v|Ur@&P2)C5<^z^3EiVEnR!q+j+%8Cjhd`PITe@p%#!|t%Iw^0*g;8XMQ;mOh# zb&MeneAT$uNoiE0k4*>xy9#mR<~jkC9&l-|!JIS{1S%34#plBPnBK3J10gP85fl0I ze8Fo7p*_?Ks522M!k3$09@WUw7P7@WDHlZCWM5br()Y!%CBI@C(W4Dao7MA+KyYHA zK42EDE41K6nsX}8mLx>CC^Pt>7W6KN@mkK+P-VG^y-PP-_owi#Zs5i_9>By}M@wi+ zvS(8t6>TdltWs1!MTI7UiJ~&x)~!{sZhuO0X+vW>j#$Dj!es4l5?{;5rlxL zlAlTj9wR!uC&YQTNA;t4d9|@`za`0fYb#!t9e?`_3x2;j16Fb=Uj^=yGkPAtNo#*V zRPt{KCH}0)L)`@7^ zmYByf5k_feCwv>cZeHxaC|;+BDlBWkRjsDa3nh6l{;7w4)IU^lbUP~o&D1MexeHLe z1B0l{0#jtcTKgH#PVUG%D+JIwuWV_<;&Bv?Z=L;1oIqq=ni*C?5tSYtR!SlKHIX^% zOU&*GF6$cNX#TspE$z!8JP))6HikyvNwBiWYP$q`=Hrfar^mWqYjah1C#JF{83NPx z_03FrCS}e+DNrCyFosm&G?4&Uqf~HE?@q&;tM1H68O5Pr4Kuo*-Xzs>Z4R!8n=|kD zFmo8gagF&1N@1CxTy%#9*kS?qVFN2aotkHx`FecOb>{c^^t;}|La~UDQUw=&tdy4ZgZ)PoYFso 
z?1Yqq(GVY-=SKm0nJx@m<_ZG8c_j9wet5@GtDTo1W#M7{hop)cAR8<6O3S&Bf6BfFa(PWx8#s;VO06$;(d#e+%wPW ztc_FC;DlP9wc<&#^V;f?MT4S-t#Y-i(&z7+bMt*~a!bFwPkYr>ZlkcnN~$V!I+ZjQ zny%o@vA=^%wafgSx5dN9mu(x~+d-+pFywMi45VVvcqAH6qYCOsb%-_BI_?oZRngQ3#Rq4rNYXdF>(W*uvo*6{1_IBIfKWkoq*cWvjE?$YtBiGBNe5o7)OGuCq$9jjb)&lL{|W?AFQs zHF!W+PphZ`N(RB8k${5-1D9<~AWd(_$gf+5AB~=IHM(oM$|{IC5{)bRjXBQP=1Q|S zwFnGNv_sFS%t7L3!1Ah9XIC9V_4+oXAosdk|IW@v;({kAe2~DKXW=)nso+u6wU`M~ zjUa%5iVd;d=pmo&JCYa5|K5{B&KW#Sh$70h}sf>rAxn1NR zd(=OYn{CGs95~_4f=>Ifr}&j+%JIwppyHKVEFuqUK$N;7$`3S>8TA802XW>}8Ol!hz~T?P8b0epbP ze#RwCmi%v4&(YW2t%VFHM2t?oF+@`{N2LP2uYrXE81RIdvvLu%8KSD}q6i<`1LU%n zA0TMaC?%Mm9AqPMrZY;K%czt;4vjV^$A;`A*lIIW%EWW-Xvf4y#T>%WCc|*M&A113 z{$#I&Frf@hUmq-UkJkHS&bG3qor!y_cwy1PhzyV4G8I_#{I*?ZuedI@upkh75na=* zW|0+6%1lLaX4cJoiJBR4U|sX2;%Jk}!LBv?g0kv(?o#(=-^`p=e76y$-4OM`bp~pH z(#<@3lTRKKBEgdQYsh+bPeQy&mcvWboNMrza(drRg7n*2)H{(afmh51fFs%)zXMU)ftMsrz8VbMvb~vsJJ)n zNS@e@I_*E3^y*VlfsA5UTTRCya0y{2lL~)Uia{mI;VFs1XfrmiCi^zvN1B+7VK0(> z&2GOKhgLzY^>m&pIWrtNS{El|v)BU*&i>AR55P%LG7Kq|P3Ps| zdH(m$5FKWp794Rjo+#X;(4$%9pTXw~)`GH<)9_9jZW=I9Co)yHdNCS{_cTtG@B99+ zN~M?=!i@Iqw`#cp%{?{*{PV*wgtgDVKP2pKqj3btM#4+YW|L_^V>XZ5$rxBJMlR@i zSkd{z0D>T4#xE@>*b67}_oS#(ne<)%sCMXkcV8@%M}~lcf&vEKAi=?fxZ#6&EawV< z5j-K=)a-0L6qK)tcYtucl7t!dzbee$wfoushmQC=)CjByv)ob;^B zoFS?$%&tfC^WZpPGGE-+o#*oIV%nfSqB3L6*VuS&-*D57-NmWX8ER&-O|F| zVhHr0Z-8E|h=_=;#MbtF% zMERZsFnfg)A*t>|9A->R2``V%g zdIdNOrua7vVU0OwXc{7!GI(1MGIj_$dWw0}^iQcmcVY_2Uh&81o<q zqZbOm8TUSO5n~>eypDRDkR3c0ACWoTYCDF(o-u*NhTHrv9rfNQq`maioWX-rVGo?X(kQ9Ph?9`&bN8$d?Hd1RnVu-iG~@?mjc_KY4qfQPnX#iBq5wI}INl0H zoA5AOd9e*9c5OLh=m(v98p*>RF)Sv!tvCO}UFM`SD`;yO;tdztl3{ zPX6sT6@+5iQUEi<-XiroDWf=Z823E_seTom z@6PrgE7cEohv)EivVfi4r_tK&-T=09|IV8p<6R3p`2y4A;$tb30;}8kU!Jo zZ`)%2wwZdX-+M5fovCjvwpPUZj3dw9ISHou22^v5X%krI5?I&PS>)o1M3O##pmWkx zi8i8W&en@=N7DwKP6&C(R%L2`3LWcBmu^piO55V$FxswuOSIaBdnrk$$tg~164_XN zfzTXLO2mX67BVn|14USg`DLiFx1eq0T!VHNGoAG|aQzGk+4W)Z*Ew^-Rbo zmO|;EU&qqCGz+w8As6|S7YH?A*tTbh{CxH_h^y9MQA;I2=W z)&%R$&1=rBgBBJeCq73ey#Vc)#tuOfJ^_IThZYk`Q0+ z%H*H7=N=!eQ?wTEURmBRg+BV&hIwUgdAbtkHxc12G;>d~p^go4J)wWOje?)tY-iz5 z;S2~1qb!JCdYFW5re#xy+(yy*F74z8pqshFT(E2HtnJf-m%^@=O?rDL(5_Ud(40i2d8MGUM1-esf&d zpC516u`S7!oxYM;@-DwEcy;cTzR3W6#yaLjbcHuvhIjTA}~DOpCx+dwYs65Ly*}Lz;4X zN9tLs`!g_&aIxhahwvqo5V6Or>HvFGnY?g9uZL1B4~1G)UnZe3U0>z^gcB0~TW!6l zVO%s?5nfb-h=70xB2_Ud$;1~&uvD#=KfWquwO(m zOQOHbP}J~pmUVKM9lWhJPo+&LJB-@Qj=9%ST@s8@298J*-8)!?n%fISX(dbH)0`Hb ze;)LYAU0m|v@*D*bZo8>6LQ`Y#|YFtSYQQ^GkUXWoA^E6bLJ3`#{MA^=gzY;#-6$r zj)CtS)6zW^cf%IJt|XMEtmUR%YOV}@)52Qp!QXXbn5xzMdV_1U`JEZ_c{qhsz-F@x z!Hv%ZUj!zSWNX}Wkjtd|1ufNfau?$btxZp9E zd|IFJJViRz@wq6)I(E5wkNg-597yjxDSs1XUlWWz{wx+Bg(iQ`Ji638iDOT*nq3Kq zj4L)LmWIK&q{Pc8ePAuiT8Qt*3h4aD(tUtTvI_H@l0Xonc<@w}uQJ70K$TTLDg{Jz$(H5q4N zhVu!&K%_7u1pQOhx@;;Gv(4H5P*V@21CG_8jxAJqd|i|0zLMW)`Qo>mlNa@I6}_~E z!(mNE0vz5%IqK$t6fb1`pKPTG?DIT%#n?#=AwckIi3L7B^`p-B^Gbubr2o#B2bhmQerL_g2~A3mzgH} zHE{&l>8syHsb6kFEX{Ubz@4crK|K%U2Z-(v$VR<{*bG&NNi8KPRAv@`deeUA3n|7~ zx2t4i3>C;-U;ct6Ie@VDFOjy8lnv)Rn-!CHH8v&vE^WBYWt&Kb0wbV(PB&nmeF&` zXy#cIa@AZ-7gMfT@WUvZ_?La%)Si=vqw$ilxE!Wq2>_5z#&`{x=CLp5MW4jLI+s>D(XY&MXkR*RyJM=zNigf^qKfrMWf`D;xalnh= z0vF%8LeX#pJjmPO+r!a(;NA@A&|N^Ie!RbqIe77WhyKP`Jb!w42)55Gmfqw25Ckh3 zhyQ#ujuuK9bkk4|phc{EzQ6q$jK$-c$>C`Z`1JP-kC!3a*F+)`6iZzNR4Py=M%BK! 
zy7D`nEBq~|b!;Y_n$8yK2nYJ+)S_P>eu`~82<(dx^0{BawG}i?0HDGwb}M7nOyUfY zAL5nG>UJ5V;6497eEy;F2kt`PLhiRIlm|Qd1Bl{N#rrJ?M{; zNtO5(1uH9<`_v5v4nZ2hR!M6h{imv;0xVYtcd`XTBFHExNDO_c3KAXkNoUmCqn4JI zmRb`s1UEu(Rg^gTnyRlvuklqv$sLjeZVFGNmHB@;weJ=AYy5-P)TH1 zrKUji>+o;$#8p2!zFt>wBhz#dr;6{ltjP`$hp<|$lq>=d1kdeEt6VKLz0#w3>T7=3 zP6(SHlLJNK(Et!oMNbbwqhw%U0QfC3(2f~xcDFSCmvYwU^yQDzT_KK(xgl<%Kp@m+ zy^6cXeCk zl7l0F)VANzcbpaITtP9+h#j&zCZPTZoj-_*O!-f{j4f0 zFMm95T{VW@VNF-ILpf@ecuZpR;fUxuoyb*@<+_+?(7iK7Y9enu4axd$gsw30MuQQs zq0c*2@x*qn%6k^X<}#n$cRJ&{F81Ioy#1R@w9mTQr?qaHD|yhqbxv^FrSGU*|JPVF zgcSyp!{USne};*~vY=XdMo43;61BcnhudC8-dA_II@Cv`(v5^tg;tyVj2EK& zTvl0SE+M=PCPZ}dQ%qNhSPb_`_i(1RvYV+lx+Jo^J~fN;;A(Z`;&>|98ET*y+Ts{k z!khLpj3klx9(CzaKGLv%HQrma#(^zYtGS`yiGv;X*YQ@#HTwMZ%r9)S`kO@*tt*zNhb zcyjQhOjXK*2c1v7$f1Zzj>L01Lp5X%ovuo)C;Yq{Nq4TleQCua+^eY%O0+e0xKp56 zXNw&HK~!<9s8NLx{{;?PcBb>j+A-B8Mq4YL0xQVmAmVFIs2k=51|JO6lHLo?Vu zF@YWPrIyT#1`H0h5?FmXE<7J$ZZmProP(hj4vV>$=Gn0qlp|sQKUH;(iG(%zfOey* zAEs3Zb3ju4wLO~&7v@Q3&G$jOM`gto*}$QT0Z-%s8rr5N>^uHK&-u2s~VgP zjorDvdYCMnqGHpTR!fc<5RvZK-gW~*xyH}nX$G}ek1v+>?5#SqvO3ELsF_3Gq>pY% z1-*`ZSadxSw>2j}oQqyzxGI|>8wYg2`~vx=pd&lC_>OOOzw@DJTxd2p*0QTdi# zd#r=2t&0?P2(5P1HVeNqJ(`A|fJO^A;GP8hLzLC#=X!WXhm+(;8^!1-R9GaK!h!|T ztxEIYeM}7>WHV#a*2Zn#-SQ!D>|39@)33c+J4;#D&y)jhX7P`;yq77V)I!zymo`4F z6o!V|cA$zb-97PEK&`$6!!3n^_3rATrki3Ia6Jw_D)9K~Nv;E}nl6p{kKsz8D36=b z1|!)EQ%T9GKMEVfbZfnMGkzS(&`|hQjyZloU^%b~6LG6LfM}Ipf@wd9p?h!XN4NM< z;Emh%M|<8?EYUCrUd`V|dHr$Cmh96zK2P(Z;D5GLejZ?Hy$1rWahV zW(=Xl2*ck8s;?3bt`X_ExAuy57}4-+Zu+SCITA+iQy%{}&r_D%Zb7NGqhDGr-B{lL ze*c?pRJE70@CM>T{ysw-rzwWc4q_2qn>9mQHGOA@XC8_2K% zYuL1GaRk2jCySqztRQtiI|@xU&9?L(%!s+BjZ%MpG$phq`1Zb^`>&`se32&3dk#sO{jES!myh7W&7YOs#^d?@vPM z^FYuZ*goFjP6gc7bWsq62Qox)`6SWnMSJ4)BrCjHZsmKCSa!8s%eRPy>k%&Ejj&h3 zD45Pp#G*n5YE z#VVEL(m@)r5@4yocuB2Z*CU`&9rf=^KCoAQ{N32x9PDh-t_4J3^Po<|n5`8dG;6ot z>*K@4>X>cd27rZWuv#nw`pB+;hMPm(Z0qN}e7-+Z^$b1H`=8x{P>2aaEPv%@8O20W z_72ZfYViO3@QbL~-rm-z)dtpC=v;x~MF@*n{|>;q{ldEH4fwj= z!0_E@rh3Eia5UDRWqQEJ1>n(`(ZjZ!78VwRfo+4Yfr;#xYZgG`1-Tx0S4acTkH=S; z9#h-i9#r58mdLL=!Q4DNopzgTGKg^lRLV^zO6NnE#lO(8&7h8Ki6IwCtp4B3TWglj z=4@48io)g=sFrUp({aFGLFzE$TZB*-D;jpfQH<}U`4uwaT~Cw<^v;x!J=&u22zL^ zEl>kN^5QLkKUvc8OE9Agz`@+3D8wqoA9Td}iRlsG>w_mE&j$7jnpBe|o zqj#I-v{K7-MuouHi#bF1hj4!>`$@gP2n$_+AB~roXq=rC%-ZIe62uQEUqi$@9b|`0 zB~~L~`hN2UnjGG1y77~G?!r3F+cE0AF|8Vp0g=SKFA2dwAd-II()6Z6>2huA|d z$%A;gEWyYQ6fFuDI~LL!C6b;sbMiuWMs)UuFhUG0ta!iy{)#C_7PQ*k&F?8L24UvV z*sAIH#co&=Jp?M%AH#7(y+GpZQnMQ65xd%jos-ZexJ+a;)mw0e3iVW1v`Dc6v?AVQ zKd&-I@#6!pNVbg2k9{ts=zV;NsEtQ0&{A>v5>IJ=8YE+PrAUGm(VfOxHLJC(F9BF% zL@bffILmT(1!98s>UzvBKHN?m)h{W^wMCQG60LE@tobZedQIJuRClMV#CKBzYHEv>i^xYl9f~ozu^dY(1^Lr$MTWd ziCruVT$;)wp??h=*eJlVdsLar)LY;$BmlJ}y4Hkit>vwrFsm=1q-)*0D-P$5r!kS; zM3Xnk8AKWbWc4=EB3Co9aX=qF8&%Y+Ku&H|nW+ZBkWs9LD`~;LL(706P~TIOq~8V} zH_YZJ9RzQ!rjp;ZG&4mN>*8AKL&>tx6GW1^(6DoFMuQe6OX+7oN~_G|GdV~XnS^3X zW(iOv(rt9sb`uWP_&aMEXeLu}B$bMpC}lhSCyZljFh#IzD02iR`Rb2@VL#Ii z6bb*a3S2ElkXBF)#+L9Q{Gra*Z*^6T9Z*}AJqb@0$$nM7lgmQ8<;1-dZZRH#h{H#d z0+}u7$w)6GfGL*;VrUx9(kubHs>s5bVV-I2jdTT^Tfmp3$BIx@qQ%KF=efraj@M?D z6|qE6GT5-iNLEr-S}XOAEw)L&<;JPptq%i5oY|6&EGm9^mPeQADOo9KB<2szC%Nrb zO~b-sGg8v$)2=_z*~slrhB5NEi%FXLZgl3LVgSD=--h;J`Z&hOQawQqV?IA#qePxi z(g&OD%c1>rm66rw+qw7Lff-Ez(UF1264GKwJVIc~i#y#w6_x$j(sXFaHy(>zyUq#U zH?J@FyI%0TUaur+G9lw2^d^w zrCLKhQnO6w;nM`o<`eyQx1Ho2A?XTUyM5*%?0!rnGhEvp!7QJ?9c*y zZ*5f%aWQxwUF8=w^t$tAE#J)pR<7LhQX5bBu5yVUZr9f4iUlSy&X@mI^NO)e@mp*= zD;|n?Tm=*6wex!C$Flk-9(gx3n#K277bka55zD(JaCKJBuCVCGve>z%Jb6xP_^9F8 zs531{ONyw?j&R|rZMWh{qC3HvI<iiEl<6`@kJID*t%)#ifH!ACfl;E6w{E 
zDtImuZQTaU0)8QZ$qa1A^n#{_jagXte9|$wDARG(vB72>=F7D^^eilVc1qh_eET5@ z#!qMlyeSrkKP#!S2c*$2)e9r(-k}Dz=S}5h+l5lEV6{DMXPLc-}UKJb<@k#u7Jaca*tuG+r}@_W=sPQ*v_Y zd(~QIOt$Q(Fc1OOZa=@v_)%}n#gH2;SVHRdn|$bF1nthVPI5wE1+m zn!@Tir&uUIYP*(l=j;d}h~0B@omYie40K<3)_m1~(BDnIH#IS0i&TxAiGTg!R=f~Cl7iH<^F4nMH70+$6Z#Kj>($;r$tzT;+AUs-&Zgc+clqa<^p1Y&sgvQe7;x3-!5V8$ zvcYb;AA(pe#prP4)7Dlp$^jDC3c{C;v|YcYuaJ0E3AR_zeJf;^Yr&|+RDVUJzaR6- zyHzu&=*je|jZB3a?;YoK=+WgYyL7dP2r?B3F**=1AS{cYiSbOG#VCcJz-ymM!ho3C z!@8iZPc<_d(7vHsu505EsRZQs4`ErAduU=E5%qtOGZhb>c1UtgcvFCjl4iDlsK zc#=d076vBp3Wy7S4wm3i?*@Fn13i{-L;~1Cx-d2X-hBJ;5DNSL_V&}`rWY7B)?qTC z2beXo=H_MO(!#>R$O)8GRHEH5;X-pIztBA;hKSJhwshL&gpkW@o{eT^241Vx& z&IsLibB@(8<_Syom)BPhz^C-M*#S7wLNj0?+7>f8UO@dtv4tO)2RRsy1_&JC(1@h= zSXfvU_(9h|^fMsmz|^J=p&md4P6Noo#=FNyL<9r`L`3_|wky7q?T`kTTAfz=m2Mmr zNx*#EY_ST4p|6k#GUt*a2uw|5&@onzBgF{9Z5W#nn)<3<09UKs8YDqUPF_%q41%Mh zp`mfq=Z0(2+QaDO>&W4>+W_yUHx_iHzl`KZ2*KJbk>V4MMNi=pmMR3!yb+lN@@&Qg z4Q#B^bGuM0%7|F|=oL_ZW02*8iVexkZw+S3C)6D1SLAi}y*7SqTJOVcR}TR*aLUg4|4bKWk; zn9c7#pt+etImC!;AvBySolMz}^ncHU<3LG{0rLp0R#MdI#$hicM^s_N-do=Y-fjRE(^nZF^pM< z=gSmDFp4)a6sZ*^6QkJ?Y864C0;b>DVBA+ww^=DM!J?CorWIfDOH8`OoNaF7z0(v) ztS3W4A1-6ATf&N?DQ{y$;&+oJ8%-~^P+dW@sYi5mnp)=+f0tkCJ*oqV+jv$6vE4wjGBYJa;TH_FYH zcdxGh1*~tq-IiyeRPti0ex@Ef3}yG?9O~fI{@m-2LkpFY07l3CsnGHf zmmw1a)HSbem{tVyk^dHif25=J7|2y{WTW8CXUf5aQ3;rtosI#KNy%vk!&0-HrDPNY zqIX&6&xxVu@0!N&JO~Jb**JfBdC_CJZB~EscMSIF5(2ZWujr!Sz?(R-|dIHMXf7#jcs<7ZeWs^|)SDF&FJA*`*iz0Hb z$tp0}#nU6n@RKjqyaPpHlG#CoCX+~AMY+~fSyQ^wLksKm0CVJx+e8^_v`Nq&9A4Um zQJzFdo)BZ0Kq1r@YPAq9A` zq7t5T1&ekCYlyR_&)a2vW|1h4XM8k2}FMpQq3aEjR*Fma<<5)&>tsJmV!K+dDLCQDka+ueGd^$QNQd zf>r^Mon~T)dkh&a zTQCP%B7a(*Q`Vr2TUYU3w>S~=bciu&F6$p(0#)#}>jI+7JcGPIFQE>XC}p}g0%_ww z|8g&b(vz9&D+aA`Yx9_RS3%Uf9M*#f$9LJJmD3?G!8><8uVk4iBbZc(Yxv0?b}CYM z6LJyA;QRo#wBSuyoxJd_(UG5sNR-0?m_#|h8nVB&qz4^gcpJ58<@}QQjh8XQ!CWt= z;QROf?8IAH*2}y$_}ntXw>f$>cB-Pqd@N&Z&0 zqqECNtmp5=dG6*CthC7NJU1BN1n)h%Tgf{tu0ej?d0fGuG? 
z>(mbQkI4Q;Z;}=}TBdnPD#EFyu|@q4Cc9x!16|mm1-K2J>QvXZ84+DTm@yVPZXDYB z;q=sQESx{+)Q4<%E9FizVe8;cFh62_e&K#lv#(lDuN|VQozrb=YtFYb+(n@JGzw3z z;21}HKc?qz>V zLOmJmvg1CImdUwr>96ca6<=&(ih@L z98yyzOAAxIOD;#Xc==^4Jb*iR{oU+RwaVpvkECfAV|`uIRnV+NL+pxJeGkn}TqynG z)lObdfmCv2Tx2>!lS943>k>@#Ar|Y(uGF<6OenP-nl+cHiAXlkvxkm#Z-D{huZhGVbx$|D z?c(CZ=evz2P`33dr4wNYcquvQ9z-yO|9f^o;83Xi1c(|w0uUQyd!g>ic}so)0YDjQ zYiU_!kAW?F`}pwj@CdWJ0>f_rmQQrZlr_`){xone#_k<9=UXbUq3jG?X|=SpkP{xS zw_nGTsoMCcBl(ra2SVXAYBX;F`3Y1wj1Hy%ts2m5{Y>jW-L|s=i~Dzysm~TbS+A~Q zX+smV6QFVb!O~8_^#6Q+n)3_@2#`){(Pp?`9UUFDWQPsRFt&8{Nu(*Z!@Y zVizd}rSVcn!9kcwZ_IsLRWUcG1P3U1?qVuXOGH>$tH5P>xP_XP6?Gn?B-#)RSw~4< zc~DF^+_0cZqdqRYz#Q)UeE{*)H)IZ~46e`_E(anz$`Ymp^A=%ht3NBzCCHmKV={PT z8u|<%(x7rdhY|ZhAOtr^&vEiYn->ILzZ|9x9O32=4uo{Wbf~ZpW(7$1;W;3GQY9paj8{HtXc@+@RPW`#jjqq4RU-VE9^=S#Z zFGQ1@mtARXy*eo<=tyD@k6RX zfgSwI_lv7%3%tzodDElP7}wIOH*?i!7kLH~oZF1-7k`Uay02T1v>c16Q48pE?jnaw z;ZhFnEuXLLc)c?SKQAYDBUg8=t$$pzmui7TaZp}bLzL~zmlm-*@#ekAM({_zdv}a( z2r+(cmu4D?!HMbzb(Zw`oOKW%H2m+Du|CAJoBdMBb<|i8mERVX(*v6#c$=;S+_S%0x%ixg8Sgm$F!R{RwlHz zcC?n(W3@K;DuZi%VU-5*i$8cr`Z;33Nag*ivtO1t5o;XqHzrcj4TDBfrl1)Gbshda zmf>$BfP=JRHCArLcUzC#t;+tPDc&8>+E4;(8f&WD)HxukROw=;$Qe0}DB%4Ply zzcjvF%$aAYq@>zvtZla1S}U!`t_9Zncz9)PRFigBtT`MFG%Vz{-nFmLW?+gOML>C_ z#NQGboO9j%i2+Ql5E9Yt)0(cYsmL|L+syk{w$f`?#0pB0lDKPFlPZ{@$fJ!al4(cX zLURH+XsdXo#%Tsd>UM4_wQOVgKQB<#f(WF^h!tPX8>eEE{^iFX6k*?u!qWbF&o+(S^RzZyKfo@AzzZsoTBI{=1Q`6 zax}>!HoFm`;TJ5!@(VHCH>AyY3lU}C=y}<;dTBS3=O0(Qs6Ov5&(4l`^P#HCDod6T z?*t9J@CW1OWvPk~EWW9E@bdce70*nHGxQN&{vB-M%pZN8bxuf^T;IMt_G^kZ=uEcP zD=^BFAe@d=Z8>1{af)wtZK(234(?`|^Q>um2k&Xw&~n={=xHq5J}@i~wi_&)yp-^m z?%j9--HZ9i?mh!19b!QN)CY4?40AF`nj#0bdt$L~S?G2T zw!P^oYa8=eo+L{gcL4`>Muo9eo8*PQheKb1agr16=EU{i(w@ZsM%F* zpmCD4eQ%X`RPJs(zZUsz?apgZ%V@GXUk1O51r~_w>hv5G`d$vAzq8(|VQqfAM~w80 z9*Ju2P*Q?mJTc-A5Lnq#CRN5D)X9ewRA%<3+#e=hHQ)sGeIKeNkZoGXsnt?WIT$eY zsS3zC|MWuZq6C*R;;nw(^wW#pR+|BRUyOCf9I474%ZJ05^+w5NQ2e&rDjZjoMEZ|0=w-^pMlso!sqdaz5Dd;efMAbRrm%B*2qmXbtdN%V zpK3*;h%Fg(wZATlnlQ@>(nWfeXzM>ZeFX@(WJ6 z-F&Ul|536lJk{oSg!F4*G7RG2Ji1gJ7g?E-lXk$yg;<12FY(ZaRm$5~bFQnM06Nie5p^Ss<+$P5RGqa=g zA(*|dSNfHctAp9z<5hM4Zw(ht1EgJYB_$;%Hv@c^rKKg{IJ?v7 zGMZ-h?Q2tfJQDt$aQEl`=i9MFvJ7>w(T#~1Ut;FF&6`%GkB_Im?*JVds3jmPjWy+@ zq_hB~6JUJp91viDIT%d51XP%roq<1rQ4ScyZucu~O-<~OmekZ#;7Q$P2z`GFOo|}) z_Wuto1WAHW+0)aKOWq>HO-j-VfJ-UZToF!~8Ao__>=8+1ONz_7%Kc>}gg3PWvHY5-nKy?QM!yGr$PR1vjwQh2ze zqhkXw8rC1;h&Rv!*iRo2Yu|P!feuL_DmVUsV}uHTnci)-P{bBEKtPv05+<&Y%+x() zswse_^Ii3Oxev5dkpA6&L}04hVmr@D$yw%a{GO7LBFq7)wp7)z=Tpeq(ZpJEXZ~k8 zdIxK^BbVpe*Vp-BT;Tk%Vl}}~jCrG?|6BQwRQIcmeh9E#E5MfA*A8MBo2yW(#!NCT z_)0zdtv1MHf9Z4;jDvU>f`&BX11y{l9+`ZNx=um{7x7MDt_+7(nOzNeKtu+HT}ZiL zcwVUhRnApJ9ch4dd>p)51O&^dEY~dJWwiD+w+Y2k8D`O=jpHvZ2k}%ZZkLz?6 zM{w3Y+<-6jkcuG`H{62uoG2`XF|tM=!`NUyV}U;7Wn=@GOcph#DC{WMu?#YjvYZQZ zwdLK>IIL+POK+_RToVU#k2o+}XDj>_O{xjW><2Y+h=?Cewx)X-;rwK-iXg;oTBEuE z4!HH7n7J6>M&Hk137p&W{T|l=+~9uk{$MyTDbxQ5T)q1dizVW-v?cze1v^2FD!i3L zB_9-&%=x6Sla|_;)cVW)!K4MT`=bEAr@fW-5PuxBM~x;7qy@UAw5VazYj!fILim_Y z`YT)V6A?4Ck(1)<4<|rRVpqF8jC#U}Fy^Se=r^$DbjD-mDd@bg9#HCcoYLSgRmDj! 
z#6q9RaNK%_cy^}wHw%5#$kG(4C}fZf`CIaDe{-0`j;I<_kuzNxij47f)a~{Oz z(Ol=*9C%_49Wb2FXJHuEJRpqG7pZ)MaYRDr@#ALo=?l@ymgoIZ<>!L?lPUu}#xJXwD}-;`+>%K^1S*60}C^uZ#1etWrd zFCn3Se=uL&K^%5yXH3+rW62V6 z*tZGQoka$3$&43GY5I5KH$bjIb9xl=QdNGOJi1bZ?$MTUBdOAHsj_*fx+tpFwDatY z=A2q6+;UK<8XR&HxqPdE%Ge~+1&pYAPq-ISeXq zz4MI%$rU#JWD|{eMXHI1rJnsuka?vhxRxvgJJ>xg$R;weB7y0T!u7|jzQx0Zb%%f{ z;K^{ltA6$4eIlukAm_u{{zIcD%y);`!p-!$W^bVIy7Y=(Ts~&d1`l$X9kIsh(jfN- zgI}>6PAm0Jk|E>iky6ryUT>qse&cD5B3TqKAEqQ;6CQ4}GFnF{OM4^>l)J144c!yd zYq$H(@rY<02VQrNJMdHz`aynS*feYWN8m%fg2%-`lSnQ7r3Dko5>JErk#BU01r=| z(=ogAB#g;IM4la@+XT|_J@;$P)8WImPiHL zTo{eh^Ae1s!X?YBVO}QwPKfH<>ysK7^Cgirm5ZEmhLQZM!PM=y$mBEJ<)<2_6YnAQkz>gGmEz&3XO=89+dis(4wbYhH_E}v zdXuIhx~P}4C+YHBm}L)->>f1q?@^}(#9VG|;uTbi?k``5(Y3b7VYKFK(=M2xF%YYS zFi+OxR8u`$LI+zlC!NEd$s4CPZ}RdLt6nRkJBF0ss;Xa`@5a>Xsv_Ngj#Ey5EB55t zKFpSO>&;!>yCBKim8*It=i76~N|(&+x4_#pkar5;FFamVHJPQ-Y?I7H6x&oQgV;Go zdF!k%(`87>JmNsC8aB5o4)*OV^Vccx4^2OFl>i}*${P=lLwe;#TSun@yCsL{!Th>Z zYHO8I5Hz`1xU9Ms3fNy<{FR~2H6zFh;sJ}~3mtl8OH{HV;Eus7mq6%U%PTQyyK*!v0Gd z&A0Sl%p9RlX~mD6m(Za#2O{f~dN}?9Oo$jA*YpA8Dt;Wx&P8!n)$P_T!5RNibPf!a zHf#`nv)gRjwr$(4&E}@fuFbY>o11Ohwb}Td?*~+;b51qQJ#)>B?3AVJZ)$PaC2$vO zzVe$wLH&R<%Zfy6&7u|p**~aS)~uUU85rxK76jt+EA~l8`w<4)Y#{WJ*V-X1f1@}o zkOou5Gk}RUYohxnj#q)2??rFoRS24+b1E+t2lDPTswdFGUo6vpUQKH;Eo%`d&daC6 zzRbtC(oei1?5U<#gk;(co^wm%nof<8^NYsqaP(Icua)|ziI!#79{Rvl7%`siz+{U% z_}9cVXE^1j6fF9HxdI`wj+{Su%Wh=UlE;~8*RYAGt5cJg+Y3KGWbe>7xhEV(&H&mP zTd^hRQ32U}?LFgD?O!Hzc$WlYd!|Wonyk*idD*W&4IjTk;+5-hOJ;&v+Pv;h2+Q(_ecoIpI5R ztXU*Xld5le@bdCF093UdI+8xzpXCq+#?H^{aj-&ZM0KZVWHLWzZ5-4J|~8g87b0W86#}6Lx-| zskOu-M=4>D!PPt%(v{4cxqzU1>=ZR%9sZvcde&Zfuq^+$cRz1j#6FqMn`za|joMl? zA5mZS?K!KGEGc1Rho>?Rg6v5j^D@ZLMFH#*`u9ZCnV=z&R+^yy7tq)RipK~}9zH%F zK@pa48(hwaa8iN74u zCt%8YRH=-H*DJNTr1Vli(f3J0H3yJRG@vaG+g;d9M!%^dks?+Lct$CO+w~!~16lzM z6(-bUF_i(t1c$Qs${2fI3fkJZ$jHD8uuf|W3OmilP?BXJ^tC$OuYj)0T-h7&^%)Ji z+JJ44==rHk&gZKQPC#s3US4Kp;e~rW{^B1Rm5LCalw@uhIV24szW-?nv*>>|Uku1i z;Wns2IJe{DQ<(@`cJ2SE_>f!QXBhum>LUxcZt2Ts2IOf`D#C=3dx_!XUfYw;PN4)Ic8r9v+d zPr}N2T45Zt9X-&CMuf(^S*?KO>IgeID8h_QR+9&YTye20MIsC3k2dSf#k=?)zHk== zqUH@ok;|%zDw5srQGE~kxW4N1=Xoo(8C%IbFfMw*DI?5>~dHd zb8nvDWHgHjuF9kuP>gQVh7t~-_;;y&bHeL13Ad~h!UtDh z7ry6VspnRx&R(6Zx8JyidI77S&{%F)RF+N1n`D+m-Q}PBf>F%$T7@=Keoe9LjXv!E4)3xS!N>fP$!c>Go=U7mpb%fT}%1fL4K$)X&vnR5hJ~>5x z7~Rk%0TV>ggkQei@Ay#2C9I;PM&n6VcgUbZ$>zYw1+$`OBO}2$Lp8hvqU({JGAwf* zs6XIhhY6b|T*7&CrCN9)>x{MuiI}8I+4*9Gp|&0sFUgdECm+2cA%VGHT&vChO(w&L zxns;!Z8o#3&I+<~xjsz&wEJB3&^+nt?0jf-SnlrRxZfJD@|T zTwf?A8}4Bm;fk&(d4T173|!$E5sq*VQMwQAYv9f=P7E4n(UZ-@4jUvA!wq9&Jr=n) zkq_RV-%QIi)8v}0bQM*b3+QP7iG$F+Mh`e$vv|9z-=-gwZ*)aDYb0#c5*rO3k*Mm~WNp_8Y-@5J{^Pjnu2 zjcDpzr8Mxol<11;O#}_FC?jQ?<}D3y2B4#S5dG_hl{`fqqg)7MWNaYW@{@<~(UoyZE|F`2YNiqMFLtr#8&Lx#ZB;#{AS z8V0SH6TBo{wDCEw#y*@_0x+|&hv*9L+u1&N8=9g3zrfD$b;UU|O}3a`vFE{AdFQOr zD0{_+$ukacKC>!+a%;}K80F18N*s@;gz{d|aq6bULRrUpkDrJxX%;L>@*Tp>J#~UU zZh*PPy>|$l|N9!e<6t}{)2}KuJkMXluv$ec6FjZm&{gkc&=M4;^tDb>47NHsD>l*Q z!?M@1R8d488so7FseO`C-(r)towz~p>}i#mL>a2#yJd<&f3el3(}-#`vx)#2k*8vk zN{rs0LJauf?X@N4Mn8L(E7rDsiQK%PKsOf=QKdvDtGl&ms;3S%HE^Ln=-174=cD?U zUE+0mcDAuv&snSolX7|`Jc@mQ>CH0RE3WlJ0c@xrXMk6kXM9a+NX{ z8yZbYks{!VNdf%n~(|LxA93^i=q3@vCnLc=LQsukxw(rs9TY4VU^S> zbS0Wy7y8eb2Bg*N4YnG!%So)BkdrIHq!O(|@X{U^(z$cEV(RV=uZmn28Mjc?eplXL7*<$2|1nmWU0G5yB)?(&!K@;guZU#+!upZOrs9(ED-b-?rV= zrc*AQH?53-DdnW`Aqf1~`hAlD{M*j=zYXP6qGZ0$z4x1eb(;0s`OO}}e;&=T_E@9^ zpw+p_4d_#j_|)`MV--G8;`09ZZsD!G#3L_CO~Lt-q=K+4bY@2j)=rtBX5qZ6S%yVf zV^519#Mu;Ozf>$n4U-Nzz#Yc!o@+#h9c+|7QNx}ig8#Aqm#FQR-OUz?PH{dIV&PTS z$BD{~SveQSKXuw)K;nqg{+(8a*~pJi!n(!!M=CHX9L_7g6C_tcn_pP1L_+aCdO+Hl 
zd@L*Zv}{1vG9eZ*Hp9=9Yr?t6d$~}@hu%^L)sz+FK3|XYrF*47#|*T21F^yS5LmA| zdBfD~^Y0K617m#!FwsCqq7^v30zt4iLc!2ezw^cNh=>TJh1a(?5!-^HcTW4w-$oew z{Xb4IT095-Z-QB}D^*z7K%7hNh+3zROgfUf}??2Uo#k1C_houW$wofzt9j@IZ`72?7II z({_rWAq2hd%z#BUq@Y1fY!npGsh7Ni*-ijfNU93D@2V$U0^Sqi*Z&-AQdb87_CXPo zIn)$=hPHK9b}+XU(fLDBz~|p-H)D>U)6sG=)H5NU?;9EIMiul>4F*CDc%h5_1P=k0+s^nLF|>q-z@GlCH`5OFA|2Q2 zG+P7Hc|d6KbiNpl#cVbJ`bZ-%C}>ZM3n?Sgq-)ev_ziaPK0vFcz!6S&w;ORoF1Bh2;h{)nrwm)4;CVf~0&d(hcXy(R0FOqE-Vknhu}H$BlF0FXix@XE#~c!D;+?b zp{b#nTB4Z(Z%|=VOSP$~iHRULF`b*GWf_>6vNrt#SP^nqMhM~zCk;tTaJeK`V%@Dw zr$rBHK|#WJ49f`6_78D^coo}wy)UJGbj+9MO}e|d1-u^HDt;np^k{2xS_kAI7l|{S zHf!2igE6TK^$K=S5M6hebJ=D#O%k;*&_z{jD_GJK7L@F6xCyP-toV%z3=YSN;x11u zMuF3-x6jExoHmt)Wfq)?4Kj$b^B~Hr17*+m`^`psQB=3VlUEjtlyU>fNVgVXkLHGZ zOr+T;t5LC&&Cq)Zyxy!jE|g=glJ~(w&I~FZtucGiV7;OVCoV(UKokJsagv-AylFO>}N=kU-iua$b8%&@5!4I1uejCatXS<_6L> zwtLEYd69|7p2;8LY#sKC_&TlEVs#)+Od6)yaXOkUz8;MB3Afhr$>wU9`t|-;GRW04 z%G5JTvtT^7(5B(Cd>BRTX(l%M;bq*$Id%JIEZHSVaHSyLQ(Q}l@{I!_zy4h|7DOAP$;kZuKKj%7MXs)N zCoa6?Gv$tIi~jy!?dW^tl_XJ5%lJ;53IU%8X0Dq;7c5A77J*U36tm+=P-k~CfA^%usc!(`hFNo zt=cFC4doJh`M(DL4Vm-VY#c~Lfn?gC`RQc8@Y5F}D&337{#2vS)NZylA4(fGE9U^UYvG8SaC_NxoKU~g+k7ae#DbFkEX8AL4!68Fl-ihwQ$&J&Z)zPe_3Uh9!r^? zWD``d3ZFMaTGflY@vZwcMm3T5eV9xB8=tIe%qFFfwiTcpsnbJEnL!O!RUVN?z<*B* zE%&zQAz5n6erb<0TMi z?6EvQg)sNFAi-hMHh~ZW2f8?MIppgnRy6R*uYL5{=y34w7Gpz=PP}UM?AbHwNbv^a zC+_Z%>Ee;Uq5!fe>8e&mMrWe&(=1(nx!U((?Jb7fb_5Y3Kbe2&a4$$clj|rO8|jfD zMpA3Gkh&o()zL9(40kGQ zKb-7yEPF1ELnPVg4kqdWHlSMlT?e}7DZZiUkA`7*pi z3HwSB#WcZ#Ldb~r6?VVLFh4lW(tP54LeQy0Wd6OD9AkxSV?Z7`J@$6`@z^=lVW3(% z&qEw_@V2$3(_sp|FY54A3PW#~C$ALr-=Sy(NX*&W9WEulL(6Z**|L+L^6c!>8|NG+@dc>R7 z-p{Mfe?8u}`@_I22jIt5&MN1%0Ct-J&|e*3u>5aG`0VTq=^m&)pt`H(K|(*}+es86 z2mb$pyVLFhWtW$)v$1760R*b@NCBubu$&750s^R|oesAPpoDR&%5?~wLxFW)&djL- zs*d({FE6ijIf~Ojg;E!1XXg?K#4sUVc6M|-c_3N`Mm!IM>il1iFu{HDZf9$A{lBo6 zl^LG_RmxPd3jdu?+yd z1PsT;lp$te&ZVucY7LPfL5OgpSwh~a|9JtSopm10hJtV=ei8$Rtj9k^oHr_{h6q5L zBvbjwMsDrbBjdwYQDu^cxkZAw7j~Xw(Xp7$N(VN64?<P%UShc59O^UWf`mM- zpjB7(Y)xRRkZP%1I%rTAaMT8%Inx>Ja83z#r$!z*pjxHzsBSrZ@ zg4$26bdM?{d zCfREsq5oP?C&r{dGPyqVtRDRAmUTt7ZOKldCjLSt!IP`!?C`ZplOI^2PJbB%i!+5h zXU_DfjkXm&x@Y>R6s^|Tui&?hv9Gvy?tk$^)X`nh@y{&QS{%&hTU9>W;$3r69%-2F z%=6@&Ptvh}IS)X4!V}`0287v`cw9287^=UZ6+z0i-vp3*0sZl(4HQkN@059S}RlnLe1#uSZ73ElW>|Zl;@02DI1F80yXm02^z>x!DcQrOx31)^& z)OD`HE#10Sp9VG^i zYe*n<9SLo1GU`70XRTgXFZ5?dXruJ&(v)pI72M2w8H+s}tKxP+jJLwU)`&@KR@yxC z%8;thOKI!S$)ajIwP<%>{q*M((1Ck`<$vgX*dfBC;C?H9gDo_TgkLh5N1a3cJd1 ztM#s3pI6UobFQefqg^U5cbm6V`K8e?%Z9-S&a>6oHbB`i&ok`Zt5ni=JuC8$EX(#f z+Y$c#K-{iD$AI1UV_Zr{dOSMMs8n8`mdSLw>|xhJ#XY-Z(aUdXRbpCbU$TnVzr*UKD;@UP<)O?*2IYdV4;d{upJWlmG-+5k5Vd_D{bC7n9 zP3L1=s@Y-J!G|-3R#rOt;ujXfv~+}f^er%Akd?CW@%BS{nFKt0BKo2p=^kWLrW{MG zm$7U`Duz06$^K47R+u@<4^)^q>C-nVy;z4nylYPxaXc*HUEn>KUM_{}Y&^<~@gA*h7p!0q$GG<3I6`v1t64&b^F!~R|Y;%vYI zs@uh~@GMk65%_OF9$7cz>>mNOhX31NkpdBR9e+?RMIVahEVWYHoSXur@`1w{(iXrH zz6SzttQM2M$C-UXnFN861rj>waw5-O;O{XYz5w5<6QFt^;Br)HH(CIvNTA5@>*;cB zM)2DfuBhwRBkaIlU!PFp8vNrA0=}2N05D+Y?I0IfDv!_q3-1@KTMm!gxW?S`(L^e6 zD}#Y18rbab@1J$H9Ru#Pg5Mu5T!;T3W}V6H^}O8&hZ#EFc_0{0Qng8!l06dbq zAD6wrAr(tEJRBIV1sbV9$rV3XCXbXblgnSPPGN(Ono5ba33jANF&9vV0!&>u#eJAi z`gL|NY?r{IFMzrWL{^ZM9n6=&v=GBZatNRz2=qP7ZL-_o;NZaE82Baf9&F{e(Bit& zaB<2o*(5?*nrj6dA*X-~?cNn|P~FviKd-FajRxX;xpd!VHdob!II-3_i+JK5i3th7 zvKk~vvFRU!z$R3x8!J~=wo4>J!nib;j#nZB5J_UWe5zZL_1|keFUTwsOfb;Hw)H_& z8-XnXX&6v{NoXOh=(%uCL4xakkSM3EfwTXA6AmbbAo>2l=V=0J9+t)6yhipoB7}|x zmxw%Kqy>SdQFNGOl}0J3YH*Yyu5jX-O1ltFl)Iv4o1NnF9DnVKX`@gKxO1C@g8Q~;`69s?+73rl18-tJ!7D~t1?R8f3% z%X$BL0o1rIgqDz?<$wb=zwbxUZR9>f4q^aK8KGSe@Wy*w`wefZ;q(2GqW^gULx@{j{fja|Q 
zkGfwXUWo13H({QWvhQV`M}d`|;|g3SeT4M~ioe1Qf4KT_b6$=y#dVIA;JOt@8Eldp zUfV~W8B}Z;dmCeuIvA<(Ht$#0T(|yucW61?CG=-5%n*ODDz=L5TA|ZiPM+p+($!ir zIS*({RsQ#L5%=sqHM{Egk49rqd@IWU<`R1xRENOWZB_JK_TjK8o`3Yw3{D#@$UZl# z_jXSA>43qld9B(W!Gohu!qwMRQ=jI<1>d=qhun<)?ogC^F>UiBCHdL!jO}am6 zcnaq`j)=}JCI4k*t)Vgr)UJ>s;TYp0g2jmP-+hep5)!`SL|GhnP>b^lg`U4LV{HzD zD2!8d6vz@Wt_=mi_N&E~zg3i1C99@3{I#Vc+v~d9>!J=ur(?QMv19GHFie^EE zHDaD>dmh`w|2%n9kZA_HA$0t zOON%RL1r}PnZCi}nP2f^-x~3-uip5+d$%)_+iudUmG^sV8u_0F{HvQ!?@Bs<@5RP_X0*YKcgfDb&66k& zuxXB&O#=@Ai({N`D9tNtOnPvGC5lLaI-CgyB++!SCCSoYe{rQ&RT#LI&&|rS!;V)V zjV0BCm{XcI7`UJLk-P6c>?RoJkjqE6&xKaKf!kt>$;Y$kW=JHdQ|n@?vr(7P?UZ&U zib}&;xjR+xpIN{yK(EqoF0u}Zaf!3bf9j@7y1LL%-_qvd>va4+L`bfxG3aV^-!2bb z11!a-erqA5rq~!J7GakeZ zG>6iLyaN*`8Zz?)lwMIfIy5kp7utSW@ve>86V!rU9Wu5O`00Pc=baPVmJHs+QY>7C zs?y66WahVN&U>orHs!xfbQ1ptftH*>OPZGfoeA0OaM((wVC0Q@&~>4QJSBMWGkmoH zrfzW5#rnV(bMIVS{LMUs+#3?Pc3sxX5zF^?eSHV^6~g;?cka>5hk1?awsor9A4}J_ z3v3jKi<|$&Xc2|qFV25S-Saqj8S7;d0#!A>p2wr**wXWL00rX6Fd;e#Kwm^$L5Fcx@Uq}Wxcg!M|I6}`;gdi?gF~N|KeRRP@rKV zqQOa^0{$Su;{OLmK?z2Y9t{FUv3iy(UP4ZKY8WVd|T z-R-xUl?8qZhzgx}qQHqqCzgnhigDqU6NJ-(q@FUuHa;=$yol$$w;D>^U;1arzkD4R z(}RDT)+^M$Gl^1d@7uzCT+!z{;YtG?|Hg{=FCENh1 zdsJuPX&CE{finUr6T*Ta(2Lx`o1imO6AxUIU1L?lel)Uw{z(EsmeIU+c6@{Q6*a!r z5o^0wvq=K&Eqn0b;j?Q(=-Q;iYflO74+{6~JEtACw zE!=E_r-tb9;}Fx@ID^r3XRXnNjxvW~V5n4d3o)>bMXL3w0cvRwh*s~pLWX}{=?7JK zMv!JmNSIK>^M4z||CGP_mUo(2pv* zyec&5>zko-n)>-tJ^P8nm%jTozMvD7Q^5`Edg0G{0_gGX- zfn{X)BYz)Z$*@NhfVqDlJq6rcE=_2*3w#-R-$k)~JTX1u{3zEt#HoW7OhJ-G6-k0^Oh4w>{XdXWFac4*Wb6b3ROP7o!RC^1ZQL-4lhXE}@Ac+@+!x|Y% zb`r0xJK^vX|DV`-Alhi%X&}PJO)Zmj|B^8b`htNXbca-4WG9H2T~3uQs>YnQaM$)g znF)GUh|+k|nF+7t(x_=Oz$yJACkITg>qAZev0?leiB3!OCQ1)SNJ!Xi@B`!>P*=nZ z&J3-3A^F>HeS>ZIv&8%sg%RR!NI}6I5|6;Xk%;xcUU55 zz?*Y_dwauyhfJ0!UO3Bx3&%@LOk`zYnV+BM@wn!5JypV*PiOs!Wc!LH{`$CLC@Uia ziHHZyT&`37baxlXHM}islW?E2OX%;NK4tWjdD*{qLpOR`jv&Ab7f2!Ojh z3Q$YJaZ}(h0LcgpySe~6nFRszN5D}aA*_do2Uk$t=vIfDD5GgoVn}!o5U^Z`4tN^TTf>}aQ=$r02wlpMDR66rvJ1^ zT;nB#(X|^P&Ir@ggMojN5FhKQZ9$}o+K}<_W55ZhqLvC8#PH<`w43Y@zEM-spfz5;+{prX3B zkppbFWoIL3B7I26XRug_>;?a1n0ad2G=04UtBB}6j@ayp9$9@3N5khSJY5)R) zWpFIIMse_^qwV4>DS?xym(RX;N%|14q~LOS)Y@Q@B~z=VhGQn};Z38ze{g)Ad|*;j3 z@-2ESirj+BNesf(dOkaD9e$K4pyUrzw`>`WMEWVB$fKM-wRwC7dvB;Rv9Pp zVl7!ngbbxEMK?t#SDPmBa@W%(^u{0hRHq*_V<7$n8wLyXoR6oDIQhDf)+%stobJocS(Y`qTWKxZR)r6P!<)fMkaRk z6?Wwng#Yp)d`&*SQaWsoQo8m)KlFkdEHP|{SU$PT3jWi|EPOQSlZ1AB$Mp-fajzw+ zZxcO#Q_L3opkt5q|bXvVBhLCSi(c23HGP=egz#2|d3wSsRJ zE^?$4&_4@TU>H6SncA>9(zn|AmwGv^*kOKMm1u*=_T#3(?3WhH<`h2sFh2YcYeyWV zv`hoI?q78re)>%w#__H~3j=YHT*6;JyCyI0NBDNHR)uFIAG$aAFMc`a@@9Jrp3hxu z-4OIfUp}o6>-%4BJ*v0bw{AQI#8&l}AdUoLtbbr+DBHO~;6I3ug%*J83rm{bHaXsp zy$yN{$7SIusF{bp=uigh{8*gv%3k~Qdd^8jeUJOW+`M?A;d1Sec!)6o8_ zKXvYSpGcP1R~gq%SW?vKhX?CqYh@<3MqUj?XOD#zs+~e)J@h&ru1Pkk`76zG6WelA zSA&%{U4ULsxkg)y^<3omKbP>5VAMD(&u@~*QMkBTvurepkGvm~o^h?0V+6~oe9=d5 zz|}Y+yp@bLJlBGm(T4&Cn@>;}g|cy<+208Wt6*Wd+x-4Vqp6-Fm1BF{FhIcR6riDZ zTAh)X@TM|ZlpUBk0HcaOJXcjGp!IBG@o~Vc&e-gxjl27jt-!TMS?#JE%T6N+GvF$H zSxiRK;q(BB6{OuptZ31WZg8#+f+$&OBd2ER6d?05olHx(w)W* z{n1A~HFm_4kG13?pXKhLss3$K2-mh2XTR zzUpBEA3i5QDc0K2!l_&*EHF4J=2M+{eJ7H{+UTLW%CC5%M9u6nh?2)SlAn&a0}ek3xYDt*$SHu)adH%d2=4@3?a0gfF`O}3sE&a9xc{r0 zX-jxhRouHSlp<=6qC9xPZjjRXBTFVW6@~xAi^t6vE%Ty{VWGULt8QAIM|sW5=8r1D zgoha#eN%-~%{l7rNF}zhY&U-DuS$j%cfoW5gHfkXZojm`rt!gBYp4B`U&6HKkTbRf zrf#Z?uJIHv7c&%LRZg18EzDnczR!}08(D#)jA`J6Po7cRu~Y?x%7Qxdt%qwN3?c%Q z8U;SdM&sRKF2Di&J!=RrpiSd-gD(FnJBqkYXDD)G>BCh)D&Lx~y8L%hYc=BhOJUi2 zQkwFHV!!+aEH6m2__s}8xzj0gF z#W#x%bQ;N=h;m?g;R|MFKW(SQAaP#`>$@G{bvE`V_PO7l=~ADCBBK!a<~Gz*5IT>N 
zDixh$)4oG~wewBEj~88!O?|0_9`!aV#Ut)#Ck-)emFvkBD-zIoDe1`KG-q84#}^2| zFT|bCz!z?de%F&6(2J_@Xb@{_45%niY9P5wDuxpFA)S)`F=)KWmZNwQ8VV>wo?643 zhIYX^(<^((OYCWiwB}Is46TNt8ThH{h02I~N~{toYyQivP3f=^Le#9Y%nfJkzbh%b z0~O1hyuV>maFUJp)~H|bPM6k-!v+;97SL>~StYT45(F_3sNuw*W2{qTdXgl6Ow6|7 zR-5GIeD2THp&WWcVOuCFu^6E9BX`y)aE`kJnmIq$O$9JYXem@QJGY}}!;mHJd^lrS zeahkodK5p1dGYNc^d7Oqc|%dp#7`6%2MeSIKd5}=Lqq&Y*kRhlYHnp*4OEu`(>o5D zxAKreyHAVcU_$lQZS&WQsx@mE;Pa^09n@Ie#HZPR>1a4PDuff!j8j{=z#RPyS;UC zb#--ex&>rete(C;F=EKAPzca%m^2D^>M^Ys?6 z(F=bM;PK9YW#9jUl4Wa+65_KSg6aDt>wYs?l)VDqT-;s05#=P1yVI{-alt#;-9CIGqaFJk-hKz!r$;EbIy5ur<*iNqBH%$Op>6S-{)lq7Ek$vRZ0T zH__SWMBcfi*;N14js^*77>Y`%px5K0Jsl9Z03tdp81D%N2T=9aN)>Zb1gNUZwCB1l zo{OHOfskPE6QF0AS0J~Qb1;P=hhUA=>@2P3i=<6VNV*24kQ9Z0g98nd6xy(Om?d~@ zp0HfRfEnHk4rdd(nOQ%iYrv~85PHjly!Sw|5avPfiR2cmYm`Z*37^dSiF7CJ$j4N- zxTyAG@_f05N?Rz2VlPB$=^TTHj>j$2*;je{TIJB_VjIN08n@dO$yFfzAJmjY4Zpwv*@| z#4Uz4HKr5FW)M9oB+D{I0If~es7X6?+UP>+D@_9l!*y%zQ@VVr7Dj+@O;lA<)Z;g_ z!z;A@D>R0Zl7bsOjnht)vp^#@$k#nXA!vIdmK9jx<|s5di3@QO@6|wxIP8&IQYrig zh{ooq%uc*wvUe69!?JtV`ue9xl4o&=7RFG`Vet&~Hi=!rP@{pZ)t`M^B!4+Zd-Ei6 ziLXD6CfKC*8sWU?^BSe>1rooNOh~x?tE_3JIvE%&H`^ubWD(sIz`2>u;(EbAKhZ5dX#g_J^L^GV+7k^ zhO#&S=)ujE36`P1Dp+Qpbc=7=+J{`hO9o7)2wCRzcQ{8-W`ug%rzs>lG zi=$!dE~~RUTbD<*<1JL{-11tBTf@xFem{1;bd~D=>bY8Z6i(f4B#qKNyEhuPwu0nb zJS01Vm3+}|RU>T9*V$VH(AJ{JRbEh{$|ap%f8xyB2!+!-+AWFI`t*Wg2qP+~7yzHC2+|lN&(lLy&aZN(3Fd&cvFlCwYArbBy58vWup4k6o zb^LI^v_P18Dl)Tbd&I9r?tT2zd$f`n6D9}!06K23Mqg!*sYwMXHKlLPBd z=buZp4nMwq{60usE?X@B3e)ZU*&IS?RSKum_{)Q!1{;+;fRaOY8`6j0HcBAx@O^Mk zxwBa1UX}Buz*u|4?v@_=;iI_JKVs1OHk@OB*}}Wo38ggx3c;zlD|pau58tX(yNQ*zG0$dH5cn=f zT*Q3Y^UL69i$o^VyGB03a*%pitjy!3d_?vGHb^by@Y7On@aI9sm*?mk(O(xOCa=KH zAN0#D8v`uo1*LtQXM0wLncv zUa*dCGWUNw-0CmcB%wqdLv^Km@N>@yj2_Qauuh50&D$3dm=o_uzcF83jy6dVp*n20 zjW;7~Q_A0(v#w{;YnCEaE0neyOEOQIL1HfWZ$kx3aMCS-nXo%MGU^uWdfdBf*kDM@ z0cvser_NfblP}rLT`#PIv8^@EcZ%O-Z(iO-^b3?JToISt&&V;?;*1V$G|w{$*0_of zNiTS30p;8zH9-;rQayOoC$EIVsVBA=OQe1%F3OGl2#(K0M-q#H0ZA|}znIdyv|7*g zvQ---_jj6^Mdu#1l~?be)eVKl;g}mXsc8k?&mIOx;^(+OIq+4er_xxWGa|YHcw3xMMwV$z$q2G=BUMMHg zQl5c3H84~b*l74w_u%FFObIV9o~scB z4N4%dWYDf^)jH^IuF7-}@lawMNY27_pvGaqGdY2dl>Hk)O!m(Z$-64hVY++=OE%Zm zM^lJTtVYTbp2Bt|4ZMbhgBWMFw@UaUYl|&h7~oN1xnMzws>bYA#D-L1j;lDoh&<4A+=ampoNR8@qqOd1qb!1%f0PAEi*lOCD=)w THaFL)PB;@lI}b-#8f{PB@_LYAf)T1!MB8 z8E;-X?_S#8ZRx0PA+w0)z(oyx`bO{YBrt0gU^*j8b&*I1K_WHg`Q0x7?s>-^8 zW>&l`ZgI*mG04aAJMGbjz#<#RUK`J5UB`4W1*gzLGUIP>EWsgbM`mVm;T$-7XJ;nx zuqK5IDZ{kkz$5|R4)L}{BwL)hWUE;sg`L2;NBOD#7paCOJp*im$jYqWPs{~J!HcJt z_%Q1ehHwsfSkmhmz<*N(}#+rm;X$m`d##B{qG`=Bv zp6jYPY!lKhBW9O-jG2N_$*FI#@7L5d>eIp*f)a%~q~1*AihWmP=qeGKM|g`ar%|?d z9JPl|VTdY!<%e=wg*9l45N}2k@-x~<-{?gD37vPWvU_cvDM0bT>=iWRCML=$B&dBc zpI1*+H`kc-ltKUdAj|2v$yASe?_&pJLHRflxyYz?ex8S_CCGt6_#&f!8JRd`+~~(_=7lF zPR~_LWf#rXzROJfrEJeAcSI4Vn6400N$T-m4P#5+&CLR$XXij^;w7~nPJR_<_WpCx zBgJ;9Q#=!@AceZX9)Yk6TFUnK)9|?Q{>a<3&rj=IV7b?)TpNqp zMJaF})vuVPx;B#$Al~$3+4Zb;T&VPVMr3JCg1LYZ4_Vj3)+ZC|x_LGHZ3s?(jN&+IDNM&@`?`I*uj@Xp%gUl zdeT2DFj2L)Mx8zAh~)@L=<@EgO2)=(z}&xT7=*p8Rb)wCzJc>tQjtQ=p{XM5!dLfVHHY zX3*u$xce|Kwrv^(Y710RM@VXvG5ihdD?iMEN-KR3Mg2rBdNyq$-9Nc4*e?MFrwg-p zi3*NG9q}*Z-80Xrz7PIG@z}FdbbC5d*X%LB9P43O{Gr>Mbk~oRNz%k>Sy`fCR%)B8 z&jN5{`zG(JX{brmJp`erDncd5)6WU_Fq<|et(0fWyb1h{=My)I za4hM2bb6@v0U~dV-CHA)igOX1-EGvyO#1SLGH5D7j`<8SY=M)u|4ILs*BM=iHt1^L zp+SZ@118ZB^T%@y;&*g@ZE_OTELkK;?d%YHY{5K7j%F{O#2KVg7AZF}x}E-RrTYA7 z(J?k0q3pp-F`nQNuw)1Lt@` zyy_3_>f#aQ19D%$Y=K^vTMh1xhzWea)BW*`|Htj<|EL)N+!ygnq?_RfGZ5Q%1Pn!u 
z)3{(m0cuIk01bTX4ge6B%jc~Sj@Dvs`Zt=^><*R=$}7Jfx)o4DvVc_oAO~B9-Uo#M>a~_GtQgGh+6Tmpy*vOn7HD&M$CE2hY?=5;^E(La_t|1!<;+P~!aJd2+#iZ!Q8FVh2{s0(c|LfSI^}1i=!%>#?8;sh&IHL&oG4+Xsdy80t;le2835M%jEDZ5Z8np$TQJZw(+=}NQe6H zxLpKaF)}6sCwkIXRRA2`3krEwb~df_*8m4%r{L4ivz0$qgL0x71~}>vTS(voDMIy7 zL{ul>cqL4$Fsnj0dL>dRKpA3{UP7%sg)ahdA&ELAz{JrVQ=F|Xpo!X)6F*2@v&vcI>wm6*kF3h*wYqdAE>pzb5$PmJQ)#zYCZOn+_tVZXicPuvcS z3lfg_!y}M}D7+FVhZREZrU?W6oj|t;T z%lfhlz9ly{M!O7^PKEqAT=!#kS;GwGwe-E z%jzD`zYIvJLibon#*wYcfbTYRcXt+ZPh?3D>}-5H?hS{R`|2run9eVwOHYBK8TVT* zF_2U^cWDmwz(|vK&5>XYiZE?6ag9`U`Yo-Zo%Qy>QzUe2sDdIw+8;Qxx_ZA|btvbG zrxEJspQh`Z2xpn;HkupW%7a{;;ui55>fE*(C&Y?^rUyT0eY>XyKdX7H4*_bgkEZFu zg`~3~VS^0tqRvk3GLg-`O1-}B?fCMZFFH6&^K%O7p^?_6rRULp-Zjw1(&_1zczCs! zWMxqWC`7{;UiI)!lYBrcmvE-<(A9&Gyz)*pQrRumz9dXiW6VS_qwG))F!M|>lLS^L zQmw#*2?l+3(~9`i4EofF_|Ua;4*HBDJbdEWXK=BuX8~k;_!}d0!?LX8Vd#C;J$?p9 zWQspA)(D|t@2{rf0L;Rvm9$1R0!72m+jaUCa|t(fB`3~2+)&gs6t>{GWSq87heFPg zP+m;EYDg@0z#x^eXCKS%KeAR^2ld)Ww@{oSrmxUrXYcLIR4>|o<4%(PU;o-A&G&FQ zJbpQII)-(*@Cy2}t{Emp?X7lC_333hxA~vAdku4jen%hTD4#|fSmA)&evt~Dln^#T z+AD+-imeg)NDk0+|{!M9mlD=b}tNmn!t}v0>uP4sGeLb zHwK>a?v`5_lsL_X5wOslOH$itH!ZzwU&rzx^9AXBST)GM$D6BA_M>z#BvAzg7UQUl&o|@q|5zpGuGAl21wsb`9b3yZb=k8X! zgHwQwl%>4@YB}ZXV&kRk*4HWyi(!eQbOMgHo{fq2tzVqT$3BE3uSRVe`g9Oc3zIZq# zQAO`bCv>G+wWpr`3-Csm%ZH5b5kN9x_{nun`i2ZC8f zelO}n2+6Wi#Wk}?u-~)Foa0< zKDG}f+#N!cAhiAZkkw=fVa4!5 ztA5dQo4FWuan$L(Ds_`Z3X>u0YL&oi%`KP^^YzScp3Fr3JGu^%n{DJsV$sxfBI22^ z`XvIAEnG7zcA%~JSg(>vqmr~s9xeD{JoAB#7I_)Iyy} z-Pvr~9?~8ECo#H`H&$L1F)N5l&Ie{xk?_Qr$mo4w&hRD4@LRLxJG9Z|bRO`=R8&;5 zdEDvA$;pX{iBo{FF`|F>4-df}ki-g5M5d)Mp|6N`#(Go0Ug^WzqczkN5xzuC9N_l} z$5*LU0rg7i!JlKd9zcKy5Rm}o%PxjwP^uWrAidNLAlq;-IzU7So~nA>|T>X>W_f?;e0%A7*!r+a&a zN)-_B*kIQ0bHavMFUNrAYS<4PWY9o|0mfa}y*`mqB4Zmgfk%m)5fbuI=|+BP6-cpL zF86E}u857o#_89#!$l#KT>wqgl9H18fqMS(OaMs|1cb-*+8sdL2DtS*oJ@gAMH}Fv z0=ML+#50G?x&!5fLR>XIV2TFZl2T9%YFYsN5-lz4ivV{S(?F0va=@Jpf3W_Cy>fCQ zlfr@XBchTDD(FueDx19`4@QkY<2qF5AQq7|xU1qU1k~9ea{n?Wb-o20F?*v0ozL#B z)^A1vbRl+X;b(JBV3xEJD0a-EnmrJT{h&s{zI)^Jtsv>_aa2?Acy%3w8zKH8KZGVIxzuVlq(!$HkpaSD$ir* z(V)?kd5_L1s~-MRrT|y zo3}BdE7lpOY@G|b3>z3mLU8FNb z*>?Y7=?j%=>Z-g!I= zsf3K8our$?$yT&>rQ=m4Sjc6U1XY(Y(4uT?K~~nG16ACcwMFB#5;ZZ#Eg6cP_-rjd zu+Kv&PVV24g;M57J|v#+8y{{->-zb@==0mds5VP5JDb3?%|$g}Kx?qB%m%!fP@h~l zOuf=gC!`nHTGdimBzMsVE`ed)1tKNUd+;%vV+)H?5 zgucg8{lw?%Mc!?jiehGnWfFQP`K*~`bD5i--dImE`A-(A2@r{Bje`|1PY*BQ7pO_rHh=4fc_mM+h;c-7bT@|3>fFjyGEV+-IH zW?J1LKbjnD4;}WMtnT}1U0d$fkENQ3C`yRj5)w>iQ?l6hgRWDB8a*`0v7n3KrFU_q z|EBWpY92=xW)3y*Efq&}njB)BJVB(1_NlLH>Ei(8HGUJ>d?C)1`T>#; za+CMiEp}IdufFXJzG^Ho<2K`MWra8By)BC}2DUxc5XK~w1r5JW;r7{;7bBMqABsba z!bR?akcXeXLs87B0I%2WhzZMntrYQvAyEZnA$McADnS!W`fT zG1?ltEG;h?Wyp2_4CVq1O(_cX(U+L0Y?bnA+~z@S89}mBkU?&%Kp7kKL0jpsitCv1Y3&*nTB(Uh3g*3dw_R z`b!m^Z+!TBUj|FKIacPf`}={|0<;$1wH@mvq;WWWf%q}80&5$eT^hQ>@lrT!A1zlx z@*|>rw2_~V?q5rbvn&mJF`}Odl8@NthY^UUfB#U|ahU1Kx@PTHZYccH-=c6T3*fn+ z*}Oox8GLBJqvxkE!dvE)md+-F9ZWG7$E&lRazpN9xj217Vi;L&Umx8M7>EXVsM$287!zzj*1%wD_r9t3ifuCynu9;D#&qu&^=;bV=g+oBjZ#Co zMTeIuS*syx9otn7<__=@3u6oRq|6Z`t2f+@AZ$=;+stOn%Qwf2liP^RljKhnrmPLM13`AGeO@|==`6f&^;2Hda9`|iTiJc?@E+{sKuxT!L> z_wIW8w=vLlGd677Bg=I0llac|4Ab-;=H4ZM`|1c-Javclc_l9Qbb%4mLiGMcMomP( z1I#9z7urmOjTg)PJJGneoM+fDk1KS&4Ms$btyQ2Q49Kai8QdcDvN zE@cnZ!V&4Mi)tfW&{Ej#34?ucZ7aBcvisZHXYvKhXYLdhMf1p&f{iL!V)sKZ7?&yq z2VM^LVl>$fAnTVU@NNf_*$|LjPG&rvo7^)8<>i>5f1M&SrIykjPa|iSv8YaeQc#>r z9;BjOeJhb!gr5@$`vX%zKsc+rr)ReopZH|CPG96gM@cD=NDpWab_4#Lujq>43P_<- z`3JD_9^OkKn+e87_KO)44xm~9=R*?0>-zRrs8SV>o)3Z2g;3%L)kaJT_%UaJq|nja`~b*578xenK28)VGK<1_)D5jaMI zpiY4OsBZV`TwaD?1(RZMFHd0506>~i?(-@@mJy1u5=7MgVa)&O0+!_aa54uRHCNkx 
z2-09xfNX-LvAfMrrLY^t(0j7hVhu!z`1m5MR239vv$>ovS}gLFr*?rHx*TAoH348S zJUl#!7;JF7@!y!Sh@CtDMFgfnIQZxFNmcX*j8Ca_r15V+IXRr^N_unarrF_uEA=@Es8S;g`sBKVi zyR}iM`4o@ibzQbLZcB!LF1a&QVJv$_;+_M9cusrIl>oNiV=<<0Q`5br=PO)p<+}1C z6|K*pm@L|3MW|gzmb=P0mfM*JZLdU24k6S-&$g8Rcv{V6cKfWD<`%UTdg9vSv|Z1U zD4ov)k)5ehHQJ@1@obl|uFH2LoY@v-bV2J#jpFmXgl&tq>dU72TPUVEH{qujOjwRe zm`*XDlpQ1uo>?+aMaCSvcuR!h74mQyigmZ9-gkJ_9;`T5Qi2)dyn3|LkD|BC1i70J zA__aMDxLnjC@f2S5?hzORx2@1A0iSu^W=5aiAKp=FP71}PC4_95ZCP4JR~i}zW?;S zBp$A%K3?nS{aK^8?8rUfZ?It<-vsTYQ_Hx50&#kHr?9S${??Y7_$nsW*5~BbD*$<; z932^1){a|IAZsbrGGrY(Tb-93svfo6`@vHQi~t04R&kz;CnXblrXa8U=JQ@AL1mn=)(=! z@#Dy>#)U)Ikq*?BhfR`nweAU}C-4x_nZ!eNs+g=8dH*XEA>%>*mYp$}ug8O}VA5F;5-* zi?Gk*f1~ekGf%}bQix@(lxw}?)L4F`>1pq)bdxI|)DZ}ynT8{S*EPpUUSJzgk|$$Q zyE^f2ta%<+%SE`O7Qjcxi65d!j>R#o7$n^2Kj{$Om}RIXrtikhqEL$HXOY1{R`5{z4>%Ah~e4$bg6vk zkjXF^G|hm2Sz4(+{rB2niSNG^;3^lDT+^O)2PN0;FH&;-+9V*7;i3FnnqNP^Qbsze zHcs6KH$x){V+&fiK-f%CeX)!jOA0|SE-J)cvWZNF3PqPqlMt_)#cTTa)Z7JoWvb zBBpS?0;t!CjYhEVE|trU7}12V8s`DIPUOyC@5Zu?4ry0hf@6Yy;6EonyIN<&% zDJgk*c>&!%L#iDTE@LP%wfNS672z_u9N<`lSrb}>I;BYu48~0Xj*1q`1qf=d0E35~{^zRD zES?s)A&*n3yo?N-%^=auPm}=m!(Aeo;6-d+Bt|$55SRS{O_Jin@socma{-<{?S9m>$Z5Q=$;gE1oA$I~`5in4Z`Zw1g8s zk{?)+n8y%)IiN|I#+}CTr!+;Hw89ZHhPUlVeqs4V>FoG#JIFbetb8Yfqk+fAv7ztR=yNRxU(uiU`{ zbTi4a!KLee%Y7Q!JaO2_z7>QK(okhnCdCbl>SLlPMcjyDoQY%!Gl)85F}(L^Lwgh`2p$Po+pR-Jmo5G_xJ`&f>a%sg;16`Qz zK{+Fb3n{^9kge2>|Ek!6!@Gq0a&%-nzEVBs(3KH?-H$tPV1OYT82(OU)6`#7(~q~I zyf`vDI~Pl|n;LQrFK?bB2J!+q<63rW0WWdo<8`ihozd@;j?0c~(dAe9f6s!x;cw)dr18XH!3%Sd^} zuIMm0H3!jIFIP;qv$uDq`_lL)QP1c?ES9!)EUx}IH^oFZ<;Vm0l0gQ(Ar^|%?;Jaq ziTVKtbc4VJV%jL9DQTl=ltHLe=GoEAb9vTa*?yBBZxq}rPk+*(jgCYp zMMTGjx3v%%iIzj#{?HAQ-G}dgZD9NrE(pvUgLW44qo=4^{As8sc{ zk0)o+kxzY}QrtK$qcsQ#v2v`XG^q#j<+!5hbT*_t5s&%Fp$UogGE_P^n9V$E73?hp zXQME_9)@rfq>aHrn}NH88g|3TG%={L*H!3d7QrpphT)P%1Q{Z#qLfv}hXC&gHgSVe(c=d!|eo`>N(rNlDw^U18_& zXYOG{Xrvtep{*-Nr{~7s2QThYDbLJNIroO@*1@Kks$Q+7qh6{jx9xaXybq3HtGEZ? 
z&>(z$OuU*Y8~$!5Ie%P>jfzU`UYK{DsD$+SR{rIDEE*!W{9r~-p zFsORds*a|xTN<5#lBAr+WG8#1u@v(wSro@i(+SE|t| zDl2Ad-?GeN*C>tB1fkVu$iCs3+UZrG`_SL4Aq;|_j@~V>^B1Z;mdNkl{yI(Zdvt?X z_aFdE(t?ANY{_6Dq;{P0j?=H9s9lpR8)2_*S^bh7CM0?#W-R5V1=^roVO_kqB!h>o zVsV}~#DiR+T}_$$gh=KvJW-E`hSmWQVVE;d{^pfloJD;)u8e~v2J`O{gaz0F7=Zo7ZE=L}foCV}<4#%+e&T<#F zuFw`l^o~>I*oNki4A@)ZNr-(j5nW&oPElZC4)h{_R*`%0CFMa?2`ickn)qP8#!FoR z^4bCcio~7>*^4r-XrVg2J;~*ckM8MA8sy;%)8Z?T5BpA%z7;4Y>^lTXU_g$E2@}pC zjKRD@5DZ?CS2O`}p}*rdrQp{INnEbJ-z#_rIwFEFqX&@smLJDqra1zb3bS~`_{xwH&kW&L?z2)ENFoeY-GQycev&?LOZG+%l-mDjNrvnYE8pMV zA`8nEC`o@gIXEa(E!~tURiIh~@L|W0)6&vXP?!U@*Z+($ppZ9_LPMioTfJlj98w^c zfABS-k2!GeAr%R64}dD7le2R{Z&eiym=U96#wRgGx|XHoX(E|4VXbH}=-%ha$qBG{ z^dJ!$j=$$Mgp0T#Se4&A)l4I#UaQ|7i`UO_0HnAW|FgJ17*7Ipo!`#EGNhVsm>dQ( z6tzkQ4y{drM2p%e9uE=jv+Om4zY+gcnprj1xtFMD+Kc-+S7!V7IBfw++0x5aJ|0wOF98g%J zq^O|!2w%ZM)Ewr3<%1vkr?ayP7BJ$VJiCa=CJCDf4v!83E-~Gx#Vvw(qPnwBlmNuz zJ~SUAogWUfgX1QlS|ErQ>%o42Nf7)2NhA^Z>}F_Wq_7MbR4CkOEReJjh3QTN(MBj6 zEGZPxUg!fv=)ZRxl8g(Yd&7fnq6?^TOed#!84ID4KYHbi zf2b&o%03BZ34}@a2=QMFV)sZMHyQYGs6#ZQ!Pq7#N1hl#`C@KpNc7>iH!&#*`w!s8 zCbDT?ASB?VIj6w)02UTylQ)C&jK* zw^LnGeL50@=r4I#Uw6-6%2jsl9ij0@9Os?+ zV2L#yUT!qBA3M{B5gaS=g~6~r){?Mz=AfiC)Pe9uRIBtJhiIZYR~jMX28#3JD2Y#7 zd2ox@lBdAo&zAPY7a?hf(fMSDzn-upu|`89aOzpRT8c!ol%W=-9Vk|vh0rM$(k31R zS)2F%im%V0oZqBfr2_rhv?9I&L6QQ~8XI!@D{AV$>i~3t^R(p3m;=Wng`U7aKr%vK ztl`vSX-6+;fvYRS`lrECSV3MFLEcsiZ`1#rxfamECQ{&(-74TE0(rLfdb*}IXGW&n zca#8lpffwRkgFFn00sjT16&CbYEP_q*zqslOEX=Y~$mH4xj<2G=a7No$9MAUKS)Nr9zc;gsZ35rVXM(=(cZBnA zKZA+Ey}+19=XY;3GVHr73ui?{&dp&~=HC|G7m+gpN*w0DY~~mR0-;I(Htz%J0;Jv$ zM5dCjmbFC7;z1iJGDlJ3gBWnY>9*Y4wO)DG4F9%eyQ2tA2cPf7u(Yk&YF%ji7MZA!x1zUp{ zQqj$ri`M6aiY~c_3OP3g3+sAj9YMbx%q61&b)GUX_uo8r(94^Te3u<3!IY)jq&#EB9AU&yqQZe|7nnBUfjei5SHx4a~>%R(XyCM%E z8VUov)X7{CQkRuSyfMzW*TNKSB%!>Sh|Wf4<5%%JNkg;tOFFl7IC3$xSU#*cDm0KY z6A3ddxP%`zGv9>DP@IX`Q~5n)Et78AFyhqN%GnncZ*9xpj6^m?DGW2pn%C7kHZ*l~ zx4kpV?qwkTg!1=MYn;r>7nF*cD>GZ0bDl#W-n2J22+&1!uJK*S#x>?9uO-`^>e^mh zJh!uaN!r?>X&(1lT%N1=KHL}Qu5B+oi)(LB&6n)Wz;$g)Jh|%rO=7VT9QVj$bS^Qx zxOA7&_V257*H(w@ic2i-L)&2}ee^{*;n!h2yTW;LS1e6m@jpX)=}I%FE;-U)pY^qM z-g;ygkgB{AQtbT8uYc@c8?N1JKy-AEzP&8{Dr4UJOYy#eg{YUh(=si#Vu@H+3X!_V zR?R|51EJo*(K9^*=}i6hVq=(Y$GX?pM)o!)K576taAHfv4i)h-=l&4-vusyhKwP63 z5t4drbz{iUSA%{f6sP?=_vhdQD}ue34T@KF89cpU7ctCQxFLHtxNvn{W>$2e#ow}n{TZQhUc$IL>W-v_%GPO{D589=nZN-mR-Z87 zM3W*+k$T_AB+pAZk~n>T&O0;$lcBMrKDu3-F)-v! 
z0M+?MM^jzh_4Bs>H`P4x@g|`p=%t7?pb3fGfQ6VOa1a~QmJ3G7Bmy@{E|2@w$z0Ki z23S~;oV};#Yrqdkbb6iioSYuONCVP6D^cfbBIwLQ;IKBpQuxl>BzcAb^1@&kg5o^bPoP?Ve{=Ku(Z$(0$fLvm`wOwR-~?D0mCzApn~S^_xX39;^rkLEJu8t5HZsbA(2<4q3^gEN8XnZQWW~@3GVG zbUy{&Ie^UI4airuL}o1gZ-W#6y9Q^est0kjb{4eb>{4WCH1%D5Ke~#Y z5EHs6Z~R-Jr(it=6BAO2Dbn7_dK*JFxXRVA41o^V%5O8Ic_C7;e<1z1D}sK~fGR|b zN-P=PCJ3uqsS+?~==Q&m6g06bWNU(MkeN(9qkB8S z24X_ljTO>6l|9`xg`16eOeW7Mk03>x;|lYIFAyj`LWf8Hss+wxs@T!ZZ@% z36RR4G93Exb6!YMYeG^qGNw0GS>mV$rlGsGvM}gMIK8m2r?k8GExKwiJ#llQT)lA~ zU60nf^z(J9TwjmCzo~U=CT+BrhwIfzWOPCN%1q)@zDxU~y=1z>A?zCS&?mAv+HA-% zx;;@5A0_zSaGndnu1Q$y52ZjE;S4>;W00Yq1n&}0ijpR$dqX1_TNpFwMsst~?Y`um zy<}h)JDh&h(L2w|*KpB$T+`+E<9G1DTE+es3!-ePwWB_VvVUEJu8u5KC^2;^4~(8| zbCH&%L`%MhbZVF}#0L4k%{x0!?PbiG&RZVVgAYzvnj3#Ht894VIKSPB1O z59_}JVN)rWQ~cudohoazipyLpQkYJZ!RjdE=>?K-p9w;OMCN-j)Lrj`R}=$7AF}gw z1#2J#9%?2S+^U`2D$&gTqZ26VNwc9p-!>A0fk+&_C`;f|weICS&A!#m438#4v^{+Nv$H1P*7Ve=;?C~E#Jwu3`EI7=-aV7m`^7p z4ODl4fc=McA?*-HUwdTYJ4a|gxy9Xqfb89Oy87s|(62w7ZeTUsAKcSKIMQ6`bX)9@ zIFx)Im!X!rM|fT>Ik`c}B2%1)Vfg;X#8CFG{GV(p#cIZ-b&7J8!NHil&}zokLyE|w zc5j~WZL0qFo35f9>L8ra>l}<@sZ_ZLA9ibS`KTvl6=ImIn@+j1Sg3laWU%u+mSIj* z^Mu4^Dp_bc7mQy{RweJ!rPPd6oIus%;9{x7IaVZIJ3X&Jq9H>|-Gh-<6P;aT=L0`F zeNrb}7t>HdSgv)?n?=qu8N(42J;knanHD1`f*XlvVJc1@UCqw}f?=OE1NQE5bi_X9xmk9o z%jcx9NE6eURM-vVyTTFJfhYJ=s6_-#XHd;rJ;iL+UssQlXs*a|>i=+ImS)`NY9iw~ z-p}1Qqn5kGYo9ohy?@ypysX)MH8MF|m&JV=K8aO#r$k_~ zgP+s2z3{2XSYODFuY#9Rx0>L<&WH;+wc}vRtFxzPgj!NPQz)F^u$aUf-cNY5rm}lp z7_?_~>s}q$cauKGcom9+RP)L_^)Jhsd3^injx!$D$tfV|-1CpYu_x;3cixi^{5=ew ze{tliPvh=xe!8pKr4!Gs6Uhuq3*WX}<$C20LgRJw5`=Ok5O=@G5l#zGXiyha8@&+-SV+~4&gJOjQJ z$ou0Gn`@lHPs@pUkIr;tD&!@wB@fCg)9abZ!8uvgi4e8+}xc>j*W|U!b)sPJ2*=^e5lnW45_?^OZs+Xv5v~75^_!B*;Jp_3)OxoMP+C)AO%zh1nD47YJVI0HDg6TEdDqe(!%}$h(CM z{w$iEdsS;irLh7<5$(~5u2gvLjZ!)#iKU1YRMPX7QZIHYLYc~Pu=WFBsW@Z3uBUF$ zNOIga96=qQS|F)>)~d+Wb&U~F=eYORbOc~WoBKjbx|HQx2{%Cz3i;^dw*yi^4)o&B z)m;k{9jcsFhp)3(+s+u zV0gLwzHdNc+oCp@_>aS7p7i6|8gxM=UGxXg>_W^J!Yy7nHAn?uopm}Lpt^>Y2~Jir zLSBH$1Gs;2cs#A1_h#kLhNxg`jJk=W{mwv4UBP!49oXUW*og*mQ0HqcRS7BG-MlV7 ztu+V~Ay~gP*m!wy_ii^fbQV{Cd+-8~gNRv0Mszz$o8r>a0e0>e7vL3>^Rd~^SLxQl z-eUYE;Qn49@E@=YHpl!=CzCk?IE)4~1EYL_Fc=_enfY^h%g68)oyA&hcCZc50*aGt zCTjq^$7+MIgcvgVQ*m)|z68QWaKd7hI>OT*>0-D1YZe(=Bet99vqnxcu&8_@FPVK7 zC0Q+CiO2e=(sv%Ge?Q9AFL(2=&3e)p$#9&u&o1wgPCxoRHjN zp+%trg-m&M0oK8m5E&$lsa57+Q#PVZ0Jyym~D4pAb+6N`BG( z>ejhPMEYDifTSgLheXWSYTnO}Wbb+P>N7D$Wjx|5;@X5E=NXbLdCjC)ecnpTuyD~f#j;ct6g{d@=hj*1VUD2)p3v`sri2rs! 
zJ6rp5X?ztiKh{4>{QcZ-9OXyt!ImcH8vOlQc8`Aq)ecKqXW&nAaw9wP@)fzzYjk1~ z`4;V>v$zi^5yuh^z!Bp=wtUm~&2N@I9cQbk2Zq!H)7MiVIkHzN3kDmJfAjnvZ+g+f z`wtCH>hpAl_}d=#3!=Ch_lthG1fawBRMZz4Yr^a||8X`pwj(jtBYBg7k5IykEa#Rg zVGh65&Wbvl;95E`9KV~bbP|W>YHH0^RsUN{V!oWrf^yjJIP!Hn0+Mc}qK^+Uy=#$} zjctSQ)!-%e0M&~vZmNulRQMBn0URcI?UQJaxgqM z*Haq#zV;dW!^CJjFP84*#`ZOgC22bb>#qDgA6(IMTIO{LelRQZPH9?f)-bou)KzD9 z$xGy?%CsJ`a<*zka|Bb|Djj@9kfpMXoRUn)C0-N{cgll9$+(2RSQ>6Q_nyXM_2+np z^8l3qB>wP}S@~=oT!`b51p0Li^PZtpA4ScZjr;^2*4Qlab|V9^^ztwUT`YYDrm*>c z*k&He(kBfpip#MKO7CKYIEN{sr8R!# zvcgC1$+h~;`hV>2RZj^%4Q>lVR%np?DP)xx-mbP0Sd$<1%O_nWlEd%{DXyZj#{QSe z7S64W3K8X#ss*6f-={LAQ!er0Yo2VJl03Bxld3dKM%@*iO|6{Ts+Yb!n=j0~j0kE@ zCjXTzMR@rp`*LVjODz_WI+{l>jB{U=Se+sDt;=Rtp%7;j7i0{V4MuO+N>9V~?0F$G zi!j8nS!yw=j@VKAB`NY+?3qLF8O0=%-?XTlH6;F4EB>L{%}^9IdEsf$bhItt{Uuv< z@JX~MqnW*1{LVbpyr$~RzFJSHqIXDJb3f~H*#0Uj!l0cQlMph<=6h|rxV}JqB$%}0 z{27QKvEqA4ol|GoySDKt?z~(i^acyhTerlfTf6U2YC0sI)Ls%fs!`-7AN}Dap5uPb zjT@1${Kk`Wj@U6Jp4=PQ`{@N1FW{ zC@AHBCx+`1kvWgfbSiRdYdUgC_m~xoeX)#NV$74O?^~KbwZr%}?;t{W^Af7-m)V zRC?QDXJ(PGCzohk&bn4>WWi579!s|U%=ud^;t$hkb>9reX#{)+w8Zz1`qml!h6TQk z)Wl|FU%NOKTB4Tz&XO(FhLeK&#px1DfLn=Ls)B~rB~q3LpEhMhebp*}9&$J=qNAyF zd{W~pRV-_H2iirTZnrX8?C$~5MwMLFs=w9q92Z*e`p}i_>^syd=o&@xgrrs3DmLYA`HBCUH+L1w`$3E?p?mrlbd&GU&=HL)WH^?WiyF+51{)&J_Efc$H=w>~gcyLdi;K4a8n~x} z)CgkEqIeL7$#LM+r(;NYqjjZUsgL{4Kk(^P=l#)32MCmn7Ke4fMj>qi(Eu*fedG(Y z!~%-U%ky(`YU*Id*49>UFeqvV1=KeIkeNu1oB`y(xLipDSc?dxhHm@!TdGAMtO&&Z z{9ZP^wgdqtC7@)=Xy7sbQ9|519{~#HKQg=El~UhbE>K+qJgQgVL`HKC_WB=s+{0uN zWlZd@14t7+UT)DRvPCY}uvu#S4M7*>JbIqZ7ytjs#a94E4p>ExgUG@pi?={W4cO8V zCDflBY7r4uf!+~?@tt$U`^F6!RhL~bGsXo63RY`H34DEe65&8pB{J{`A-ck?!LTIf z=Ei1E&fs!@1OLc^1qT=MJktTB%%r88we=AYqxpvf%sajULqKWV|8@Ey$WHX+^|e$a zTY%e9852BZv4foD;Uqqy<6l9)$3MuuJ6)to#o?HPjN+(5< zVM)u?dgRHF9dh#WKXbMjA}=w?DAJ^VN(rZ7|7?>6lH%K11>kHYkfb0mMPP(F1Ok}{ zt{P=l1PPYIX}Jn_fCmmCm;-08JI|I0;R|73r^jh%SRR|)BH18>kr;4V%cIBPxe%>h^aA{=n3j z9nSb269neLLI|q_@>fW!0zFY9$c_o9-9(@;s%{}pLWpn^Gy}{n z4A?`;7`AZ16lLL2d67F(pnnJ#+7BcF2X0-E+$sl<(`rNG#=EU=hAgC@GOh0uTgna| zoe`rD1F0x3#iCxUMtt~rACWSx>wND#g5He^`zj6LbsC8^-sN%pW!&Xu#CKpL_N)0= zDeSHj-wIa66oZEhCDvwaYKCodkvx3k>4<-|x#eW+PVWc}7*^Z9?ItG(GnBvPWj|Qx zE6(+*^Z+(l!e8oos(0$o7@WHGM7Wh^q+@I7j>jhNH7j=E0IHzM4N7h^LtNGu$ z(7Q}?R{C}^?xscz1)x`c*G{78a8Z@nd$w7~S?g+1%KMplg6e(K>gBus3Im@dMIyM; zLYjEiR5q*V)tH!eV943GGkE|8`nyi3cWMk$gS`Tc0XE?MR5A>cGV5JH$3YDKgBm&G zaGSQ?n)I7$^rxAAsuOx0oc=2ujo;QR=LtO;Oz|3>5xPBVrEcc0{3lT|VYt*5saJ#A zOj^x9D7QF)7Wv*5XNaS#CF@;vVWhn%t`m~=7iRr%45N zw%;wHjV{iBF2=C?vmEzsfX23|iKoI`6@wC{e{TK@-MQG3;g1QLyFER=Gj!pjtsbUn zIEtdm$QkupK}`;9Gm>qq=aWO|BeL>A=I4r_L@8SSoh_fp_@SFPxoqQx+zBv0c|PQS&St(iRX|<8 zVQ}UuJ*kn20n=cGEE*_!s5$lUGSt~u?xMY0N{TuE^6mPY4A>&Rn8}m;#b|b>lDUoP zWJwHnQ^?qjcyRE3akHh{COvtyS}q3D_XtBtT&Nm;EuEP`aM&D-{hU{BM9oxZUKL0^&W6`kQbJ0A)Kem1f)ulUs>N+ULeT#p7sqdE6ty8nh-5!oC&C!#* zex--~iSGdJ*z78dYA39|C$I5$gk-Qpq;pqx1llH+86IrL6UML%^8z6@pv7DyLTUw@ zQFuDGH~GY;H+3r=iYQg@b?cP4XGoG=_^*En!`^DdQqw@Btx;6;{IbaF&&GKEdE|2lTU0v?w#C*8b;QIsAy5R+i2eaUbr~YB)6oF^xTD9{m zgWKE%BIjj{*I#tYfWms4#?}W7!G+gihaQ`w4i~uD~AJWinBmX0T^EDW48*jN_Q@bkMCYQV*B?Hjfbu&`ub!mlVjV zzC%Nx% zkyEQ`e7j#WY!?eH%UWeMqbz;#atz{MjsJlmQ!(K8Y)3TP;x+F1*?XN5Fixh+YZdTE zSw^0gq1afZYd=-k@5yF8ElM%_a4pIZhIr*67s$jSfJ@gf$R0gFfg>WKgl)=pPH)bsUmSJPy=Q=;T*zVC&r_qRgwRDx1nP8f zv=nn+$CZp96x?yawdMoTteNl@TEd+R(MgrfYIgfUi>}E%(8)@f9`HB)C_uU0^L!1! 
zdjn}`@caU!+AtFovAMfDcNmU_hKAta;I&pKWY*UdM*V`qLQ)j(#|uNqhEbr^Pr5P* z46vXq{rQ7Urd|dJ)^&w~|F}zlssRv=e83VQ(B-c&?u}jMVhIFF{{PKBk2nK?B0$0z z0s-lhv(0Y&lfqy)x^9tupq*c>Q~wQ+RkK(z5x_p)T)8?mOgE`eu#(D^RzIK0!w}YK zpkS?WZ)jt!ek&6VjHoARV!Kk5`P)2y2 z|B0mi;7P5lwDb4>0K}Y7SfEK0@GnwQP*}8WW@2J;mJ|l4muF$V&!r8IX6t zVa?~^ZZ&8Z`hX$3v2-pcVu`xtVjd6EF@JCbz#Z7HumG_jA7I*0JOfS)SLRlv3BqWA zi;TkhN3?CdH9kZuW=Sic8=V^gr~yc7W(AAVTE!ib>t8`$C1Ct4nHK|ZuFluSnD{C% z@IZX%Ti9Ju8~|&eQjArPy#gyf%qz-In${5>4m0@{ESQ!DdSR4N33_LFzFH3P5;E`y z65;^J^vkcngGHDPlKBuF6$1kTE@=oTW;hSOgofaPej01=R~PW_4Bp+au&^tbaC0J} zeP5UCFfqi&3ROkWBvfaoa0{MTCmVFf-c?QDs%ovPX)v%UccEn2%3upgc;Vs;^0?hU zlT=G9*xpliU@0vqVSJ*S>!k+#sg!dBA}}Yp#B>d5R04ny6;KDr1mtsCy;+%X0{hYg zBffg5OnBl`VIS?dcM<{#q^l}jTr(UPJ$5X$=99p#{f~N64VYt^XNi_o5jb7RulgwQ zqFhmoo54B@GWfMtkWT%<>C)k?G+(9<-%@%{d7?jGZ3oG;BoEMg!o&5$pQ=)KC+r`= zd(?zbl|oN9F?<|6mo3?1ABSb$XY|(v6_LA+hR@k#?O-+4S10c5vDFsHPFlJWthp*P@FgQ4ez2$Gn(r*R+SEpU#XrD-}ENFwN@y0e?UDy`xTg!zgaOUaK5_*Vy1} zicD9|a5&+QwcqcfHt$v8acI*t=CihDo2hbJB5ceX=;%*ZP+9dAmIx4>EEX`Ar^puw z@W?8KLE8`ELsLwM+x{LrS)eG~PTmhv9mydrQ4$@A)l~dR4TZg7&9mp)oey1MsSHbd zer{=Vsq!}gPfZ`nSFP<|W!e1qghiIJZZp#^0g}l@%FUHtDoNg>W820c(OlFKB4?Vl z75E31Fbg^CpaXbv`A4m>jU<)$ZNrqEG;C&M1@m&{h)`12n?4lrnYB<;$W2ITd@G?# z;eWyh2p=q^pT#ZWp*{!N+nyBX9mhDK7oR@MQ82l!rt$DeRn06ifAak?*qr z7Xxk@#zQXyZk?^n3WF1$PPZ?#o2|v$&|nVqkIfcs2*a`@MAhD0j>e)012Sm}u92W3 z!>#1tD!E>=Fj#0ue%9GcY`bL~veGPyIz%lR67PUTY~n~n&Ifmf!ZTGWl_82&7gZm@ z>Y1&=+|&E`P9@_Pq+n9L|38C;!7>H~s0Ht52&d=sR>bslA`SUKWy=C_Sp|neq3r`X zF{^M6#QF286~AP6ahTld9k+0p+-zCf{M-yWfR+JsN;srYQneCVHd>WRwrGyEzuKlm4GPfb7Y!IWTL+Ex^U>G+js* z#G1_DZNf|V(by_<$8)#3vWzKRKr$(kX$O3#DSURZL+h9)MXGqFRz>eU-mkh+HYuu& zjSbf}T9f(Sqk^y_Q%eHungiyHy(^kt{OmW+izoPElYjVqgiZ{Py^AahGdT<=u)T3E zQ`Lp_QjSl%?9{y053q0}}UE8)sKbQ7S9@%#+ zPeX+8df;dF(~3PG=0EftQIfM9{<(IRFAO?vIvr}XZw=o^6mPBzws)E-{?ThQD0l4I zT)bXt;wv@tz^=eWg0SUkhs(?^3QNyF?MBq4>xn;{=DgoRId0*dy^x}B?RKdt?T~zH zp*{C+m0Pd!X;%tBb$qY%jw-~gs!KE#I_PcFBTaSoA(s&>c(rWc(XfHLmyeTycU73kkyJA z7Npz{XMAmAb=&A!f{RAUT0=3fEg^tfBWN(y(|`!K7+*FD`^FS-{Vj1(qu8^4uKT)q zdv65~fj0oEZpBr5lFP;z(-|`?V3+Z8px7N_SpTT@B7o<0lw1(F*a`j#VOokxShfg{G|emrg)yIhfSgAE!COJKw6PTyd)mh@7`D-QDcA-4~*_*7t<1~%N03MyFD1-RX^Lx^PH@m8v+>C=|`=;;W zyp8m|KecHJ#e<5fO|6WSpN-^x^Y`9fYEvuekeqPJ;$+56F`RK!5yQd7wE+79y0R)7 z0jy#P_P?7EF{8f)LNV^767~=+4(!hGm8(B6Hc6L1OxNhqn+JtSf%NOt594kh$e=)) zNKa4Ci;D}YUlD~J0}?18z(XMx_J0F>6=0<9_wV0r&fCG&*C<12K%v~(-93q7%$`de z2L=^R>V>!}2~8g{1#o4^V<0R<{sB)rK<(u4xav;ebGoDDY zdYQ!MLE?|Ar|Vr{by>6vA)k9Os0_kG@%8T5?d8sfCnVUFq#SC=gs#rOWCp-DlK%Kf z!%0H}v*QBO@b9b+9jfGugvvQ^FAGB^4xQ))@ZmrO4vX37FM!D8`0eTH3NSH4Nnr5E zmWH_ArMH9LR`u+!u~tHZ9M+m(OOT*Yh=?|Ujy^yQxZLU`PIk^?%)(U{Y|K$-Z zr&6Q5;m5OUl3G2Za|e+7J~%uS*T#z(+1euh)&yi?c5wj}=D$0Fh{HZ~0EDNyP!*hl zvT3-YXobVWNZ>=rD6-c|N=s^GbX=%*CQBYNd-UEh6$bjnp}o#Gn*nh5jm18_R!v~G z9aL32fy;M;Y?rAu&R3W}ECyiQYmLDr!fApU07-KA3*Jb4Vj>u)DGrQZ9xo~wo(kdt z!GW-#U^k#3X9>PSTZ55H6#oSv--Cg3VK67;-@rG>jDlXM!^`r77zMnJ=ONWSRB)jr zeN2&&khf9O;Bdgjn67F<`D3A`cm&eKS42{fs6P7iT>y^>kQVq{_BeP1WMj}uu7oOI7ClQc{j2#$o+8?!bR;}i5Gg4+Ou6V4sz{gQS1FbnZL}YB zdtXTrp~!ry;y5vzbwchC1gID^(@CfTaTZC0Z3cf&6WfV^K-*$w=Hilchjq=GO8sSLj_yV&Xtngji{`qDt5b^ zMHp+yGYEsdYt!FbyR$A^x!#^E23|d4E;(B}&m>zaLl7mvr&3Xmt5RRWC}aFrw=TUY zt3zylD`-dQyARM(->YFxG2)4mm>1e5GKagb>9*kwZ8$l$5tRzsX>{k{JWOmRVSg9m zri+M_Gtzy$B8hA>SSX+smO}i z#D>kqdB7Pjy;vfE71`S4!EkY=F+W$|mXHPSTQM>CPc;f?nN*JB!j1%8$s|D+s-lXK zl^x9o(smDxztc~E+u?_mw(r$OPWUFyUz(qrT71f&d;BUa8)EUL2nlI zNO@QjOFw?aWOtlqyvl+zg7CAP+)9+Xzobb-T@UoPGH9^wf(g=iIM$oZ`gfSDppnJh zR$7`M9ZPRIZ*6=1ml5#7VCnkaeU;`je8hR}QA~56R8KF)Z%H{-R3lv&ben1%eLqj$ znPQmIiFNtNv&85#(cn@?G2-BwNG!p`Gl*6Bm-D 
zsUB#-;Tvt*(YY|dz!Lhb5s`X;rHW`iNuBW{zWS@s+4ygUeOW|IL(`UnHW$6GSjAJ@ zjbl-6uwDfgPa8WU!*n$wln`u|9lG#8?yB`@Y#<$$ykYj)N7U`$xrX z>w3lUH=ip}tRiY&>w<_CLH9l`u{3*E1JAah7DySxmQ$&uFN!^D&ps~gh3;$;UX3-T zIfY3xu(%-ObF)uR+k0(`2udY9m>|D2=TQc$YX!vJ2#7&0T?Q@q?{q<9T5gqwMdzX~ z=S<)5wh3@oz~&?h^_{?o@lZ=hKp2&DV=%F>syH_Hvej)B)LD$PnS=l|wdcKud z|A7bpW)v&?B*bhj9lfFR)7oXhi{dZJPv@f0g<;Q}I0jf_i3~2t6QcY3=aS+}TPqjw zx(%9a?~44}-Jq}BnS25SI5(b(f0C1%Mt!wcj~%?qN)E3>laVCWb0ZpsF|E8xeTxf4 zJARR_Iq~~eRqYFCj<#a*3O82>k8(GnUwC-&6IFC-EID`Ayz$UKm>SoL2X)+f7+&1P z;ZV!&J7y@xu0>4=Y&ji!6&vrfU%&iH@Wk(K#>VgbtB9OwQ>**0-1xvQprq%u-}!Z7 zYQyOfsg0HAWBSOFx>I9Idul&L@OauEU+alcDL0CUecbIB9^P1o<$EgnVZ<=P|p}q745kA5P6yXtc*VHb^&2=_8)!Zs?t6A zfNQn)w75=x1b@uVR7Hk#HsSPgUFW9Y-3B%%_LB^1?u`frTKHfo4ICNr!OVP={gmG% z*Ajt49a31_tq2|J5=ZlFm^!^$E0KhzVzJRSWzIvgt0q`iv9D^fs(*E(WlrUuou+|k zPPA|_blKzh2|=<-#QAk08Wu(qY;)u_-Bm2xb1r&}5b?8BVD$Jy{6a6VN1qH=)p-=R z8}M=EuRiAm`E@3&BVc{mdnjh!WiSG7>x*sZ7mP-N!xE5J6BD{b%T@sz(swy4^t)4n z;R_HIxR#}*_-;g-Xd)8i3HS5q1c%0(Edw*bHAe6G-c`l#!*71KII1QcIzOOZBW!Cc z-FwrIJ5i08*#v8-j}O#+d7h$GTwS5i8-5~(lj)RNbufm5!YJ$Fi4gy^dLmb57-P2~M1E~Qin?0R|t2?6uT-l z5#Yw~8BX8-S^3}8?teSIKc3OBo*Nhr4-Wpz+5ie>e}BIYdI$JA45RY^JorBVe$ZcF zkpjrbl7PaffZqtYAEAK!1!gj*4(jUaqN5S4pLXMf>B_+5c7ZhzWX4j=H_@@jpmD&X z>F~Kf0p@psn-rp|GH~D`B?xUS6lE`|U!gh>2}8MrfQA-NgUzSW02p_`19{8uF`Ka+ zQaye)QYOK)ewYUMlRAL-H&E8+blU6!JR|1^VC}6BH`!L@5NPhNuCB@mhet*IoLIEx zu;#@;LjyYufoqZDY_Uep!jlZVu5cI)WCuEJ7Hvjtpu9YiHiEQ1;8pffFdMX?SfVMx zQKLzsDj{xf0#`Jk?v__)oFqCBXu5}zmZ?A`n>a8ym=CSdFdWoB1q|)sjaP~_8HHdb zjfJofy95Hw@_vfGYWsx}#50?}ZLDg>Yt0USI*l)t!1*wXu#%`#jPH7%NM8D<;xk*t zoDAW+Jo^s^V*cT4|EzV4+(_CG=oFtpVpP{7!}}r#J+AfC2 z#Ase&nJ1Hwfk5bK=!-0}N=lA^KjY)W_m|2xD=7s^8*W#+2ktJ=J_Wc`7AV2vvC_{X zmh92!^{Wss`OKZR>$f`ocX#~T&YlGp3T-BWiX&0L@(1=o+zkf`0p|eXWvGY<2j4y1 z1eLA{0gnSl?7tJ`BLG6DRECFxQ;CHd`GOu*BmPt}OhCEPH$($+VbO%^C&-dY8j%!( zA7Bpe19Y0imTXlYD9@-yYz)WlUb=?Fq|&+~k9tnV^3k9_j7PF2d#NOFB*jcFhd0nQ zdK9&q@DY@+yj1ea0URmv5LQq>u8LYE%8?3M6oy>%0GQY*B}atTe1LPjcYO;^Z1wNz z;H(uesupkfE_4(`^*fJb^8&axEd3*>AUp-;m)>FhI-K;!Q)jYCs-5AJ5n+2f=h70l z@<=Kwb2L*9)W(Q3B~ZCUm)6FGrb}_u+*)&2U34KEtyzSQQWNyjfM>J24w|QJ2sz*O zm(o>AmCSri3HC4R4U66N-66vO5eF2;`1vAm(rg`6K}@C@ut>QmHPi(r16yzKI`k=nn%+-tUj(;+L(g zf+yQ|gJ*T4?*_fOMDM2`DO<`h9Yj-?2kq$+!93HM)f`W~VC`?IZ-<(n$RFsnrZ`41 zCG&e+tZInAJY$Eg`_+}F%Sn-)tS8U^>CA65H#NH1sH+<>TFOAgW!yO13Q!yRF2_#?As@$IE-L;nRa$$=}yEit+sn>{QEtXe4ER08qg-C738E#U8**2bDf7H zAAk(kg>X|q*2<3tk0puu=)&w^!DMefS32qxk!lii_Cnmvx5{Io62A(wekwzXL>JNSmGeYFnVNSFniRVq7Zt{Z(zrgBIAV!3R7wn7(OvTJ z9Hm)sxq@Pg@@h28#KCw7{ti4~?@UI~gz!;*wOANru-lDn)tnZN7XZs-4regLku7#N zeVw3L8{Dyu>{dW3&QnYf4$=LGag~N)n?60{fi0zGZmEZ(D2I1ufou7*^)2`F$(hdX zNiFCEBD4=qy*Q@?m9M;f!-}~AW-7bvoV_dYO#Yn%B!XvB;ip*v3ss+<@zQm6w{WjD z*X)Nrphz|V(`}g?w6An^St6O+G~2lVz=##9EtES)~gdg#f{kJ>vh6>TqkNROHZ6SJmJNGXBXnjkXSSX zgwWLL9cVQ-HOGR^gTv--BJAZOo7z7OxpFblCdLtvi;`1%Zx1`e^LQ?U-K>7&|t1|p8v63p-*J*)zR^zvE|wbzSQDc z$ry62_ICNQ_OxeCmtU3nZqAk7?)xW8vOK!Sm}Jdjg0SMJ}7LhqQ$??yOBfJvX!|UAD1_Y zQC#v14|8N*CnDw>Z5$#A_gH;C=&_GID_+&*oBz0}DRJNGIV4iygoIJLAcsn%u2&*+ z-tlcrp4-Deh8g}N|Pa=8udS+}PYr=dYm7`A~ zmJr21I2Lz(yT3db_1_pM7HafBDt_kVz_lvLy%>GwWGW7Gi1BI@o_$x6-~Ck+>m>cM zY3)cf1BmeG+M$$wnSkHa9^)JCsy`=jo3x5u%Q>^@9jjMeyjL#l+pckcW#54JHm z$jl;;1g_{C9TtXqLqgtjS;9B?2_9V=cA>MNlqu%|i~=RghVHW3C|>F22pxJnaVT;b11#&1BF4M(n;pevmg`k zZ7b`lrbce@m&U-o&c>}{tQT67K936=wZdMA4IQ27pRfv7RgIw>i%|&FC6xhU#KjL5 zs;knmpe>Dg1QWW>9Etrw3YAc2gLYYgBPznVh$xM3!UKOu4Ztc*GM@@9>6(`!)?SJA z>wY6RegDJ-+@QAYsY#fR+Qo&_?!J>J)1Ux`uBMUpWmwZ%@t2ekXaFik!IZt52bXbOD*O(+QGj|; zMzR;Yf0!E(`c-@#5Ip@)+wQ>4NUe|Tzs+9x@85YLj!|aB-rz-RYHMqOgO!NLm&w2a 
zqG&;StNyPd`om*uSWcomFxklm06?ieKu0GnE-r4&yrH#~J9EmXJ7IRERDtLifhY_kWBXtTG_^o=lFH3sdj*E^Eov#3)dG~myeFwy^E z8G%0vu$6*liHr&YScB2Z%geLt-7o)kS=^@3GWxn-&H-9PiIBh9ajCgYNT2}c@<$? zMEa9xBuvoI(Ed$9O_}xqv_=_*epnEz$mf+%(d}#}Hx(IaF|<#p5GGKL7FM8*MK_{q z1ViOOfN~3G;e}D}zFN+kXD<2HUsZsMj^TbMb$OqbhBGv4zYP!BP&cfPVe(bo66J?T z-LRGl_9~%VN{p+FDE<`+mqs1lkba%`um_ATebScN{hiy);cnCjb_xbvMiZPH6m%61 z$6z1IFI96Z6uJKb(4Yu(n%?2-rA;W`Ci@;AjkrKAP`RZPkHX zk^WKD8|+5G)yWSym5)u;o;8gA!4YEN?W7usnVFU5$h4l4Ee#zlki{k_9->{4ub-pW zSjBw}?Acdl;cbc5(_u$B_a1ylHMArgi1`M^tF|*v^S92{uyX{-?YGl=O|k2h`&cN5 z8PZM%#qbwvg?dv$+0K7|pR3q#qn^uA74WB8%SE(>qC09PZz{mQ@3!YV9ccBUkY#a{ zE3LxchI8bE&z_62f96GOG=4Bq|8$oxUt>((eh=2gVEl$P^UN?e5E9QWw-^pdxYG2y=TzwJX&u|~k zryK-#OooM1qWcZ$e~WoXiw6I-cUN&U>*MMvzKraH|DP+we$eE?Ky%NmYs~v;KDCW~ zYB`BSf3mGN4|qG7^0FP+9>GLA4#%fToR){6)Qg%#Zu+jKPWr6Mx*!(`;H33;g>iC` zb9{who8NP zDt_1wU+5VSX2Nr|pk`hui_Afvl2Ib%lxv3FO!Ku}WFrQ|xma(zmg2p7&5D zR!k;_P$tIPb_6wVxQ4K&;Y!DZ%zn~tHYAO2>)oW=-GWRePTC4QTr|kNh|DUY5g0P@ zY<02JyI6a!L~gkX&L#uvTDI$=6@7`r3lUaWS99@H0l8x4a`AM~v^zWX<)KFmA^2fk!Lw{1EsWW;>t zib9XB$tfqF7hVv=$YwgG_#Wu~_nS6t##{wDY^vmwTA^ceFKLXkBJ5+akIA-GV z6i}UVuMC)9!xR0<>k%t^TSjwX6c-rs)wA%dc-yC3K&8h>x@U>k#3A~&SjrLFY+qV@ z_knu68CLc>UoAEgHlbR6ZfiszV&&V>p$IoXiwzjpeq( zQ|(PEo5|#I`ojbI*B$GbVh9C+zCL9k;=jB)cL7|*;rE4Td~Evh{v6@)Nv&^eY1uH` zNh-ul*p)|-uf{`~W!vz4T7#0wb|LhlI|Q=!V-lR;RU_iwWa-%xsb3W}b%r9n#4`wo;fC35a|4YeU@@Ymf>L2)g z&Q&+8@iDP}qcaGjr0M1O@hatWt$@0M<6%aI_wuj=IZe&{O@XsjtT|xQ6?h8&*O&6X z`l5A@7jppcnd=Zwwz_?x`vE~_NRmYWbhm*`F`z-k;j}3LgF~n@Ha+eC_EhhH4yh?J z3qJ6ft1H?9J3s>W5eS2D)&*pp&sG~rn}*7G0bMaR zzYCyZU@BMh; zi_hR3n`xm!90O}6XTZw`^2;U%b4<{RkqJIQfwECpjB4uY+t&12D5^p0APxlgrT%E5 z#mxU&?|6cR5(sjs6qJ;tQQ+6{AP@u&)I~o6`4JX$gewyZ*CJPcVYm$f0S}5)l&pA~ zTw$J6`5*VyL-s%;eq)XJl8WB~5a6^F{5D)_Kuv|;`6WBM}#0LbcKy?jK zl_?acs3%S4!5xcXDuk$54`VsiV|w(rglnk0V;v}Sw3ygO6iaNkM%U}{lH)?*4?vM& z!u^q~k1Xl#Hy)!H(q|}STmcv3h(v7^6IEqe#d1>3LtDq73wi!&rv5hyW6VIm+Qec6 z<4Eeb4oqD#zjkkb-&8K4TCk~la^mm}%O1Y^BSUy@i;1pxU?j4ysHJi@Y7isz8ZUc)XGZAvM* zE!(=K{JELVv{NoisR)ehY}Pgh+2hqlbH~YwJ;xH_jAB5jKq#+7b`(AQiIQ?m#;th( zjZ+$yQNbyI4z(1`2Ez?IL+XgR`aNPcGoJ}O9W=q(cr}P!AJ_TJu-pD2ktI`f9(?5> z9cKLC@nPfFEp146QHn?G$s%lR^-)ta{|*&8ED>oj;J97W=cq>%Jv2KYibB#J}A%A9Vdp1fRXQ_hmp`Qi4WR(+dLZja#UZ}SaNh4P-% z`+nJJ#z)%AoB3_^S=z&CM7AerCUS^u&B)vyi0mGS9L={}T-O|2SFFW-TxZCv9ti9n zLAGNz@lvivEpZlBYMb{y;+~pgQ{UiNnh{rLHQf$6A&_y@tWpa&4ueAv&Q3<6t=nsJ zS*O$;R|iPj1d1p&ePNgD&$-~F zG4saeE0pYvlJlr}ORm`@PHj6XTXTOSY&79Quzh$#AaRtuf}Cmx&HZuZJSHxmtj+$b zr%?hKl{OV>XBia@S>D)}r!d8LW%zwmk~VBz8;QJ#Zg?o zu$O7h!Gdwi@VaDQRSEK8-jewIKBcDAq^u{D5z)}8q;2CI*EGV@n!=SDdbIm@%sTF5 z?nISGQN$$<^X`QYQ7M`N@(c7uq?ezDo|9qxwhVb~1k`&_=w?(f;l%k9Ce^j2;Q}_R zD|a%BR*Gi`4iacQEV$V>_|DkZ=1+%Q>6#C*JPQ%h@}}MSou|v%+Jh!^S=&;=O)}ny zndW3%sHEUzwBvReC+dx)+|j+(X6Ht+xMKGM$F1W|9xpWSa>=BqYJs>HqHWJzUmj^< zRSkUVjF+TQOuwQ|LybLV@Sk(QTtUKr)%Ge7!Yk`eIB8o+P$usR38&YXry#4*yp?x< zGfkf$eTy|eY^5}fIe$K|y&vYgaVH-81|1G1A+U*Fb? 
z{?Bk`M9tp3sGOh9#m^mm#LGdI2e%*5nswMU^+?I2$x<+2YTvr5#X4JfFg1`&r5HsK zqHiq!nS&5ZhgUPyfGWqznwVx@&s&Z8#i4i9?SDrli~m%H*1hez*0(KfvO&SJPONP# zwChCv&${B{`?eustX4lxwg*|_!_?Irr%}jFhhq+9G(2`l%pav8==UU;0ARmMS3aOK z@*u0xr;$WKC5jLhdql@bh=WvVsw{djnIY6gfI#RTOk_Y-`YG`bOr=CU__nY%8!W|% z7@0YG|1R+eb!c(Z+jB3m6Dc4lVi1S-Z^4Kw@j&9Ak8qpf6`#XZ$pKxHfCB>LY=NW9 z&v=#EKlE=Y!k^DGCk2qA5zi9g#Tqg&!BmIS*}gY> z5sZG42~p*NO*AX;X!0hLK$vC=@MM6RGZF!hb8s?H;Ze#JdIuy)i>~nOI*>OuVF92m zp9C)|EVykefN0rC9!T594UNHpH##_!l4LMjD>OYBmC#v1ZRZz%Zq0 zok>%fjYDma%^`!YEhbV(^NYbYi%o=p5!Ikd@c`4|0+^{J;C=#B*0o{Q=V0;J-QykS zK$za?Nl73U8DN}ZP62ll*&C+VK)=~`sYDUq++9R_7WSl#tXKlIC`7H@b$@7R2mtqh zv{?F~d8zASrYC0MXz71n_X*Y^zTX{*mUFT|d0!5XjC}i)`6;RaQC(nq1=D5n4MU7? z7H$TTGr{*K93$Ybs301K{hK%8G9jA1fKrVthObAz_Rc+YI+1A!pSBD}>0hv{9qEr;f-tr)&GMjtkb?^G==>yLIxl7xxnWmIZk>MfX zrh`%g;i5e_1P(H;K}E6|a+{w2PQ3)_<=R;B9`z>IdUAq{*9PynCkMKylWmN!_Pm;R zHA<5#f|PJu?5qA(lvg@+^}DrkV5`>dt}OLcHywzrwZPp4yjzn^ zlb}^Wcc4`?utDmry4djmDtLk{MT#Ra+^oEv-F|@z9L88-XhY)1?0|P?sAo;~%CD<- ziML>DPnUsnzBS<=hPSDVOFifN?t(^by5g^7Tdzij-;wf#@1g=i{@&y=M52>9T7U8t z-FjXA7B*_{33@^2Opd~VrD&~ZMJ7VD;ZjrrO58PBF$GLl)(%YergYXrT|p--zeBz< z@ZPV0$3y}Fm&83%73!t@aGT^LAf%FkVlY3^SQjC#)mCp+^%t3LaP9$Vc*8&r>bsie zFWv3pu(Dj&2kp6GzYTeO9BGXd=n&k_r%bhXMTcO8b8*G&L4~V^=Fl7CCKN7onFE`=t%o8c&klZJwmoZXk1THxO$#CO9FMuIiMcD@o} z;OpXO^6_`M%gM-=c*(*ap6QrfTEBiEB0z2!GPn)OEBzsSp(x&3?ZEY|IE+T1sjwK2 zZY2IYS8NT8iG8=93)=fzJU4sdq4jywfd8v4f>Yeff3 zy&=SQ8;dc#;4DeNauA}r?tD>Y;xEjEwyFk8j3Nh*w;O23E7q(0n={X}M^NXRK^^VhmdL@u-CX@4oIQ{CtCROfg?A<{Ve$hkT8SkLb<#zUaf zE+kvLvKu$ky`Ws)reJ1&8*$`xm`!+Lnr3$jBW#uQ1-#kNk;BEC}!R_q9aZM#5Swy}64q<;6zg#i%7x z5Pza6HDURsJB?mXw(fU7&Dz3jvcwoQXz8i;=n?oId(}kq?+MlZtErDaO!}_PA5HOS zw~VY6x51Xgyv}1rrsLSjb1jCE6<@+zS0bBmKB+dz^v{!H0{XdG`lxXNpIF?%n9M_I z{EutKxURM(Ah)u99jDSIPwGhyWefDGB5tK~VA)BvSum}&7Wa%+obaPK)>ANOsH(y| zDG^m-a%F&$FFG!*GsGZ@!Xg?S(D&+&BYxn_ z(=RqoN1+Q!hDK~SjUUEf#^iVCF`;vw2udL4Ppza8DI3zr!306o{CRD^gPMyM8=(YY zRb)~zFS57Yf(E}2qRpG!em|HoDtxQ`l^qh_80Mg2F}H`x~e9W`q4+X_9eh1`(!phw}3#_Zd+ep-`Uw2066|} zarwp@>3X`%2y+Dx`=p|A>di)xLSk--xi%Nz)Ihp2|9LmxZzx{S^LLT|I)<-queCa? 
zLutoo#8W9|@@1g&79FeoNwN+9=k_ZOY9K?zs*9V{of(4I~f4ZZBcz_cJ z%RBU@;oRJuGYoo&3Cq~nSQ-hq^fpE~F9}_+<8+!Ss0wXZ6=suy1x;Zu2yE&)I&!R8 zOe^RB;O_Om5jOdUX2Vc&I;>%AZXW%PrquwBTcN4Wq%=!W9nEEJ!La-^zVOu@9;Z<2 zR|w+4kBnWWNnT(-od2Oipz3+@?J+}4Kk%O~mjANvj?I<;aJm+P9{;U8bT zv-;?k=oc8@Xtf(Jd&W%6V?7AdSyh-IY>JK*EXV>?7D^Dmvwe%FeB@J|7`oaMQE-IZ6O&8<1jo|oc${-(M z$tm@jQ-97la6@%p08t^7BpIkUy!B!C^-L|42H=s7f0y3{R6zwkO-RZQJH#yEnU;jLEib*JRtqvbt?~)V@nWiZtlC$K|wQf@vD_8N<0KIml|_7=C5@Lg{HC zy-vj1WHeY3jx>3oYQ&Qo)Jzmv)SU_bdag1CJBeWfj1x9vlI)uqa^s`dT^$-x1c#W5M%crH;q~z!J2|8 z;Se&1xA=WmBn8K3Wa>#ylbLWS?!{Y>7IOhmA^_W(CGGW(nH^4&LuyEZO2K$iT_~FQ zK|%?@uwu!QSPoKD8!Bx^0KDRMBTj2#%`#iQVq(=nl%RlezdAiImqWeP+h*jw3uYNE z3Q)1UsCRLi(*`)YY&pUVjwv|1De1=hlp#skX?|apuNzpx+=$FbC<%~e_%8nFTvppD zEDY%VQEa|{;!!hHFS@M3lE59@`4L}+yEj%$a~TbqKa+A%6-gEuDIYWlnO3^XO3QM^ zqW^4CW|b1XkR*+0j~_)yBqBK~zV2`G+=Qi^v{0N}j(3l46yeOyAc0g)ms|w1ytR;S ziM2C%D{K16lLk{8GY@+1{3+M!8~AE0@h(ZeSOSc;>b^WEFUbC-QLx+(L?SLU!o7Xj z%yI1n>hkKI<$~aKF1#W~?0~rE%J9Kjt*R{8-Q{%@L%CSn+qj5slGVDH=X-hC!Ys`q ziQ#KHO0j}i7wM8y-YH*(b#f zVxNuY)0N)wHyO4`!O8DIH8hUV#U+Kf#AYgx;SQInL966{xSD^=(R}QS+gFvi$F}%K zRh%BK*g&g(?WuF6N)BD^yOjxeb;WwW;R-ip=bu{e=$d2o_1l!u-9LyV$RJB@&ol5V zEPob5xVq}Ga_7jtzsW@j-c{wyft{h-_*pCph!Kf~n1nOWHplbXIt@w*D9p;s3XOr1?hvnP)?ENwvL3uVL zvG|pDu}WBg5YgjT;-9ne5A!Ef_4E?gu0ZR6gv~Eq$Y$n8D`%7KT3zLSR!mYiXq0#Y zk%@~dql+?(P9LQmBIJIZl~D7gj?n;2C82aQ?F);^?fm z66&xlJ4s%ib2`Bi0}LFAPW5N+8G_>N=nF&74dDm^cUa$kuR>Wrw+PPmC;*@TWHJ!8 z`WqC*_(b{IQ3P}+603DM`*@PS&rl?zB57r>^d$h;(C^p=*;8XPZn|JOQx74)#P&~`KKZfkBV5{JSheX zfnK~6$O^(toj9P?k4Kf*51rGbgxii6`cLwq6oNm4o&vXq+G5my3syanO4Q?@?)u*+ zx0^pkDiJ;c&=Vu~u8~0i{n1A7MSOzC2Cue6-Lf>sO*dJT6Bg zfUl%c76M#27rMHgLn&rKLeSo*?*QWpgl9fKUat)6f-t8~J{kZiG00$N0a$cg0qvL? z8cTrDOd*r&-^If!R9mWtmisj@vZB-L4vJRoKNH69eU%}=Q(}hmDf`iae3T4sMGGL`@ zU_n8yVnXlgbl*Pr-FVlw ze|saE$>ww*5K{z@${-vOQ+$U&)6RfTfiMSFccmU& z03uS%%*@D&S$tmgxWb1;MsVKred&E^Hvl@^vu%(wC#?YtA-HT-Uq+&-WPlVB@E8Df z4X5SQw=DU*lE&Oz28dUx_MivOG~9V0s>CG=ENBD((xldSV250f%d8NP>RFhX39%M7 z84)13M*j!n8r?w}6?E2m&9Fx=l^4jKi8DcOv)){ltqqQ_;}4ukbb;9^uN!Fba)j>3 zjmrn@%I$lw3K-N>pz&VW6J%j{Y&bMWB}=Gu?ucEX=~7&XWGQ=LLyLRVxmn5(nYsD- zc=&mMVFrj|dhPlH;BwYGkBA4vsy=|%cb7wg21V|R_teBhh`y1>2_YibDwouPFb5ob z)sF~d*d^0TRJDGsYZjp9N}y!aT9`}*ODHVJxVx&KUWMIum9+Ufnt`T5l%ZKbto@|g z4s~*nMv<^9VAB-zn;@cY!btqMOUYk0*wmxzN`WDr&v*{P4@*}YTOY2dvJ$>SLZ>qy=Ga$l7fyBCh4cC}K)^)|%V<2t|X$JNRRb2c;Bu zCBKB4SgAmiiYtU6HoAGLSqS+E?KC9C2H-;8Fb5qC793FgIsKprHg)R=&(+2FsYZ`U zL&jqlzh}v(o{bSo(r|in>&vPA;7yQ?yq)ZxTJEsIIv>p${LR_%OVG}=XJC~pPzr-C z)iP0;`-r-Gqyo>=EtSjx_G5`Rn)uQ~^OY za^OBpZl#To&a09kYi%xnhRH@exHj2VS>$CKl!9M_5IGMuWRu<`w@NQ>s+uMH>XV`g zp^GV=(xVx$5xkWycyRw*=DIVVzABgmFR;YW!m`3L*kPg~KYo5QB>C&me~G~|;N6LJ zU>xqdmTXYv9Z4lsDNEMxT}&2*@zyUH80>2~ChVl|m2LhSrF14Vw5$VFw5H?BL#Ygr z3_1A5S*R9GVKXhmbx=ip7^(xMfwLK0Q7yJr`nM8#&wEtkNlIe_>HQGKm3@uJ#scWJ zzaN@e_Vc3RyU_!2P9-V7T>N00dTl+7(2o>nY7tjn(k!5N7zSV&-8$~Pr+spto5y?> z4>d-S6keK~CTG5MqZZZ`fAHymOoO|n+6dSN5K^W~Rx+75$RYvVF(({_VSE0mmCRN; zDMiHd$*K^g4u62N9Tg>LMk;;#l)MG&Wc%GI#9y2d8V;|+l|=GM2MbB&B~ zc|lx)`&Vi`gmDQS{tbKf#fiR=8dFID3P#|;5f`Kd{?GUEg) z6)bFgzld=4&;bjqmvwA#(s`*u*(o;J^<@a#;Spedv2iwIq2V92pk~T`JBW=h{+rRbrm#hLopR! 
zK8?~kL}A`ZmmIMdN0qAR4V`(4>D@+W9Ur*8J$S)x`CP>m{2RjQnd?;aD=*VX0Ope9~yo6dy|PX-D8 zKQy+S%8WX#1sZ7~Ww?S9o2@0?y2S+{W&F5_W~T*Fuji}VllN2)s{D3epz&>Cs!$ta zj9bzX%Ujo5rF_)9z}wX8^fJ>(7&7EvYau6#-&dMn@2KRN&bj|PT_i&x4$I@2;y8xB zNfGP6Vy1<37eOWk&s73+-YX=Oj1hYYsi>OSC9OYGS#&@8bw6C9;#vwdcj;+F!ROLO zl<-nuAqZ8_#LJ~w`~Q-N@SqrViD6cg^YUw}zOHNHWq;piQN>Wf4*Mzev?L})AtFlS zJ}HWHM}DK1gP-*oqJ`g44^t{!AdPD=63l)C)zB6AJh~9i|!rS`@Us4j{7w!XPf58Xx~97$7kC zfjo770E=%q(-LnGUe!VrA_xe1 z0sV<94{Q?FkcAx_#{dVbBF&qC z>E*};Sj_y}$UKVAKoxrf_Pm+IIj70oE-fGPrwcUpk75JON64AqUhX>p~Vt(i*7A7$tS=WyfJ7bUCH7wIg>Gp3Yb9<<2O=jd>^sifM~O*+rjMby``;G+un$1rTk;%O>i3);Vt zE=#_yT~wq&h-w&7Mkt(=iSh)Bg|je`OIqfg^tWjhfYU`}o{+AMfiw%=SH5a^+s98? z&*CtG%9B!3_D*<+f@xaYTjB-QZ3HBFlVYrl?LSawa)ODN6#QxZ;9MMLTfm z8$%T4J0A)_CGV^G1e0Fthr7N+&A)U*CX|zVdA@z4#S%7>9+Alu~aT{}FvRnokT7he6fB|TLfvzD2SN|+4Q z&!WRGvy~O5J=&KmvPW?+W@$i>Z~H4piu+5VX&%#+vrO3D^#s%?ey^HYq{(kD^qX)fBZ}sTmBjIGrpWSj{PCG07kxvZE?S*%OqXsFZp zeTz+Vr=k^HtgVbFhb(Rp_No)l0e()$7}ef;59Oon>|45XeVcaMFezzZ%^P!ipVEzK zs*R`lVDKIUHwpWL1J~vIbG9K)&kzfX2ZW+V*%(KGT7Ta>?`Ls`w?pA)`jJkUP}`n- zX1&=_~#7Hw@aRC;&XR-PP&cS9wzpanEMimN6`zl$|!#O^`4L9N^yWxz~roZC9O z>oz-Q*G-;(r=+?eRhH`vQBd-yCT`JcXJ>+DF;1ax_E?WA1qtFpHmJzLysE;PXz7he z&^6^p9ym&vw25XMY>z8hKcBLWN$&KWc2Mo6f!910)}4vZSQF484K#!kn@NuxT=jT6 zb_NWHG$fHmCc8UF5>0YS$$K$K97wK7qruNXF%GCC*3-|i^@P30d?{fi#f9g@f!q8e zaIx>v9@xx1MZGQTPd|7$fN8Ib+iJlZ;qo3sneY`0BEuU-q+WQ!L$ifEEed`Qx^8Ox z*Gn=aE+e}OjT1A&1C#Bey3cJt3!`~_Vkhe$SFds-&nOz1lR@r#wbq@daN75z>5;Xr z-QsGMkm&dC0Ul-#*G9*%4UL21>7`Q9<~NS{cYzkwB~XjM)?1XZl5OMMJU8dB?Q5NM z{k9+1p(gI#47S;*hCnFD)}t^NFg;~upP`5b(49pW3VSVCqf8zz+7g{Z8Ys)B}j`d$Jns0@y zx=Qv%;ECo-=IWj0?vIrsI%!YzB4}(_zk~aTET9aD+z<50KhN(v$5~aah)+feUG&dt zf8h){<7;=TXeG}u$QA3MAU;dai?aPozne>K@iE1KB^Xz?G8n`WT!)U}N)H z*l99R{##3jbTmX;nhHdbg;?*jGb#$D^BiulYp-nE+OTU!>fPj2@i~30rC*2#_o2SB zz|m-q>&`Z3#QE-OOhKIfC0&b@g{#=p5uv#F^`IF=v09=ZpKQcRs+7Gt5@|q$psS+7 zhYN5PZ^;hwuV9|;hB;?y{TqPg{7h&dA|gVhMn_E@SPEvs4|Hp<_+Rhz)yT0ze3b@G z0M$gq79m2OeL;}G(WFMgzD`^m`KI^h=LlhdF97TUnHXTy0Ii>B-2-T;L-+I1x68=!83277; z7k8w4X?3hUae=^dd}`4SOdrV1i;dguodC3Ev4NDCnZOir*)oGf2iU;0+nmWW0o!Cl zef?C9pabu-VMTo%;5K=u$Hp>whQng`*MwaHSm@pZZ52V6{(w;)(6;p}17;Td5P;r5 znB4`f4{%GoG712Em5f8+!4(w-)1m|Y>19YyPY0fdq`slqG9XfL=wnlIfoeh7 z5_U#Jj1mw4Z5n}Rax{h`0(zH!syQI%Ba9$(MKTr=foy9kfRgbaDMN@HhS^kacbXz} z*Wat{0N|Mdq|Nx$)YRl;XyQzfELMp23=xce6f7+43(!dFiH;amPw(h_ok{xdqJpY# zM1;+goD9GvR!Eevh%`VMR#xPWQ62>ijm)(8++8Gv1`dyAVv)F`&~|eZVLz`nBac{t zDI!mGQ1910dY+wJ&Y8b%ZlZ)#Dby+Z2yxqY-JF{&UCz!$b^`HL5m9lTMDY0yO0I1f z?fXg64Kx<~&IO+1A=GHYik=bxYx)hit?bb^iz{-8$PH-7ixVni>CZ|tzU*qM_Sc1) z+>6eP%%PA$D7tovOR72cFAlo7QCej^-Ub8%jf%YvmmJD(odx@w#@Q{+8;TOsL zV*IDWIW@uz%c=}wqu~+v(wW{BJ?E5eNt}?dep-=yfz%arq-XXd2G#UARb}>+du>Fg zHoP5Z88?+3jfOi|s-To^A%z3J#c_Aa}Rz-)&N}tLtmadqj9<(c}c&br;N*dgnN#!LbL!2L~ zY_&yn_DjsqP@Q2hekwGAH-aaTg8VnmePB3?!l&J*xg(>G>u$Lt_Ko6;X;E}rYjj)T zuLkxZy*UM1Tf@Vn7%AEBs{XVz2Q1}*)Gd+JHS~5$3(YjUrES)7vh!Li_7=FoYi$>k zp_}?+Dw$F1HqSR}xk@h6Mm8H8{Bs+W3geYU=D^7C7nm1XB;*V^H;Zh29fvO4L!r-( zYXIivz}n6p7Y|q@EWJo(yoROwK&GpLPvE~#O2gY)?^iVaU0h?WC2tGkhQwW9KO}kr z_v@FFc+#;HSBpWY%^y(apO1jBbIjxby;KhQR#?v^a#Ut<6NVA&nNyb;d(jskD0~w4 zFpNLDO^$wM^X&LvfjN+4Y6VpmywME5Xwt<1j78Ipc}>H&9CjzR5$KfUy2I!K`uQ}j zizj5*Hm1n6sM}mvId-7nMOGTw56gyW-KvprNKwXyuObs$rXesz1)Ma4|K3~gdBQOx znk~p#6WCR_VxA+*WGpRmR|Bf@Jmy9=BK6{!MU2dE>-#;`neYlS};P z5$+J1!o~?r4a>N`S6v$wokvP4?^r=(>yYT?Ntv07A6XeGiZd5;tPGgrZis{UlNt%S z_1L0CoMDQ1<#(K!|JoFeBTa}Ik22sxqqt;ITVBfw@DJO{Tg4JARxA9(aY1b6CSSqD z%ZQ21rL#UBPWm>yVS+*0hvr(jWpMoP&k|0RBDa4j)p6<LQQx;*yD9nXZ+aAY>eud4_(Mb;o#Um<=N*AkhbK$!p>t`yq$I~WDV$|M 
zb`#Gi?YoM=xU|;ZpD&g>MjAm_BTa0gpJ9`AN9UoDnsfYDmW!P~tvqU0Sv!Yz&HL#X zagon6+80NenmwZfFji0}T-iHd-r}TQwFzXt?AjpPw+rjhNn3^f8Cz>4?h51m-3yvSo< zr6bpvni6CYrgIF$F`7HW_8%~aDs@EU^C?t}Z}7IWl&1c~iRa?_8#d$7&I~;N1!KqE zV{P3MDj_>OA1)ix&drgg72F7You)h!$#*p|@@&fSAuOIzN%dkLcx3ejH@w(;Ftdv; z3HK4u)<3roo0mqihE#^dVXs3Bo*BT-9PLa9z3FTmQ}!MaS{FyQ5GqC&lvP-VWJ)W7@}x(iemhq!G*D-?Z!0d7>+XDS|L7<2 zz87=A4fo~Ui{jO=CiQhsbzV@L>|yu)6DWZ)8UhX^O9^DpFE3MR%;0x=L|Xx`7cInl z!8?d1i$G!GKO7T4F{JEg!F_f41~GkvS&2gEY6*(;m}Nm_bnVvl^lk@N({K|S!6^PE zy^dH6djC;*&g2RM;`mT59zMQ}fq{Yb^>sitMzRDnJZoxdmdZ&>1KoR*%Z34VyeSG( z|8%tgI10o$U=x9NN)*@EyJ_xqJPQ=k-DtHa$6h|c7ZPZ$HN$YOoCEprjpkHdfP2LP4=7a8CexdB31 z-HwZZ`rhPr|95Y9caC~?Cfjf$!ft7xwkA0~K9EV|ANz@fo9}O?-Dm-cW(RaKLqJ`` zCL|^%CM4{T--mHQH47U+XYh&!?lN&x@$lg4m_#9;H3IvG8mIl?o12@uy1E%kXOUbm zntPya(ewd1i1kDff-#RXsa;&I06=V!*M49#=L3aFQgDIQO4gV~9?W&1tc00KXmigz zG*7HqN(ir9u*Q&IY1HXJv zh+hsK5n_sUp|_Fw6{aiPi0%Y-8u)G#Gy-BT}=q{`MV?dZl>lB!$1dj-}e}2JiR=$6)5iOGtan`^}$|qat)doqm>;{MO2ynK>Qeay(Z_ z@)w-)#v5zi@^_qO#(GvnLqHaa6&6daIk|E(?5#BPWg%~md{Eo(;E0u!=MbBKX3E*< zLnK8OP4@8Zmi(g+>xu$47wL8&19JXkKV_a(8mTBiT-Ca0m$DRFM-1*2UG51^j-n&omE_lIoZ#Ew9ut=?it!?o8HZ00UO)Hod zQ&N{>nPyeH#8b4YUemPn*Ih^kIv+KRceBuBCWy9D7F*KX_KW^8q?#G4#2B)}xmAFS zcRH4F7KVKmfx<-yri8ZUP;ymF6J1n2=5p!iLtdl^WT5K^GhL!pZn$}F33(#@`vyJE zxM(1B8~rUhMG z5-7yzvBq5RoyqcpQ(}!P>-yDP8C1=1Sr`}2LE_xGEEY7z(fSHU^5K?&6_>WgnGKXp_H+ za}Zn>*4{1Nl3Q|rF4vE!ompRzEBg91FUE>_ipUFU9hjV2N@VI>Z0 z2aj4Y<%N3;lESM#SkEG9{7)fOKZZ|*YW^hhe{}59gitt0i_W#)xDT!f1lyyu^Xdk; z_Nk^xH2RDCRhf5P@aa%x1ytx%b0n1HvX~E&+?|`0?RnCxSk~Y6=xVMxF5^7D5k+~q z-?9}nw#UtP%odx+$7yHQE-u&hPu4DEeU(=m8d9fDDMGj$`{5%~>=q$$0n`lnmjoW4 z3kzAc)XKfBaoqc<+*Zrf)^vN=ei*;%Ycp2`?{{v- z$Ci!_(!JZ<*2(TM9g%j{McFDE{Zd?~p_D@v`N2hvWk_O3Mob1sja_=-i5beIRVjKB zXa0Uk=Z!d_yiwun8kuRB{Pq#_)HJEmoo6W$c}u-MuZJ^e`iT~mEEj|XLyX^KA#F7x zZ(MOoLj-b&uoN5L!OlW4fKOP8WkU^Nf?kI4q{vrQo3sN0pPl-ww!6wA&~r z!a6?7%s=}%e)l2vIkV>GmwldZSlshO{|N(mladqg?xw0UJsWa>VhwP0WM{NuI0`2% zt|TaXrz3$aJ}Ho**!I%#a#&#qaYrF7-`D)M=Fi<3Jv_hh><{5PB`=k!aKvXY-H;Dn zaut6b4GHL7MEn#jWO-=8;-gS^oclu3XXzCx+^)6kNcwwM>%Y!~RE^tPC<6l7La7&S zl4}G*nZkm=vld#NkJ^g$5&Fq_ZYOLv+MgCOCXkZ&V>=!BU-;hj2v> zOi4-tj8vtKjrV{xeUVMByr}nt>FZH{ZdJpUP)Q9@4#bdzD*`6Eg)kp z;D{B8$l8qw4G#wjbLAzM#1?wM)9o06z{WDujcE zyXv?81Kc&fF;YPE;i4HZEkY`AT|!Qpn3y1M{AX8cJy!@56LCe71Xe(b7>qd~MG>fq z(D83q1IX8A0)P@CBP%h}goFf(sZ5whC_UFFlfmDj$L*zbU#YWr+~&B+m=w`{0#i$c zD#q7{n4zXQ{(wXa{={U%Kz0xb7Xc3v)F6a+f^3t^4~yt=v4%mjTm2u0l@G*xn0H!$ zvoj0Fa13#XUoO;(@BpdSKwcjM)3;7JdHErBIq*Y|of)P>ndOC0@0BoGIEPdJ8Nke? zaTWn)-y`KJEUy!y=ZY>qg666Ncgy37fyMt@YJ|jAKG7`@Yg}zSGK}&PsH->5Qs!<$ zCC1MTL$u$aieM#j%xe&EJqSePF@9T^D?`#JymaUZ*jtE1cPtB%u|>$RV+~fKgA9c0 z(#1EO-9^dT%P+w?1y&pK>KZsSyVCR9e#|*iIWYnO_+sA5w`Iimkj3zT0lJ&E z2vEVzb7=+@2~VyZZR00sc|K zCtz#Sw}GcoN>i7I1E##=j*? 
zo8bP+%Xv*wU771ymLz}?q`*JUEgd=im2Y#yFUw$Ph$V==Wkj$JQt2~dEEiqhEUIks zvQ6S3>ffLQ))qSFgcS(oMdm}_g9j$NY|?l&BM+Nz7uSH}MbL!4=p_GMqp*CL{M<@= zapsziJNU?kKfxi|cZ~x`E#v!U9k4rD+^t?VRMu>pUttpIuj1*iON(Kw5Z11KBeMm* zV-AHD{Fhr-j`PaV{Z;EWSOIeuPr;2~hJQ~UTO~hv%hE@554v&n1?ZF7>3dM^R+F{v zjSK3omS|UgD)E$2h^t9F=ZQADB?&It2Eu3sYtJdgTCPecO|$%GMKja-h34$*`-Jhs zWNjy=Znr?*eUp}rh1wu47Bv4jAMS@<)`P+1yi>a6L2Oz|f`cm91}sE38^?JG5; z+Z27}HW>n0d~e}lR#!#KMa`qFdOpc1M!}Q;J^)o0Y8_Fwu4N_35{ing%~!OEG>cx3yLg; zbxY2i>VJsO+=z3$1YvP}ezizOeWzT~Z%kXe!BVf{e{i}P5>cc`Oac`CNRvIOP1S0S zskfB~cP3pn&5uNIFpEa6MAufPF9j-k9BaOCE4XS*HM!uFM=2-GGl;R|;6~)Kav6uv z361h`mF*)Tb!%f0DgUkdwkW6kjb0jtk8^9CsZT!gs14QU z#>8*_CTxmS-#ofzw+f;}p#DjtlVeF3han2@oSWV$9=hCwS3!z$&#K@pqbDx}YpU@f zZDFM;3y2XI>Yh~EKQdQ3sb`>e_pU4DQ(X=6PPnUjqZzmTaQZI4Pa~gwM3jnqFd47Y zMkg}ZNZxxn+@5b?(IF7@Q)bY7Yd99cH{}^y;QXLDFVr&KBGZoLAZ;OC*`8GGroCzM z;s)XP8E4B1?az5BS=lY9fF~*yR(&@(?!z*CDceR(rmPWB+CoPmM&<^ImLw73>z!_3 z&!vZLbjuS9i2Ua!R35wm$fQmMiDZG&7k;`+UliL+k+_M7qGB!u#7|8%IRM76HyB(k zk|)LZs3e1%bQGzAhiSKHmX+&GKWK;{hCAv!hRZ@f14}|s?hrsX)YhPUU-?zS*S3_d zIfJrUDU2SlP25P&w2v0he1f!@4b-Wns;AA|J_Ff}kyN`5#kUq#mtynI8Q`xYD`otg zYu2!I+>?e1=(DzA**HA?6RJ6ce%{?tem*|aFJiVFx7XRhN3w=j*Bg2>_?$1)F;)|R%kA~oyE7d5rZIe*!OLjGxDXMRgSqX4{p1NlvU z-zOzWVEK}4!(J;fiOFv~(i~fbT+P7oIQX6Ms*eEm;7tXZ%~`Z?ro7>kVG6 z$pLilzz+Z*1sW=;7#wobc>HD4^wcc#t<2459CJ-)e)U;LAyYdWDAY zPG(}_-4yQ?QSu@1L0vY5E>Na0&ME`yn4XPo0myM!v8}DI58M;|4F7^wtc(Ej{0b)? z04EVnng!^{Y;7x7ZR28MeE%+L{u?H+T*~|_PVfQ8#tpC!3ICp&mIgxM2{}eVLBY-& z&<0w~F|1~#KnO8F7&kP7-U@XAxPDj#&(TK2_R6{{R%_M}xWY~TyDqT7-}~9m8 z5SIoHr@2F98C~LK>MqO@f2GCZ`*&L1Pz{jaGR4DkQujhGjIXj4LKW!6Imd z?=YKeN~z(+dCnfeq^9xAsHq`+b=JDMt|wH*(fMZtcj*MHMkMjp;~ z8~BdzRwvImR}~L9!vtY7`Am3uCW34+`Gm6k3n+0;jm8BdTRIo?(n`jxGa>yAyl&|8 zCy2*@J+vIF>k{fAxHNY!Ew7gs9&_dc`X!dcr)T5NG3h`J!o9z$hDLuw2-%dm!em#( z%75>dJtUDi9v<({?~S|zHnt>tzr^0jbUwU|s|*QxUdLNr9gZcoCz#%So8P8JbEoI! z1hX4IJY5GZFOm|w--LzN%0STHOJDhi@V|aCD58aip~6r!WS54kJ^Lo@Y6L8YC$rll zxjuVKcU^I|bZ#xW5=R8+qU_#Kq)LrLIKkB-u#huq&$`*tWeQTn19buDS~6ySZD zk5Q5?>)obd)|&e0NPj*dmt(xS*w$EqX5LnqCYxY|sBFxuD(rKF8EY>lkG)@1W-SK? z)!E2$*upyNN+!o11pREIx-t=67^AG zxQ#|H!5GVcf5_>tVl@=Ca$D6D?LmP_V8?N$k=keY_w3bcq|$|6BHJe0r%`&V%%Wh5 zW14@~?EV#DxXNN{hSay}osX_-lZuRG0neZxqBL`S+Oql9p`Z&7XOmixIYzD<)>T=c6kN8KX7rQFiM|x|(tRcAHV#fS?gAeMD zP=jVZ{nY|(>z2Ulz6L1=MH0(1))_QE`vecA@9hH9c%B#Z3DW)i=ZCZX4X?3#I0^js zjYEl$D@7paZ18)0pSuikcOEBmE(8=Cu_G+y0m}lS#clgzb%i4hlE^~#8%S&eCry#_ z&^+Ryu@?I_BuxsL5x0*=V1y$nfmK7-Ev))k98?%EB)Uo-ceO&Zr$Y1o^qP&ESw z1Ho$amX{=L&mI;99439XZhqkPF|4_IS&FEOEW=-={CB9kNgz!ZrkUWpe3n^Khs++E zqB+~)-8E@M?dIaOJKS-cy)(W1=ITrp@2<;6?0|@T7Pqly#C{afJe&HMOOkiANd{Rg zM@3C&Ivq&>NGI&qE06SR|3ynXo>& z7DC-w;Pnqm?+eL%oy$ruj@}j=J)tN>+9|H$ni$=FZdL4i60g2DVOPx=_znmgoS9qB z#^{P%R3m}7wl-_Ri{YKsOwEl8VYg#_1sK1b61XC;BWa%%_>@Qk9ZE2fs*UMmumogiDt=VKovfKj* ztO|oX;UF@vivCWJ7qY%DfU-(U8ak6zw+;{R#rStg3>>UGH|CszOL#KTc#-p%Uo}E_ zXR{N%d$@++WJJYpfpL$A+(-D)2!LpKp#|Uw^@W3e2A{_@a-)^qUxMNob2@ zPDNHgIORmjoK@{54To6rC?^#`A=&C$6&JEerbwV#!^d-Q|K{{*+fwk@GGjdK6~&?q zIH2&~q3N675hAfbE&K21qpC{Jr^k)%huaU8&jEJlx?I8dj*fo^d^gK=Ktl})1ts$4 z3FHs(IPH521VW+!${@_sW;S0OP*g1creD0q)P;ge0Ro|Xz@z{G3FQFoX^MNj#JTNT z!0GvNe+qD$K=J_$RO;vFXReJY4h!b|&5|V@P>d6QA>?(Z3n_|d6k371ad32GYD^-7 zIRiA@C8`?!6+*#4Eu4jgMObL4@UgwUJs>>?KGHX3Z2_e-J4Ul#J)f?3u`wsJcv~_! 
zksJRt4qt7w-CcHE_20j}zY|l1#YRL#jE+i0v}#sr-JF)>bT3@C;X;Kb8~XiS1k#pbPo})Z_?Z=LyhC2?(dBK3uF9j7kHukxrvkAW1by-l$tqMh14Q zt@k7i2jYE)ZDtrW5=xz?0CySv1r8Uy?@;3PXkkH(E8R)~kb5x$)7rm72Bi|rG3;Hi z5*8#55y&bR2d#XDC?b#ybHe{@z66P8rB}IE7t}@sVl^3z00W2WZDo>9+6uLp)C%Nn zC@lpAnWW*7FiHIY&`Ug@fj1%uGb8ZLi0i?8NYuu_JZ4I`Fd(Icmat%;^S!B6$9^mdx^KUJsGTp2l_jH5D5P{;;frV6Nngo3%NA*XlU>O>siVc5#)TRRAH8lCP>w$5FiK zjc`Q%PPju4b)=#;7E#tFRxiXxNwt3J66CG7FUTZ=7h(K^$S@|gTYvd1m6G!Fb)>+M z-r8p&PVk5*;M-_dpQ^odKn9T}4E0x2dlvP}=_rFXrs6M1tKsS=+!15?=6E8ZQGS>$-99c3vR-qSi zp;l0_3DmLJ2XisP3`*N6=6Fq(r{Z#(=CM~Wt#2l;`f54 zz*;ay<5Y{{PLPX41UG$zHXi_15FMZb^yV_u@@yDOgoPOX{EVlaQsELOu=mvZt2=Ri94SNOA5__K(ES_~wlv#gC05oAgf za)x6gU6{ptC$I`hQ);KJv(ooH&8M>2<13@U_qKp=p@7ZXd~l=>|NZFH0)Lmsq#$ta zXoG#R`?uA3j`*o5@&&})6H?toIJzF?}N3B@g z_@z_3L)Dp9yG4unZ{=hN$0yryUSbSbgRXH$ljJDIUnm=Qp59|8*7xO|=ni`jche&n zkeHu@#CbNyp4llyIpR^Z{RByl@QRQM{;-On5Pa+^`vzup&Z=woj+=6yzC(ob2cCtJ z)iA=67~UngK*%JXOhl4$!DL_ipx$Z=4}2%5$10mnkd(8NeBAZOcQxzu z)VvL{zPet{i7BPKx+ySvxdeRT^YQ)-#QEg~8*VdWX=RO9)>}%R_>baa6v-B==?H|;ZJxi;&1u~8~ns27=*`}C( zvtn2`HPo+E!jqT?Dx~e0J$Y}YQS5?shnU^%eWmouuV`P8_gf@T$lBk*MU2K>#HNkD z`%{<><@M{AcX?ei>n!)Q@MS)%b-t4QPl>TT3;Awdj%Ge|iUF+EGRcmo&ljDyQSXIZ zDa!^m*}B<}cxJV%!#!KkfnF$k^^l*q@Xo@0DH(J@(dH4}P~W17GGS9`^~ktNo5f1S zpTI(N0zHUDC;*qw1a`Kp#I7I=N?K38VPsA3>xH_h#k*$<%Qe>~xTyQM zEUxyp_eN!zyU`8r?hdJHew?pg z-32qkdc~2sQJRpA@3|T)n(z%E>7x1T=ibQik9goGiIDVhw&k7m2@wzeIz;Qsu=LPG zUi_0!Et@^@qGV*ZsqrA&&ke(rf!&v-pKwJ>GzQ zbl?O!=SnDqc%q9cd-otRDNAaJ+~fJ9=U?~)A2LY5GX@C+`mgsQ7)hX4>jS8VLw*pE z^8iej0JGrwJAoVsXKVxc(U-sHfXWj>xrbiQXxw5JE!0)e#42V=toHp8D@p5LZ40pG z2L_WB5;CB!EnNGC52*o{%aKTo;U9ZxFRBNuC*;-DAvK6b`GJeewBRRvmhZ>&G4d}i zUS3f>)k)S9kC{jSqJ%+&{+bDdNVqT@2?XA*Mg3~VQyCR=1n^CAGBY#tM8oa3yWfEf zW^+0F;usL+_zDg_(BDrqcDY!g34j&z^Yg|U|0?RBkv2_Vt{Bl?#lxn7c>G@}L*MdET4#1q&BfR1m<0q@_=R$g01;fF3$o zS1+IP2c)Y1Pn@rkg?Mh=w?t#>O2>)5328k%aVEF;F7U)N>oY7YhPv65% zU$A5>uDY7q0+1*L;0pl9MAs1xusH#nruoY2_#fcYkVvHPgqtSl*K2kjHNdiB z(T3JG8b+X~^5jU23eL2O#XhV>Mn>kIkCb11>_H2esgC?$CyZe|KN>K0F=-(W&$e+k zuhF$vN87Q)3xH8^*<~F7sSt5ZNUu7_n2+7D>WxsT-P*2~ zYN{M(QciDc!9SO8_glk;ALl!dJ}>$)7;&w?b-gkaA&~B#|9BCQbTfXBqiuS%=F8@d%yKG2i-kE34$%pnwT~R`zqHA-#LDfL;I>ZoRL)Hm{$4C3=G$dETjPU0Q*$ zIIm#lB?kmFqSa)4YZ5Kc54pMHBxbGMsclEte01xF$cOZlEpW~XQ;PbcThA~_>;#Ua z_v`P-W%>DzrE;L9Y~PETKW?U2DvC8XH#w-^@U|B?mo`&M-|j>3(=}3cO5OMc<~gdd zHNN7Zni0dkt`yp`6ys+~iiU_Ym49vwy|OQpkH3`RT zoey7Tvlz7rvEx5Nd}QQbbsV~64oR2D$biwpT&l=K6{XUdz?DuOMfY48$sls%II@pP zveJzeSm1H8G9_E;%2MuV*BpP`7iP z==v&ns;r>o4OOQ~jRuDLleoVNSoIcm`<2lkbB?Qsy+K`?$CwE!LQ5Hc_noKlMkr9hh8#)pq6mHVoB9&Cp zSy}5Z0XH<><10Q5{jIa2?Cy|a=*|lH+bD`G^J9V@rJ4YYko&ur3xpCBj1L-X(6u6C ze%M?AHwgLIh8lBdzyR*@nlo@8GlWn<+D8>#;BiP|kQz!lZ#`~jbmv;d;MkOVT)u5R zV5BnM^=l9cg}G$r`|=x~!50O>$zXk3Q0wn!^uXqFK_bU*Dotw)2BbsIsn)tkqrPsN z=N`Gfm9APkYv_M)k16&1V)-eW+OxAK;HJ$@Qp>7cH1D9$Omnl~9Dgo9(l+5RTpVUK zsMAegTpmJ5TquqGxHJ`Nx`)LM;|gZkAXS(@kg4MMHCac|_t%kV)W?~L!^}=`?W{B& zqh#4JT#iV;RWh3oR$a@gG(z>lQTp)NqBcCnKwt!~)G@cNHEI!uD(R^@55DRe`|i#T z@l0Vh1SQkcoMBWK$?2v^X0RVAoqI?M&5;FE%N^AW$IUS1{Im6Lnc6cf9)iPqMx4iY z&Q}IswnQ-6Yl%^jLX~F3Aj(2nMu_I}zJS+aTf>$f3G!pGtZdaZR!Baoh)PXmdwZaZ zY@k188ZwJ`TZ0y6$PR-}O$FZfsNo*vLew{a^_Mnp&JX-dd-G5Qt_h}>C|A>bP))u! 
zdw+z8_DlWZX_HAXiq){b7Yk+Q<&;%TtpHdOSLsZQ4JW3&lN18h^d6k!L>u!K+sM+;X=4{xzLPkHm${9`j_Xeh7son~*bpc1uLIlns`m=P3X^(EngrEpa8ob9}rWq8Xjob2D5m*4?1TQ)fhCb!)$px6CNecT~tLrBbP|ElC^I zDRHqP0o~k09XSYz;dr@N=NU~0D*n*UDpsAM96PBjiSCoSSl1G62# z7`TTBX`a47iJ~@uCt}B}efN@641zx8rL6jS(4|n99tcyu1>Mid?t(2<;Ge4+(xFO% zJ(5(ie@ylu0@50`Dcv(DRU*~qsH$l%zG6p{n;RI077;xxMX7grt_`rP?d+dc>7lfK z^{ntqJ1N|?-_$WA)a^cs&0?IsbEiFWJrs-UIMfBp%(+NVnV4%0khiF+#OjtrXTVlT z^o~viN7Qnjax+)MTZI}!&JR^`?}^^aHK?sC$L=XbMT#C@Gl9K5Z<>Y9*r&7*I@jZl ztSqnLr6axrdT_f@tr5>XcECEujcOO9>lT9?U85bAcG_eCHV0X1ZRw$fqm#jQF}esSCm4=TFRmGNQ~KtH3{2NqV7=zrfKWzpoqa z{kN^wf*Y>Outxtb0TZ_Q%n(MzS07zkUp?9|0S6RS0FEU@Lh@v=yU!4H{Pw|&r$d}0 ziPMgtHZ78u!bxLzQim5jv${Y7odcZ?EY$A*4v4$_^s_Ey4*v|(_RH1YV_e}o!TG6} zL{gcz7tk(syfo_kLspd<57Fhm=4QbU-^io_Pk%I&C*@S& zDBLKWSBoi-g`KXA$Xt8PtHKW%eH^{x!%0mcW;DlK0i`HdTdW>8CAc~u+b)N}c;Bn< zf<~W}{RDq)ok5W9RWRwyUB=i-Jjt0^e=X?zSU{dN8$aVvo5N=%X&!r`Vv+FB{ zy*5K6X;hM!Omckkbeq78Kkeh54*~0wgk9C3j$U?sm->BsTIx&>J_%lpra!&?`Qsa< z7dxkx3(qFC$)m@zqiI#Q%>v#EYtm@}bYc=jxn$Oz#dXsP`<}RQdzoB3x0`K(<>qG^ zB|@+!+?3Mpet5Od9b%BYsZ(hFr}6r%xWzSr$_QO-Nx0Y~rQMn+D~l46qcy;tUlaH2 z!$AaGefyw{9F^!@9HEW4xnYBdtvTmYP_EEx7JrNl*3Vud zMB>=+EfRCKUo!<3>Zf|S=trf2v{VRyIpKFVsQWuL8L{^JS*2xNS2Z8!Ox{G(>Tfb` zden_dr-2dek^w;YI`29MW2?eH4SDUvd$D-T>7x#TYg^v5Nu3N3u=iNnk7#S)CPUqg`H5nO&6)y6dR}P5YGL|$OU;{W0m=ru zxTJ{dnO~bw!Nl4TnabFphIx_+sSdo|cRJccVU~p`gW2rnCdgbZJPsaVl z4|--h_{qRDfM379S96{Wmo$kcS3DkHLt1!)2m5KTn@pEoR0=%qszIA_C6jvU%RadX zaZL|i;pR&o&$PC2Ahc^&tcLe*a5@~gUw(f1qPL07WU)dwXuH%ng;MoYBvFE_4s7Zf z9L?hRbc}P@WXR9|neVssK|P@Z0Smc@AB1H>v^)-VucuOb!AlJc&I>Mp2Cqq{Z9B9z z;0o$r+Eo7PZU0K-^Q~nY6^l&xzZ)U@d!*Xb?VcLI%sifHOSr^zKI=?B`DmVtlnH; zP!7)7uQv6PF6w>0{JjXo1GWCjZ}Uai%3VtJ^v2uhNK-4adMh@MhaI9mYq6m|mCEg#qC57ojqtk#r9~x+_x z4tt$VLF;Kub)mLTFZogP5_%Ae`nrdOXq@>5`7fMbg#Jzn^29pY2s4?viY!XD6O{&Cp;P~?Rx3Py~KTgnL4Zea%!b(VF zU!!cGe)u}4xjdap5;4PDXk_;`O!db$q{O)ym$Q zs9K(yQX8E=pyJ?&@K_}jv}#WuWKFV~gMy8nUPFx8y%pb`yeTCDBF>qlX-4CSOKh8K zS%aky#P8z^b_9`<^_7bGlcG-X*SnPyo+-v=V#49CZ3;&A+MiSEg}3{3UZUH)GUgQ$ zruUUApJAD|b%d29)zhy%m-eX?NM2TBks#{@IoMV8J-$GmhO5AR8#!6VrL%J35dL?EeX?pKm4OvnmhV9^PQ^;~& z3tnQne|bEXhZG1YuW2cOsZIj$XE~G=SAo(Zyc~0tDv0!r8H%Z%rE+76Q_k4WR`!st z;ajmBOI)oP>jznast*cME+P z(r1{+QOn1ii5Ihh@#vnE*o1FNIyeuxuuWTRF?qfB&W!hTvy3`-d}qv+F**Jqlu|nI zGQAao)l>Av`3puV0=ZS4z6px`GwmjskGSgvAJWQ1@i#pjy+vjwtE%_sxM?dwsMBx9 z+%F5hQAN|G^9AOi?|6X-LY|j7TbNVcUR=ADBdR)@v2>_@i!e1s%r*x14@y7qp)8ORybg^BsL*0GXQqp}~X()MVc8e*YJT*xOW5;|?l zMB)egItclmkB?_5$5L6bY5;rwQG0X-YZ-~&S|nGb(@rG|k*{UxBv(RZXfC?AG%-o9 z+S2~6+F=v5X8ft*7jXlcj%Oe(!sCqI4PW+$Bcb~pD~A+P{mMuGXtgQ?Y6NMy8-5xP zJ}0qE>jmvyE0=Y}DoryQ<##{y%^~fFr3<>;c?9yvl4a~0FZfZ4QlHII*s%Ia za&fKW+4Dl{N0y@lCb3c{&HJQ>a8KQ2eeMImp)0GXaIwkr-towu(e!Y8^_&)$sOCoN zi<4Qg2Xr0cffz!>W)uEo)Q&n~=iUVbA}wgh@R4s1Mx7nDVK52H28a)R2uQmL$!MY1 z*pr^P&bOB4;5mfUO~-hz)>RQ%l}+_%tLJn()$fOj)DS*yop9QwO$!ZUn1eT8cC@j9 zwA-#!tx(-pPC~MWB=nsQ7EJB!BkOI?OpU_iFsL(A5|=Nd)Y8eq-1`q74~Z3ha0sC3 zUXn0kAYlbmyYL2B(zUU-M_d*ud+K~ zUyQE$NTi$Ir5=7fhSA6fDwZUfb4|Lvq;?iCsE_Cyi#M=Ur+ZVMbZ;~>YV}lwJ5MIThSt?4n4f9-_SIOtX`VE85jjN6#&$rk+KBq*|yj2?u6Vgag zCvv0YMP-N=?>AkV1h+0@g|DF}-vRwz74N66x-nV$so)>|t9viGeU9-hktr1BIpIK= zqMZFieD`>Kcmh?B%W3N3b6Ni}<>@%Wz^fiBOTq5|!Z~s=0s^3ojM3gk$Xw&d=G-3T zmY+9B{0@!dJ78n+VK}e2O?96{qU6&F+wF8BnyI%iH_+n;`vsfx~ zfzAd^@qW}hnLDnmG(I7zuGYD0zR%rRnWR^{YR)37ffE#5=zMC3f@+(7pB6|yR_gVe zmcb6Pv=6k)W8Ok1fh?NJr7i<3p+(VA3n44)SBZom>sKT*q|#M;_kCo*j`K9U=Ux{) zHAcru30e*eM5sG>#hnX=%uk%Hq4q{s5fUy^ckQab#n5gn6{QrYb@acrx7FD2xCL-a zab@gA`vkip!7l?@-qW@zsN>2cj*08B5#v1H1jUV#H-(ks`|;P2a75-aA=kKV!){EeS6Bf;8D?}|5*Q2#w1%kXOVX2lvm=YWKS=IWV@TWHt6&= 
zz?NY15=V_ZO@SM*+Dr83klR<`D0N--}Z~?QH#v@_yD= zkEXNWe_+3)==}CJ?ESc5%G~=O5+k?j;B9Ko*G0B-9BY}oxs+!2#}IW@+fNPL>jZ@n zbgaEKhBx8r)BzW~M>EHGwY&=(LprAP}5Buee|JD4AZxM>J>V$7uEUHfHJVvP?o$DPg zx5KKx7t3<5GPb=zL?Ffd;_{nyf+ALz4_>suX#0}-esdlBk`;ELy|Ql>q;})-l5EDH z_^vL}Qa0WvmwaK(Lkixd2nXV>4A~N;w8)7hiC_@&H%Qz`A6iCR|2F>CV4?;`FWemh z4H${G?V3-CqEhym+|m2u&5{szJtSF+96k8GPKa z1hD8hh-5%>13aZ{Yu&A!lW3MmTL0=~g7uhe<((_d!_&FV?|_zl_pky^$~!5v%*TZK zV|?x_{-P6XjPlpgAr)tzNcTD8Nrl+?yx_)+)7G#=y2(xdzVSCb{=4Czh`(Vi^ei=u z)8$EyNH-IYw=??T9H%$PfUq$TE6qCoWRld-K~P+~Zmt+(EQTeBZ!UkzJlEM|Uv8XK z<^j`7yVZq*|Ay<3Hzyln$2%mRRzpIPUE>+VK+@JtRc=4U-!QUQfoSQ7_)W!yDH)T9 z=JoPExMIGJq>>M<9!HBvFrt-69LsJ%fb?G0v!`G} z9SNKf$)NZ=8V-K;v3rbzVD-Evgb0rKqWWdx-nW1=M+4 zGk?{h^!n8%OZPM-E6e_~0c#=HOb2w=&*n|2+^G(hq~`9AF4vxY2kZ+a92IF1rXbc! zYvsT3LD*+!<(`37ua1@RxQuXj%2~SgW+gU`!=Xiuk|^I*O5|5j{SKH}-9r*j>Q@2lWaDR9}@ew04Ss4eEphy7kQQlR%wiq0f$)$ zM!=ridC}&Nu`N_C-MOGhX4IvUH3{O&+-}};U4|b?L7GGS+!f~Ouq`}su>iB>nC>mBVv;|?X9GN;SO-&M7J6wLjg)gzwn5{pH z9XR{?z+)P%%fS#^sWb6L=8 zm;bG4OS5ND&oIDPEF2~#CgObMfH4$qy>ztd-PYC?Oc5C3Je7l8+xCEYG#4lH#!fc* zII6v7q^>^PCw%S#S_|cv6sWdK>>SXo-VbCDr{Pz(+g0=8lxhZTU7|cXM5;EgYOwyK zi7|fs7$bwDc|ka;(D8Y)q$ocZvu{05{<^&%0PGrCA?7gVzkQ5{R{DjbP&t{1lR7Y2 zE7K*S&dIKaDimrDy40Tn$EK$`#rNwm=X*|7tBL+ZQyVM=Yo2fLECtSv+EbF1$v3?4 zRsIWb(t(y-ED?|_sWV3+?ypCoWo8CvacCA=wsUn5{$5WCL^3jcuzs~)V^Gq^^QC~i z52Tn@T(8^aq(c5;0SLKX{OSwY;~7n_DxU9vBWLY3y0?ChQGdL*1&@?A1s~ABcb{QQ zA3d1h-ak|N>K^D8=2b55?roxoJc&P&6)kvGv|(w;#U$5k!sT{8e3f2>3Ner+X-}j* zh83$93F%b;XJKO6Ok#GrF&V~`Rid8HKX2#GWh`i4gqxr}a--`?x&#!{;6B+|#w z(Dq6caroR%V6aa;;)=HSeVUm{Dfg|?j`&1;q71)Su=(s68C8NpV9g9;u=Y~x8uyH< zvnsJ;?nQ^7=&7~DsG2lzw=p1Y)9A=nhO?mcf?F-|O&tP#9n`wB*5g_TC%pL=lG~`* z%rOQ3d}VRC8O9tkR3Lm?3W6<(OjPANMwAM+Km!o3&^|p>CML9RMPucCAY4w1fRfXZ zul{j~K8k$cR{eNonUgLL;x|H##1GJI zCsHAII66BU++PMF0ISC%yledeXzSPHw^eiZgcn(PEL4yv5edq6@b>GG5)J9LGLgqk z$^!vnAG`kM_nkah`T9;!8M>L)_6IgDaK4GHTG-8fT^h{#Z7u4ngz^Jm!h4<|3myJ0 zpB5?;hdj|Zi!VQnAorPdt3g4GhQ;i~?{rIThz~vo985)OdfIthTvauh-O2@xfmY1D z;(3mdE!Ij{OOpzfjCwG})SDbObRVeWXA0!ctj9$a|0y!fzm1RiKQcpTbG_@qBHQ=2 z?Cgd1?|;dCDCqN4=7Oat&if6-DxN^$=6s{F2y@|}j3>0;J6xRd#=*gmCQgmb1U2tT zOS~+Hoj6iKB~79HU+leQP+RZcCrnEzrC2G&gEdHtyK5Xq|zpPC7eVljs>eWzQzvp8&g6dmP5K&q(Sj;54CEBdopJ!!zvBP&q zw;bfg(gM^O)}e}too^({nOP5aRvKAk{i!GJK5jrCYE0Gb)F1}SOf7D**8qd5kK6UA z4XrxTldB6ae?kvIzMl*IhBRUu+Gr~>dgLmJWIJ??5fGKB#36Qr8HQTibEMo=a0Kxp zCB_6s?%(+eb#;8Yr~cG_51*$naha?L8RihPqIz_+D0R7v>E$(H$tkGBKf17D{}ur* zg4xzfxK7mXfeTWSM29(iEq-%dB>H}mj`7r_iCp^0HwA3omL10@O0%OP zG)}guC8$^1SLZ$o6jsL)Ef4!ipx%WkzW&p{;%C|WCgE#sDiqF?AaIF`Tt`#Wp6OJw z9yxw1;uyEPG!hL){ICEi79h8k2Vch%&CMPkwjHWwMlXQWSg3r{lLh$mCe}9A=@6%$ zKB*!tcArxle;AzUgG`tttz0DG&0G#=%BOG15k_&uos`0(nV~g*1>+T=udYvIi)*%F zQW+Eu-SGGB2Zh4y%g1^nZ_6Oq2Bhj6Ff08Err+Bp6^iE_j4=?9>tx}`cjy|oUbLdw z`2d(CUCun`;4%LSg7ic)E%=9F>JJD;u#B&MjRKDu8WKTGGjo`a_F_t~2>pKzqNqIM zj^9WnJ&Yk8p#NYkU3B#8WRd)3#+?XQf{g^+(V?!NKby_}%X#w-_TT=ho`eU}hdRFK z6v_HE=M&_pB`iX@c+JJ!c7Ax_S<$G?8P&N2C9(1x$s<|eP|(-NipX`)uI|_5&td7Z zp4k&n9y?mLMc2E44Vb?!40BMiNhFy{H>--soZ8 zz0Ld^6;iNwPygw{BQ8OkV}FFO8BXSz>(AY*O7r>wMt$1dSiV_bMcE-btxRZo1X3?Y?Xa;vS9-& z;0o-o8+C@Uny+#Fwqm>T7d23$Z+;{TmX(fz@iVTN{G}R!CBc822b7)hV1Rp3@ObN_7D+4sqgmuj6O6YDGR>!p&uZtQHrqeOSDqPit z7O6uj@5$05e&k(jNk0+1a-=uwQ;j_FoYiNmNiUmh69;m5VmmUz+rN#Sa7wp$L^Vej zAEg*ZxMaJ94hlKc(wSvM$Ch~!13gA$9IHI-E)SGJmEtMYS%c{Qc{{>tQJfWW7h!MY zc}*2rMX5UD?6$>~VW^Af-Ms^_IHTVL+D2E;ihJVNM8wI8xmJ`jKBwrbT9JBuqS3HA zHMkg)(tT&XzCLqP=ym^~__}O=+A5zgBCCKu8lhYKBJ$Z6DA#!v{JZA!|AE(?o~-_ZC88~NKXdbn``^|t-dXa9c-CcwT4l|C0}r3LyFp*f(*|8E+$Q@cC2=|F z{=l#EJw}evnkC^GnPVrNSE9m|RvwVc+bKy&XCn;25`;Grd7WU?Z|g8)%4s 
zoL##bZPw~O|GBnl#+*95vG)I*Zyp+e!^Y-Y&UlRix4#Mq{Fzsm# zBHEJp2X$sJwYMV>l`F&Hh3{fxQVVMyM~925&RqaQ?XcZ-2Cm_E80}sr?GLVIIt{|4 zg9p%xr9ij(rhZ>0RDDDXj;%ZIgU)|MmI2!#IP`E6vv84Ccj!Z~=j$J(R7(4C`10{4 zF9oOWnLgJIK;5s@w&xU}!La|m>G3|KhtTE={Evhjdx4#&;fQN>M*5S0w;{%j3sNb( z^mwmw+*NC)2F}aY?AG4)0uT`9o%&8i>42xF{u;M9E@^-4tq&Gl>koUMAbx^m@ka~$w&)*n8tRuTvV7x4A=VPMLaI@l$r(W6MinVshL18MALAf-$F!!G8T!Pl| zJY5aP%Jz>HH)AJ=j-$dSiJyH*7Glo#WN2S~)Fn|T5E2VK$M=i-1e57P8 zTS7#>)!S+wt0!W#rCQYp&#z}{mJC-Uh@qHjt?UvCFq<#4C7)nPJ(yO4vwWtkK~yYI zFmOLHR=4gXkDI45E>umTR&?SaiBjM&@Kt}w=dYlKfRZpWjzGdK#qGv{=*LwW*8b8V zB2kQX$g+q!3%ixtYs?@pez*49(J}7UL3Ozk(xJ_Ve<-qvq4I68{uA^1%7uCiycru@ zlV_ zepAoYR!^J@0*62sd0z;{B;z6-_Y(M3^Qjid2TiL#IjrXk#x&FQq2ft^UT$-7gtQlj zXWQ1vEiocVwCR1?#nEwZhX~8a^H^l}px;vF<_eQ@5l4q>1#I#L2_$5)|BsoJ#fo&# z_y4Ls+dTRYR>Sd0dC|FK4V1GEjldgxcz%iS5Cg_6C*{f51546WT1-qlC97VoJY zxX>oTOGu>O5 z-2aIC8+s?WqKW+c&}FfHaDkAe-Lx8UL}u?@BW|Xm{@n)Bgq}&~DFGLm(l;*@y=G3c z&P&9lp#Jr*|J??f|HIBC3+}!f_ckM=*^4lxj(@wI^xy6KpI_|Xp2dIuV*mM${pYFt z&v5a7Nq}{gES@E@krJCNwdb3YO&iv+@XhJp2uj4u?9;Bqt)SirR{L0&JL)vf zoLV* zyH7vB0X^9SzqTd_R4Tj9flrK=yoX-p$I7Y= zg0HEG&u}nnrnqT6#Ms|oMfDn{7ki8R;eu0ijbIsOA{ysnqh^e%y77xM5PY~vtfeGL za!E3h<%d-P%x1o1rbkEr>OE7Jozd5u4I?TUjj&>6dsJs4KCv;ah2gbe^5bh zZ5W^wc7zvChbgWd4-khplUuqyo<9bM0e0^5Ec3q2` z7ci$z@!{&z@~|Drx{K?Z8ThF28rcvC`%4Y|gVY_Vk2SGF3*F!82CJ?LoMTiKk!(tS zq@vyqMg62?b^YAe9H~N0mjH59FmCKEOA5e(Lcj4SAmpl+=Z2mJUH!s$j6&f`l@iH2 z`&dD+62<%ECF+IHn6jbp9bnmy4*i2UOt^CkC#yP~vgoD4#9w`CRNoSvGRhvJaqyAF z#)DLT+TO{L_;c`I6$Aib>v`o!^%fm3Wcai;{Ykml9tukD5td9G!&`rJXP5-n+M%uz z``T}p=hJ4o8B`o35v;CUaeYp2pY#8U}Xp9sdwGU zK7DPMebqdp@d9-D5|5CMYfPNM3)U{Yw8||w@CBjdR8>=++A|@$4M2d?pAkYZrpo%ftFNuOT9U8v58 z!E_|jYOO&iWO*LmQLoNuCC=ch+wIW#Dc>CsD$3!uH5_jc%TFQHDr>`F zy1*2)yi^qL9Qzp5uHsYBy`$+s!x?59ol!{;CS>2j(96In<|HMrH_iGUmn^&^<@Zf8 zL`r!TXF_1Rjfd!)vly~%8$uFamP*;Y(~nt4fWJ^P_b##;?8%}J8t%B0dp-|aSPaS> z%GbY&4$oyYe-GLZ-Vm?7bw6ysM|QLJzLF+(n#ttIRr$maOd&Wf#44Up0f~V4eMIj) z%HJD%t`H?w?dT+>k;{^tsGBmQ>M^e+Rt_}=j8l5>FkFal;QCYRv4%oJSZ3*h4_XTN zLq-bbD4-ICpdsTS>`rUWZgtq9*#Ww7UQM2wnnK8Wqv$b_!6Y!#Pevk99BJ3zh$`}*AoFoY`P*W`82GHc$!4_Y&Nmz zKGx2~%TgcD_AdL1^Ibz;l@Blxg|$wKV2Oql(yTLFdF<)R4OsHb0LTb(-<@yhR%Y5& zr9-MKlZx@NY^=phCEDVu-ZV(ZFuOw;!AzF~zI(Of!EEEW3fqqeG2L=GC$SeRMJR>* zq5y&o{cn{Cet0k?NO$8YRXwTzO4wh_5nb7GWWT28V-hCyxa{n~v-{137DXZ7 zm&CV9HRNKPF4?H&A6aB^{u**CD51gcarolIAE%F)^!vCX^)-Q&%IVyC=#AU8k*yDN z^@qq7{ZZ^f>?suQjKkc1(&qQaQ#-@P5mBDN2;%{II*GPuE&w7WRHU&-xC45|X0~)! 
z+^Bo?n_|W;dASjoi~RF?$59UH;fz^#UyL^-zwX)x}OBp>>N zmf9;SJhQ=y--dw8`4K1v>DcqOtZPYlK(k{lz$3C+{9qDXm^+_Ys~=Qn7Sr>7X?2w6 zya?H5QZn$CY*AqRXV%g#^Rao{Pr~R>;XEBBgNvswVYq?_rb||E&A9BJ=H}`elQ>%5 zip~{}?<>@v_R1bXV+$P4Sdo%(;(v)A(U=)I(eGDO?yYbXfkZr&!Mk{=(CYUa<<^)NQI#~- zKj+$rNoxtEtTp^Wz~Ko|X+@Yk!8t3Q8fza_}6a3ClY`-qEM4}*Xv z+yz|mZc_Ib7Nx@+)laSs29Jy#Vn5c?DCobR93Zq3L^#I_2AAq)ZlOqAw|N~u9l$b$7qq}j<>KU`#nA~22HjI zA*uDP8H7%H^e9VTI-Qc`-$df6-yncrR1U1#7O1lhakrD%7?1QwV2=uf%r6LgJoNkW zvqc>#c>%If2zA?5Dk0se=|WWsD_Jt3Jj?;qCmF~Jc6%M&TYg-R?KT%Y(^5UD*i9F@LIY}#^3*_BeN=%Z3le&J5 z39v5A zzVvZz1gPe88TK9nt0&D(d<4zf!sR7Q`REi+$xj`vdUofqjGabF&5o})EFih5EMYxMdN0@t2(rv8u=(>D~+ZB}(MQ*AGzcbD@!xH75|{C(ue2Im|ITVr#fAu3bcl5%;ECd9L~%h7BqH* z34lkOS!5GN8Cq}nbikQUZ)o|J4(`LyZmTN|4I-=fw8w({>|Zj6Q-4Hq_9qNyfkfwoo+pDu-yddz+p3kv6m^KkMNINno#hRE+d%5{7jXR-6jh*S48OhU3S zyKXBGZ$>h1s%k#0pC^82qwXe=+(-Ja&OSAb3UWljhU&-#0y)E~<>Mg`Of8 zm&*em+AGfZ6*~sl_VT61=O*3=3-Gh;mlMyt+%F6B&lsk_oNiSMq5;TX$^Fh}w=p(#NV-e-*IeVVx@yvgN>(}8 zmt&|hgA^fw)ciy7*XmQwk!l9Zm4anND{nc^7V_e>cW`iqN;l@Ny0=_&MuR>Z!1mmJ znx$&WBS8JXg$Z#L*|UcBuMIYCo=;7xFAxe3t(RAK&1`W^ma>TcO|Ty^=Y-69pM@r6s&p}=K<%2KRICcE4EAOV_NE-7^*GxReU)@MvcZ;^02&dH=p!~K)R7Tk7 zD2Kn)rgEa}mp=*FR^#ETX+Y!56w|eCK|ax04AEZ(9qh5J*hT3u$fjFs-Nbzu+MskGYbi>rB06W#pFV*+O`4QI zu%>&AALE0&56dzHptU#hwpW+)oJpBd3E=F_$Nyj%W`w_YM+I0;2|^!rdqKk-K7C9Y z1C3pDfGvu)#M6A%Sv$A+LBgqjP?5el!m_U_w!@)N2>VAIX?c#7Oo!!Yfd0Txsi;Pg|M~6~ zEg{nxuuSHFdZ_XFT) zj3e1!-l+dhWqgRE7!p8d&V?AR>ZizJ3;NNcoK{>yxr9N%quB@EdV#A~=0=!n_Fwy$ zo6fozPS;lHln6uaVg`GKzU??OY9_3z+so>uhCB86KjLASm_Pa*7Kei^uP+TEgw=UP zvs-4E_F^~T&4zop^jf8nU1R;9eXIF|WfM6aZW3zEh>Y+10WFIO+UFM}?nhQHMD&3) z3|k`goz349v6uuLkz+t_y&9(lV+o+kLD8{Hb&Z=49)a_im%|0MW}}M5pE>Z?iYx97 zsyBmt^I0z?>#4+ZnVIU#6XsV5ao-CYj_K(WG5yhBRzz~-g5vTOb%4z7^~dB*FU30d z$2}9Ma4cOQI`jliA$-14cd*fDx*3kh`uxIiZlAby@t+UzsNsPO8I1hA3^>pH%qbrS zEvSoZba*h6^;D zhlqvJCzo7(-CgVN1cuGCNfRw?r+-!XTIhG3>(SjutX3o4Ty9QQ2YxK$^*Jv@Qf4mNcw@Y6vJ=2T&^Ln+>)deU!P1w1^Vbj`LobkKBp-FO4 zVGNHxL9tqx>SUR!Dp0VgAdKOdo%TdloLVwSy3I^Fl1F0N#V+Z!i(54pT&Pj$S?=oS zE`?*&`N-XwM5?7_*4q5*mW`=n+^6lKFiSB*^0h>3b~s6>_hBNCb#T$%C3p7duzf%= zh%a%#9p6crEMi9tVT!7%F#}L53Y}riEc(wHM$Oqo=gy0o39r6bgqgPs)mIY6)~-0y zFga#)S0B^pK(jSN(rCOJq~3}l->4f+!1Z)v4t-;sBzYd{Bx3&XZ&LUMqgi5#Y4fyE zq0q#K;&DcPA;}2l`@Jn625FbwU^Q-tZADt)A8BawC&g0jsv8&DWlj#N%B3&)nT~0e zD>fQxiCm%ZCA760Nyrur3g0Jb#rD5^4|vAgv}@Ewbo_(~WD(ky3||~XB$%TyCo>U> z*h*C>PYNsj;Tj7Ye(WxG!3>+1gZb zyh#@NOb?(Cf=?_}2DIA=b$x%R#ZN_RTDpDmkbjQu6eZ*-F>sy;7 zMH?V`rGC|e{mtbKB%?sIriXBM```7mCdsU)Qm@o{?+){lZA!A%k8d@k=&8X3n86vZ zk>A(0lXQTFJztkzQbevtjZ(G@iaRYsZ_JcWyK|tRvOzL`|2&Yn1XIb(u$j`}JYvrg zRS1(v6d4p?`M{_;~%^W@$!Al!N0k-}nG_wD*V#ZV5Y>Y`nc z<#=?#J}*R>g?gBGcDH&zm5~aphaGbLAoNk6M~tuU=I$1!(!FUXTijXqSUl;FPSwH~ z^5R3J(CYqgy_KNM?X8ysqH%uLY}rq6D$2AO8QnGvJrfy3Wh6Tf=U_{F*_1`YYVgB9 z7xOc4jL50pw}=k7p;!LJZab{hGzoOvRJXOq&d$!(2T!a);$eG#?NnOO^*EgKKaXtJ z5c%;o9JM^^L!cHBQG@<>avA?7obkWE3Qo&$N!W8i`$8NIc8Hhpjab+2+3NmxS@QkuXjhJ z${5*^RHoPdm7D^d-h|aDxZ$qpyi{H7%4y+3wzazKda>!j+bm_x_&^V--mX&cG()#TPcP^e=r+@L+MLTRa zUNVLQQjoL9X(P@_Eg5#Zi}$C(bxs-C-V51ODiA#0SCL2jpyvCZHI_2}P8gG2 zQGZcJOznM3+|t|5}6(`lo+6goY$_D%E7d4(z>tAfA%{~z+ja? 
zDmKC+esgVdN)Zu0D1;+X;hu(^Jv_=ne3Uu(OfSNR_+Jyp$Ak%|$67(!gFiqx7h7CJ zR$JWX(^szvb3K%3&TvVK^#|9Yc3amfPf^rg7ZUG#q^~^A16<}mE!o|-nzrF64O_v6 zNHT)0Tyr;+e-3O&R6yE`Va51m&&pI^Wvy+j`IRXfrK-o;jWEkSkU1c_S%Ftn>l19C}*CZXg%jpBD^}SY}zYCRz6Zmib+Zhbu{tsx`Nwfb5@{y(aB;DJg^!XS4%3=`% zj2sj8v)lVDc;HU$qnOrw0;1B&;pT>XXIQ`n>_Q6HU~L}jnZV8)M2XH{D`V}-&~AG- z`6ZD{(O;37fOD{ixjXf1_*1>B(wLLiypOOLNXBF-ALp9!7`h+z)wDDYbJ0yxHg!y^F9f_q`aHUz$KD8UjB~&ou5bK6RGTS5YS||I_&kls zOh>g^?VSz}7Q<3mwDuduYR%9uRZU)RbfMx*g_AF^XDQVDl86W-w|k}Xd`^ZYtXIPu zR(d5D_Pp<54;q24YA^O(at^zVrymqTy41a6B~&fD7cNp)uIH}$7UsilhUriqw}X?; zVB@<`{zt%7H@%!`AJDE~}`SfXXK;FdKoPzU#C5#y|UcGqF4x)H>{W z4Tfvu1<9+&u8Txccx>&uprDE0{myqEX~yVAW)FV0C`7=~v0`(z-6}MarYEY|`;P^e zi><1`(X`j;m+J-=L;W{l*zX#opjN3u7|52cJ7*sY)XZ|eq2Ya0ejz&Cz8 ze{ojHc-*@9n7;R1u5!jvAdXA2uOF&(f{GhU_n~_mE%L)&>bK42Qz=XsuUQv~e%y0N zWpcrHA~PAl?1A6Vv}j!y8M^G_r1QJ_@al2tFDEv0oq4>=mE0{K6Z@v4Xt&)8puOXy zwhHrUxYlVG7q_^Fy5G>prr*-DYww|vv#jO(fA+7Ohu`}Hfpo0inmIWONp9}P_OZ@^@g&%6DQwVHNPbp=q*4Uv>sj7SqR7dSR=m?%88uhm!M z_6lb;qP#~@)B8?xy7;nWkSVG#_a(ehx`%9|*j2kMsmz!t-q}O_Zhg5!WJ}=M1_jR_}sgJ z_7(F8L;s~9BJL-42WyoR?Wo_|3wpw6xYcrVY#|N1%UtCsVM$9)Oho()^+hgwU%D949Hruz!tiDu%<-sj*V6hmT1k@N#V zvHJaHeHXmRqpToDi`$A@ev0krkXWy4vRTj*a%W*+2u$_L;UIDxFt;b*bfP|wijce` z`{?Arwoc6>_o3Hf2uR6Ll~P4N zdhrD~+OhD%23z0NvVRuq%EF4w(UU88<%XLcvc(jTJ=Vd1q ztcrj9KUnZ*1<={0)j8T2dhh6CS`{aA5tj|I#sazcQk)VQI%A+$0z%CBu<{d=6^U5U zI{yH*4=v5y0QH7si3TJrLh3#J<5D9-@sRwSQHy9#A3;0R0~$lTLh(m~u5^}sw>l6s z-@){&XqkiNtN7>-M}v<3|<7E?w=t;-W7ZabFnvclBqWb_>ZDW=@jhLYVlpPNv0CJv$i%&|C%Zt zD3(<{SNJ@S0QJ5aAs`at4$VTBk&LqNm2F7CHkbrq%&omG9&f~Euu>T7WBSahvK3i- z>T^b;!Y1iy)6}Uj{Vc53BEn$5PU&Wq*+D)( zwq-uv+z6W@y2s+u>1aeF+6|(gGaVgTGafCR%A>qV3|OebHfq%|bV?U;nz~aKqSFac zZ=i`u^r+)qu~INXtmrLa2TJu)7iPZRkq)L*)sNyqMJj!PtJAi`xR$huGIC97ru(>^ ziEwy09pJR`;*@k#CL(#(3cWJH{3a(^^bB9D80bowF&9PXh_bBL*~ds+`mbFd8@0av zTpwsvc~ben8D#+4@Q62K2ADBYqxn{{-RLMXwlT^9vuZGb3oi4tmz@iiui=T8Z{K}B z$%qUWgTPDWG3=g7bMUH91`q(R+eSEzM9?{&I^+_gswR4D*#zk9ZQkXAc-40;B8oG)sp2)=`jBfZTb~rQ7r*?T@2FJrh+@v(t_Dv2 zd|N0J#%DwHohMgRSkKX4vmX>}bLJ^*=*aQJM*@jFt`P*#xkpa4aNb9@E>|~AvEn^d zAER}$#!I&N22xp1bD#?Mxa4+rk00$R1}nI-Om9VM8hal=?79TFoQNLl4dLN-2Kwcm z+q)60TF(z!)XWfkZY_#M_-dhXN&$?fj=b?cvd&_J*Z(zSOgXnxkanXJ4CoZ2EV06%N&5)(l}10fmFAb(8$*bh46h-)_i|S*T@!inD9z&qCjt zr|;qYr%OnIraZ^MPlpfm*HY7M@bap`>o9!AI5+kNc6_1$2^Rs~wDnWk@I1P1gNrih zU{4hZX#?Tk{E(F^Sk17@prvcLQ8Tva6Yu?Y`+dFNX0_>Qb;9XQPp=PaN?l1w9J}2j zp456&Tf2$gKd=FC;;-%(-~F;LSw&7TFCRw%9O-y2QS(|5%NFQSQ0#CkaClb4P zF29k`Htx}(%WZ5;+C~_Q(f%vhxqRHTZ4_tDGYf-`U%yVZ@mfVif8Dc&TrSEaRNBs> z`&JxQc^gf_0d+@GSzE_SOLpQ}vK7FGOi1(FYzoTjvrS>@>okj_Dg7P^35H5$+Zny@ zxhjrcT;VyhP$W{H8Z;Jo%zSYerTw0+2m|a=)Lv{nmGXA%kJ+@{ML0> z5#=jQXqWk$MfX=BttF-Cq|P&GO>(z*sR|^X)CE3C`V?8Gug$pTmJziZ);LY_v?@F& zCf7_tr6U$T-lIMI0T!1t*%G!^b(ilGV#&wzl6nzK0_)ONzC_VaZ*a8TJ9G$Q0vK7;^}Dx?nINr1^hd=A2bMkGDgsl(Yp9*^h-SWBvN|#Q&fz`oG^I`49Rw!QZ4= zw?F@4y2~rp`R^a$`*k-PobdwW4Tg#~+f?(htkIi0f^dJMtkna9SITW+m!$5g0+xE1 zmxDv;%53na{^@VD_)%`BvfyyxMmdDJQ=ckL;L5KH@5vds7_OW|OkARZ8}PwEO!;uo z=(h7?kxpa9`SqJEJ%7d?r;3-hdIaqnkVNWYld*7ao&B@%OrLE2MY-QFb!V;FVDKpSk`4c zw$p=+N?K}uIk5h^wq4YBPg?MLya)YhT81xI*<-a(WR|_2~s2xbaZ)fnoZJ zkleafF!T36ScJ}-L@QU0BsirUqn53oFicP1f`Ey92Of24XPtn~I>GO`Mj|Z@Ri(~; zZx{s^`=|v&gFCmF7 z*IACvHA5OPI9`!8n&-Q6MIdV@p$2~@q2QDfd+N0#9CN*8(7dk?*HB!t%*vR6T+Z$T z7$>BS6z#6d>E!NT|EqqzZva`d(%%h05t#~YBoYN-!)@;sUz(*fhWhfK&9NcVj`LCh zR3c?z5$yWWf^$nk%hY1Xzlv!V_7^)+lq+s;qdz(({8+R+gd3OC+hue$U(TnNIUn%7 z?;E^yqokxnq3&k51iXUP^QAXf6;i{QGJ&%E&W5;2y_z!$3Sgc#ixB!8PNh@IyxSAjX2fk@qkX zF@vGCkgbYi*Av-?&ckYex5=P>LC)ae(OQDjM>VWz+Uue9=W)fH{)N7 
znY-Gf0VN%88De(Pc^`X=di@HQ{nNtv$vdL&>W?ijgu1$sG-^Q@49;Z5U+MUtqHjO| zLI{&+Fl)euXO*Y$nkP_^f`Yd(3F)B1n$hs|oV zWM*B4un(kQR^fspNg9kIP}h`=5A6fk16uv^8MkL_{M|6tu6j4={M|QL|0|u@g9g}jIk8;o+dtK;0uSvG89LI z)m($fiI^ex0NcN7@8b4_UeveKYNO0KFCSPfznpozlKYaxqZ`R6qFjx0j-WP;xubdJ zo?&?WiqT1fU&}=)?kguDUE(x9?|Q5H&li5wts%+H2S8I1?46`VxG_H79es|af|$nl zgSv#@mI**rQEC}zRO9zY40PMry72?X10egBi_K~q7=c}lL>d+z1NH$^!ddx{gnD)@ zhl}2jQKHi9Mtz@l6+qyy+?}0;h-h@w9ienwaWVV&?WVpUfwO%6lsk84f?Mw?BzDe&i<)`9x5ubjW7^9-*|extf;RT_Z4OYadp(sQJKGR1 zfyDPfyu;1IiN1kjE)$^rB)Z?gc`631Rusm2OJsfd!tD@$gt8ksVqRD%^=Wl8V?UPu z4rr>RaoHx>`g?z~?KX9%1PAWl@afBmmN{%ZAtBy2viwO?J!ZqF5$3t)S^P|K$BM7+ z_~c`0LOr4IVJ5kfoW`AxkI=6?>9lGEYV9@HfY!@NfJXdwYX_@|V(owh5%~aRGtCn? z19#@r5;b?HsM2PMPmjfFusM$?z+Iu9{m7NM#&E*f2}*~D><;UqQI~i3%o4Rh zxJB2`0FYulMsKgVsGaUtd%?)zwO{QN$Tl-!IsCFdgMY9RNMnZ{>|tNZ>>p^Uwi5K$ zXoJ}{GK6&Sz%l0~KOs*<#A%%$wzt@c9@kRzasNW8aUN(Y`Oh{DP|o<^hVbDq z(xBpU;~{G+9K-3$@6?LnS_C9#Y9(pOp2US!pA1GVU@z^~VEwA;Cq|418@?fDF*%%# z#77Fc+-89Z?I9|2ao-w>tOMB7={tH)AG_))Mi^@^>v1C#No_sfS-$F0Qzt0B#6r&(SiMUip9p?~co1 zZ&=fA`ZYvx=Q1>+sjscat84??gWU!CBu6i5ovHdk(!l3_XRTE?FVpi!8o$>>k7}WF z@=Z|;tskSq&m&2@q|z3uo(HL84%JU1chHRfE3`2PNhRqXFX1jwcMT%=>@>FSJR`LB zvmK1RxHVF7RfO#g#m_;x4;tWW;1jJh~({(_R71b9EG0 zRCJ@_hgdmEkY2~mmt~8=86A;Cnqx*=Ms!U9;_l$&g<>X?*nhC_nI!M zD-$|hFGRyd7BD+8TbK9S?_kV;oE-}FL7k1@@fp7AS?9&ryNj~=w0?|lT&;IcMWVv+ z$u-`Fk1V0bgpk`A>}xIwLN3vQMcZ{^fT_9%?Bo;=KdE1AG$RfWw4&`6^^Jmv<*>Gk zzo6mm2T_mYo)3dG0%!I+UJaY^v-?YA4DhXoEpl-SVULQk_UxOBN7QmP3YM7fXOYT} zC(1Em3a?A%5A;7c#o8R!SrjsT)Q(rQ8HFmUZPD8tcxm*t>bB~43pwc?gw^P-=GWNv zS{exqE{$n~HyR!&no~|b1a;el=`2pS^VPNWn2AJOMx2h1SaJCf6Emw>AtQD~@ZbEj zEab+*b>n9HtznahHqiOMu=k!pO}_uSFA4%m2LU1UA}U2{=m;WB1jIt`p&B4SAe7Lg zgqqN#Hx&>SLJz&S(3_Nm7JBbU75N>`I{Ux&p1sz2v1ZQRGiPSM$$aN|@noL+DRY1C z`?@~Y2@4W=YA*Gtq{nQO!RAKB3N+uot^RShg{x?>ggIBRQ(E+NMiK00n4z0SocX;~ zvZHyfS0(%zz5R}P$jXIj9ESArO}d_h^m1w8%v$m6pzU6LKblY}Rxw%O>U(!fs?ddO zR`H|5HMv8e*nK3KSH1ZSQ~mcPd`epB!mF~BQAd|B20OV7=d9)!g}G|&_k&0O*&&OfL_PbHW_9~Kt@LqZaDl$%SBH}aQl(*4GhULlS134dl^Az_D ze@sm5RrTJc`?9>cXCQhdl{zqyI0Q^u4@U{DQ4^=PI%67?%5PMPu$H?HLN=KR5gcdl z{MGH{vm4-KQB?CoO-)H(D2IM&G&O&x9|#u*~Hf*FJwHV z2)wS2N&>wf1a97~sI5UIgO$VycTXD6LicPih)^%E+H&-E1feuoQ;qyfFo?7g!_tqbqsG4d#O~6)HI?k7yW9sUK zkt^gE%?1%I)9BfrxrU%$t|l2^xD$iojGOSm=qXq`z&oU(eNU%$IuT#+rn5}kkO?yu zIjh3u>>Mn=X(JI+Y!}I zEfilpr3FA=qej$Cj$}dZd}VOdFvPDnH?mS!P!Eq<3YvrS!`wzd|>(OtmNlUd){Dxd0QP|vgrDUzHdOq6P0*%zL2 za3EYRK()2wiq3TQ%PvdW7>28cvdu$Gl|!45XFE(Kx%bxgmYd|zWW7fF$+Lq_K4faG z3{8-kg(RnMJNB*Tef!ZM++T1mx3eiGPU1gSwv!KATdi zQf-U)hbZ#M_)4Y2u&l1zW}Py{S5{74`Znjlt-o_a4Hv#oN)J$eu^5|jR8r9gi?COq zj3e903+gyf$=|=(BQ{INc;{D7BUlnK1_BM~1JTb%^m50Fv#OQ*$EBUUzgMIU+Z7Xo9eASv(cYKJPsr!Q zSh+)#DX9N8;10C5#J<}-U7qQGzH%}%?pzUb>O6i--{zT)0^S`3=)SMs-J{66~8>v0JaEZ_1SSp3#B|C{Qh;XyYEO@32^!T{qe zmfY;plkcKdM3*_Hj|?X5TbH`}IGZVlKXp#n zH99(g1qzOg!({(DWvo|4RMqHQn8JQbHUD{J+uFT$l-39Nf$A|v-+)M-JjyZoHDkds zH2<>pJVJ~^VrkQa`r4)HQTScF?@45kcc~eG7y@7A*$XQAL zoXmik%#e6T@!0Bb0YHTwoY^j>M2U|^s>b1tJH<*__l?rGRoRDLVioM4*WwMv-S-T+ zOX7&?ybmDG>|>?+>cOM~?6#)8;U=}K2v4Q7?v%V+V(Zd=*kzVoTPOykKkb1T5fTZX z;R-2eT0?~~qx54v-ux5|X2pGZ5rOHKU+DgqTHF7d-Te-Ipx#)~JFSanFaO<2f4>Ov zcv-})ltHdbKr6A(aEzzIj!P6;{^d((x#BF$zdgD*dP-;lG4OuP-yI)UAYr=53#5)~ zu;N>N_`{pH3jvdxO&@sn<`dhPMX!!vhG3_WpK}=T_RIB@U`g*5k?r>f?z-7SVqH^1 zQxaoPyTndaP#Tb|rMls~%1K{4Rwfz=(fE;CexDWWXI;#z;|TKCDE~||TgIN=)4(?y zTB5y)cF?JWQB$o7O+^gy1)hNEq+Xh!zNju#lP*-e+r?S;4Z?U|61+Xewh&R0k`(z6 zvCB^$y5xCEbogQm_3<>koJ+7#n79Q*Q&d5QdKx#Qc{m@rJ1;uVJ1BAEa&;;w?_$sl zjI`BVL$;{uxmA*}9twI;H_lF!JB>!h{5AIoEtB;sWoV&c%y)XurqvOsfW`Kzb1i=G4U`^Ua7z&YZ|ev;0RvohKYghv%XOmKl5Mumo=gek 
zAH=0-+4q_FC%&+T#x$fbLz(^|V)hDR>0*9-NQvL3Vy+Ycg&BPFb&A#y#g4DKhW0q| zYSaY$Vf>Op4A!9~E-r|ZF(GMb zkE>5?r)RYo2U_+N0oN%DxjVVZag22*{eCPp3X(4{V?lJa5XY7~u&e*RP)OQjwyfUT zMIxzwf!uZ{yrqBAppxDES=DS{pv=S+%GtyES$!q04HojuZ>;O6)#Ygom($k+PwDO5 z+|V!uFQ;zFZ^5Jmw))fkpt9h+aV?%I4h zAw_9}J!{=_Ear2aw<_1Zrp+r3-aLrCE6%-1VrqPfj|)6ilP~diGDdOiJzQtF1D;83 zl*E)or5&h#D@jSC2e)uRxdt{-&u-c+FJs^L|3USl0~Fl}DrbA&vH8t5 zJnm8V?e>^9n?DK4X>{_`Kjmon&Z3E%uW^7#rbms~s03pl2hQ>tx4(zX=4`!)QpZ-HTWHh-rCTo3UkI4K)z2Vc6;9P4kdG> zy}yG=n?hAgR-261a#aqj@AQ9DUl}m^!?n$z7$VfP>e%Pr9Z$ z?M&|gkaY$ohj^3n!>SpkqLMo;$|nu%T&1zIhM0{`n| zd8Z2cwVpYslBn;^x1)v@$Sm}Nr}x?+`Y>@p+~;Tb9_nR6n`PlkxzVa9?O9^Lktu;N zo9r#ndCoSY2;c_l=8MiCcHT@`t-~`ZKv2D)cb5&+cW%;w#^L}rxzSOB3~KR3BqvJ} z?@*nk;Qp#t`zMHwCWz9ub}K&ip-AX*zu0a?e}ZwezOKALLa-ll9S5pbG6L-5kdOQx zV&vqAmaZIo7uRnO>weQc=~Yx|*2;&bmt4zvd%c*|2rF0bs^e?o&_BQHhA!qIWshIL z%hr@PA>(X5@(4rT<%Uow1&JUmC1abboTMlRuri zf9!QUwqi7||F?ho^bCKYtN=UMU8=j}BLs5ZEs!ql)4{Sh2foZ^H7b;v0H2umJi;0P zQxkAvIRZ7%-6y8!E*%3cy8HY;ievP3__`}^%z?#FUt72)1INN`_14o8qezVIfthwV z;**rPn>L>fxKv>z%4n#%&df+Rhb*A#bbr^hzs{c&j(}q3>{tC5w?IYB_U}M!W)DB1 zjHXq+7I9$ZlGlw~^mmCttrz_F>T4EHS!WZ)JKsF-J586;DkxwttOWuqGbb~!JtV`<_Lo#=?Y5N1{Via}XPKTy zhKUB$z7+i6{#-)Yh}@yc6{$hED|_$qz8p+jI8vy6W0{&-tiivqcJgZ6g5iy*+0wov3Dq!oobN&%Hk!a8!t|UH zMS?k?9(E$0+ylbH2lb_QRmNGm&15!33|&ALKQL6ae2Fna!KdAm97gkcdMms2iq&5w zkK-#PI;Wgj;<`PBry`F~uKA^)4D*tXmF!IoeWr&|6M@_vCY@MhPp=0#0vHaJ)F;a+ zzSGrVdLX}J;0lR4hby|Ttz3MFH*DJU$~FG1^cn=xl=J?+V2p1)#hWcMrlZGrP)Vvt zQ3Aa*qX!EoWpY1X;)J~^l}}dByuHG;gw>fqbN7`2X;74y?<8xDvjyW^l1s#^z8&zT zF!~p(;|`4|dt=kjdohv#5yln{iAeWUbcAQo0G^KynN7p)iw-mXS=@Da$Af>%vW9Kc zsj1GC+no@Ieer1P3D{686Djt_&bDmIGODES5^M8r@k^%#GsKlCADNI z2b-!LU+w7*86rjfX_Smf&YP2@O%ZWB^F-UR*C@l32HWnDr5CpJ0oJo|AJ)ESjE_Z~ z1PXO)bovXsUFxG_Q;yfKjMjJ@de)9^I;BGlm?F3B;P{~G(6vR8^-O0Q_7G%pR{MNo zda{h&5$b&Q+E%OiYVybLES>P6drDQsIq=Lim7G8-#KujvK5(p^CUNW#p}~@| zR6UO{6)vz++a&2GN!Zr$ah%Wo7OZmo1Q>}*J=gERRGt^TD_&ti=1;Y;bH9@+7aMSU zN{}u-+I7yo?qV6VJ$!wz<6}lwBk5YcX&{6L8JX=mN=ibMeRtIpD3dZ~4WF|dR(K2r z0LXXX5nTI^4^FRggJ!GiEMzz_)22OEbYySE#CZRI>tMqrLQj(vM z&+4O=@ORQ?Yye(%o^H1ZZ%p<^XApliLR+YIeExhPPx90&XVZ(YeQ(t?NhiUgn~zQ` z5>gdhgho*#v9%{3!5>Lz%iR@JBhgqEvzX-?YLQun*6uQBPwN4Z;&AOB_#+dpjo0pE zR2u4(uR=N}1V^O4*4bn}{59|qdq^{*IWjVTvQo*?EyDt>12bjYjTht4@-`Vs{Gd*^ z?v5>Ik7v&OB~H8en^F0un=vob`HYjr)?>WJKz8A#)uonQ5%T4)PoJ-ZbDCoPOa`#D z)~+LiW+>BT;p9RKvPn-+9z&kke)7x(pGNu2wEsBc{j_3z8z-sxowS2JFy%nt-E?0M zTD`D3{_m#mKMt0$t+nX#q292~LDmwhiIIWN(ejZTYYbb~|`u1Dhm`I||5&TCAi`#V;-m^rO!_JE$jznXgV<@}@ z(jK(B(;po3eOSXs5c`h?_bgX*EAo^nkAF-Mp^<=VuGMi{_`efjX)e$c_1-p({E&Q82IfA!We}p#O zleOfv6G+u_pYTG2S^?EcPkSqTvd)9W)j6)Mb~)K$ zI~C$tuli()?FA7+{Ubl|a@9iPQA3}aX1Jtwy&vuF?0qHAcWaN~in6e#lKE)Y>@i6RA_}cwJdzunJ z+swf6CmTw2?gO5#+v3-({5;rMtD&{QV+@Fa0Vjl~uxe&6He+;W00B9rLB7*Ht%oP9 zK8^eoC$=dMmc9IALcPbMGHGw_E^!k4VD!)oPwV+a!7{cI%8)Y9{M%Q-teW3ev`S9) z5+y>MmTPD_dVC9{zs!0fVN@LJ-q@?hz;B^Zncn10wq5NY$#8ieX0}%d?OQ)X&amPRIiDj|sbtyjPw;6;;5=gn^ijV@w}x(mjUw^V%}J}CNKkc;(e7q>+2u5QfAGkT|c4x!;_koiE#@H zzGibiRcF*Bf?_RvQJHm~wOO(1-xI?>Z|i$4I7W5$a-0kh0eCC=A=I|!9WCq#wZovt zLDH|GjxTMLO5^%T0@$ZCu!oZj=r>?r=OlARbwe;;^c$95+aBLoJ!^MQ|Idzy5Jjs+ zJlLlw)-9cywHLI(QeT-)Y1P(4Fj@ZgcfP0@6O|rYUqi=s!Gk#$c;>*&c=zs?5?8^XQsba6`3OuSyp02s_61pexJ$q$&fhIL$!^c9XZLE7FpdqDr z@J*xsRLR!I>L-18)+J-7g8X)6dE^Yide2n$oDe}kE|F&^HYiM)`bhjBnXgMnZ#t44 zQJZ@tp4Og{Frly}qexoHr(_NiqW!{h{$Hi5l}u@9>osKPC(E9hbs}h7r|$D+j4C}w`K|Lzz% zQW1nB&o$dMC*@{Of7ENOm&@}abWm{JbhL9qT?xr;g=u)iZN}=Wd;M`U1s}3MObSnC zkPDUdQznXHDkNp9;`J)6{vjIvNI{6%OMmek+5E%NE$;KR&94RJ$&>vzfxJtsc>RvV z25!F~afvBs^@(A(&cAzLKrKsFqP<8i=Pw+9?L*X)}~g@lPv&L 
zaPdF4!>Hj_QAfSpIFPD_*q!+s2Mk%1Ij=nS0r)(2<-NGHAO)kiSGuNwqWl{p*mCj^ zCtnrE+a|OFg}c2pXZ!21@`O4IO{clyOR-jS_QHWZdrdRyh({;zaLFGm7j;)&LVetu z?<=k=TAuF(Ufv`JSTLNh)`eO^b|35_JJ{s?P#94A2*4?*oMS@KVtjGumm=6Rq z8}s6F+b_?X)xx}z_K+T{0ep2%7Q&2WtFu#ljK7es37V}CH;TbcVN$j=r&tr_+`w9! zFAl5g{x<2h?)!~xQJ<@0je0GBBIdNshhC2_6^05KemsboRt(c7d&p}yXV(-o0{FI{#Z&jzdT#^! zu=_%3mU-J7ZR+qrIvk@pL&Hlx;WerirJ9c%!yh&$uIb!!SyN3?WAfHo54d05 zINHlgRu-X{tJ~CvPZS^U~wflc2!lWIWIE6?e#vLFtf&4jNo!kRh$B27RC~l-!w}O(!kG zMZ5&89k4=3%9|2Fw7&nKqy4WVwE=+~t%M~C2s+kQFFkghN6YlVWb2_j;LvolnW`2= z_7cBTNEan*u zVp_HC2hCXcQM;I$`wV#W{t6~7?>wsA&3A;fUhu7MsRyFmHRwLl#By$=XeIFRwzgUQDlDanUA8TlS_Qpt`^TU0Cg#bvz{PykkT{eX=I?*@wfOAq z#{t_)AF1#asRn_8ct+nWonj^Rm%vNc93WZ4*XulGUO4WI@z>TNGPYVEc3US0t0vlT z6Oh2~DJFITkcM@8ZSRRHD-IXSBSUHoj?eMJtGOMGY8_Rj^%G81MH1>8`Ng<2wh`|_ zBGuDdWG#`0bY^U)uD`v|rxZ>(ZF6n&A=!%zg*gqvVr8{0SW#dTU&4W#8QHsH9tS4qt zYXrjb4rFKPq)fVaL1>NJ**@Dh+`A_2M0PmIYw^$IkLF6VwKK4KFMBJ8UBI1Zbj!BV zyzT4ffr#g(Ugm-X7(IS^&Px;bD~@ZGkA`kZG3E2Hm69mx6^FweZKj!1{(e<4WJwiP zT1RLN;6}hSD>nQgcjKLwRtW7OFqE`8SDu@u3GNgI^f$p@8&O zX&S9E+e2Enna6n4HPB|Lv7+`e!W7I;3l2i zJxl@0@*Nek=&j)g#8&(Bibb-L%hPfK8me`2JIonMYA{LxhUMiBt8$=tT3cKY4bHB| zHc@0!QMp?oXK{tusR4f^s06=~nelZ-4h+%zNpHLf#{le5>2uQZKPfGg$XcDVIr{eZ z4>An>{A*7cm>r~23y}rCm|TuMWAicjQYc^f(cN#!;qtH8UERDWVmM5Iw>Y|H)`nF$ znL))XwV4QkuIj6?aXKb1aQg-yShj$g3Is=NwkGPumHSC#Ny^-QIbzn8hGfah`IN!~ z%9W`0?lG}gXLb&uM*VGmZ2I2GPiD`X!GSD3K(L$RhIGVm|Ksi`dqa4L$)W(a6Ag|n z-y46hQ*U7@9F??)s#XV__lgy7HjGiCy#f%N%+glgr+@Cx;pYJ?8@V8PRKT&W(PG*# zaTEKV8+Vh{yt3x%Dm7Wh%vJi6r13O*>k=`Y4K3IK#65z zLK`p*Q|OsZoxjcc$Lroj6(O~P*OPT(qO~JM4=SupG#Dh+Dx^OF-N*vH9X*ave@1pORdt=`PpTbd+^KmCvA!iVF12iH zQ&34gLFHA~>ujI>9>;}A(Hg-eYISP{aE!=z({Xn7p1BYTe}5H#$@=Z~g#{DSCGt_u zmwP?~d#r-aEIae86O(q%J-z8R0u`c0FZ5uItfZM0l57evAUcqCB#)TCC98e7^U&rY z3LdXTXPdb5AdNcIY;l`*M4Hf1e`DVPoVhdKo}xW4S-d$j1uy5z?i)iK(x|_98V-so ztPyh`d8W~%R~z#*qNTf0Fuukgzu3*{yFis-0(NXm#)Pi?6T7gjUAfa`YRu4)5Q_3`7EWif4BqIkU=> zO$`UeE%<+k==_}ryuYe#(tMmW^4r<02gcu}nQvgGo<9ksC>`|Yp37LL>p-IUeu=JF z%5y`MWe}K>onO@(G|xD;z=n^bN;_$asNo2!#mjT2a@_T+V#q2VHcv1vH2bSpC5~Y& z-_DY=(qy#n_*@~5X3oV710^YS&<~?sYif3J+j)~izuL&5Doh*s_N(#4Q+H~Gb=6Pc zmLMO~J;fSjw||Hzt&42G^lPI^xyo(wKkg{3)`K_~Ja2LA^%ty5;G%xmE+j>XS~3}> zq+=&hMlnmsQi#&>q1@ze@6U?aV>V+6(h-!{QA|S99l6&^;J5n)57-Hx^<4gZd{$=! z=fK`jG(%ZdgCL3CXiom$YB`VE4f<^QeIn?1@m{+LODi-`aJ@u-^J`haVE=Cz-Uye~ z&C@LBcyy%~TDECvSFKWG0hArOAU3(PZZQ`}^C6k@8Lg^nBzJy1bpw5cqdq47zy0w) zL|y<5?^A*5zpa0@F?wl>mF+Sc!mH)FKghpzydlGK1@YnszhlNh!F%xSifLP!n!Bdb z(Fy5UW(jhOJocvHe;={4uqvS0VrMkG+4rY0z@(SWehDa)29`@0&StU@5XMfM?@p!7E#d~g#b?MO!= zDk2314UFEx(q)w>EfD1q232eC23F}X#JEc3l=bj+^p z+~xHR5<8{Z`TNouxik={b}${pL0#A3aiX z95LH72w4aF5j$9bYbymi;_OUMDj%4f@OBKV-NFJ8eOM$?}M1H2{L`15Q%;)-2({%qIr4-oMew9=0NHQB4;X2^=kcL!(T z3RB=dcU}6V^7oHc+&z}Q4Ziv_YUugawIOF+3Tw)pHCuw4Y!%c#nEKRhTP~UYa+hL? z#YUsTWYQ*mH+)d($7YpBRgO8NrD%7rz~z0x#yUP$tyrzL$dpdb*v58{woy~ovWY4V z0=Z*XCH#!#p<0>3C<>zCNl3&?(mX!fh(p7i&zP4Tmv?kR2bP*7YuEvDZ&i6C=|A2` zt`^#T8~pNGYzN$#M0P2=+%r0Rk@hx^Es0VD?rx@Z+k%P6vY5T3(YbUjHsf9R4yowc zk0eBzTryDVdENndJf`jk*oCfuGL2tL{1*s&d56=!vudA6~uJ=HaEPISVC}ZE>6G$eZoMMRAYZSKYZ7J_F%Cr{+nVN$oKTG$}z0OL=iLad77;uj9^ z33b#wCJP7cQ)IcM+b!eBJHx_`{2g#d0I!8&Q~!pb@ea++5(CPCOc?hWL$Ttup^YEd zxFC~P2Ae6%;f~p-sw!utnGBy;4cW{k1Sw}ebmZBR%A0g}T~)r~J&*YFht|z-8C}Up ztuvzVIz#n7^mG;4eM&YSSP-*AD2KT|ZsBBdTk#3x=w0d@?oS(3A0k-rR*ba*YNT!D z(`bm`r(?1H_dAE%XLD(6P4G^H5ECc@Wj%z2xha|FNdH6B!)KPB&m|}4KKjda0PaI0 zv1j3UI-+>R>>CsP*sMk&ZD)v=>0Eq}8Q?Wq)~_zgYBFR^u#)d+tvxfUow<@0QN5FZ zPHP|gf(ke%+{qLQ|Az<(edUjaH8@3B>^e#QK(ncRu4_Zz8B+XB+K1Cd)G*V$ALK~- zj!!?p_AObk4Ia}=;b4lVg+U~~toI8CVvO%eu4eDhH5=n8!@qPUVilMYO6gV#^bZ3? 
zkUI*D66b|unNO|eyd{Mp%H2m(GMw>a3k1_Lfz201VH-!OjyEl!7mkzPOuydP7@$#n zMvDA16tbJ_fqQVsxES*|EDo`Z#f8jE4aF7lhXm*vfYXp`WVUgPs7^8HKE2!i^^)JEI4Wwvu(Z~rheLaZnsOC6<>YVMXL;8Jof z1tsV@*~BOyeh)!31FpTosd7f(>V67O_HD`InpT$#Jjas(eI771g&wEt4%6BX;??N< z0OMH#IesmmUm#L6Rg^jWRdC!-kZo}1(%NQ+6-2eUDcb2EAj1Xd9{#ep`L7u=gaTCo&dol#`epd2{ir;ZRxvcS;|SL=!|e5H zcl>{rNb~>cvHwK;r)&Quw)`LnrYYv$mQp}vTr>DIzfnt?x4?}{v{U6)V|XXM^p~nF z@C%mvQ^8Mc_zL{YFfYfZJrAaSUPTxu%VxpL$JA+fdk#c)n;x?Vg?Qw3sZ3A` zW0F4^`9vf%_*^kmEV|p)IC;~sk%a;{nEP_wTfqvhYRrcE+h$VPcAu|xYH+CnxQ3m^ z+%Yn>q3u=i3MIY>-o+u2KKp9sjv4QX)qqQTwAcm6(j&FN@iU0YW z!WIoXbhV0Cw$}ja6X_tz{V_b4=ozmqUGpY`QL*r2$-`46gCeV>u@Ls*7x#8%xnP&Y%bgo!>#Q3cRCVYh1DibCr~B1Hr7Od!YICD|Me zes8`F_P~yHPdaiQ5#sS!A4x|as$#WC_KpXpv$77)V)GasU9E+T?cmhVa3>IM9aM9c zjcWb#vWBcJTrQo`sAqMcY*3Iz5MaFP+}L5I!G%-23i`vCKOuO(DEaHBUemi#=PH;t zjw`)3;F-i*OarEF?v9|S{cjEB-ZiO#cRalha%noeQUuy3Mb5vPCK@1oIZeNJ%p~g0 zobI2&_P!+>SB-XzzN+13j*Yt$GQ7A||4H@{Vc0OMQEYIkSD?88-E@%O z_K_U5m$#v(70`jSW=i12&VEff8BlFPjFU7C4;3rFDEGsd+GL;OiI<^v!Z@&A0&eeS z3LuBkUcl7xx_qrTa$Yu+}6E=;Sd+F*pLn4rnL(cpLWx?m$bH6s5&Zgjq}DU)3G zNCX{nE?-DPH~tUNJ}chw`%OcgZ7f)a1ZTA?19lcG6s~t4MYKPo!nRLRLaL_Ba(`G& zMk@jv)v)nU$ct^9Li}W8y9lO}lk9xc11Qw|`$6>UW{gD^V=fnb#c`Cqm!z)4p`-fm zwk2nqZAi1qDU`1KK#z}dbIKFVmghfqv{S~px}MKA!K`0%;EQbWj`(T)v?C_(Cfq2{I^KwT+ppgj>RMKo@;p0i?y-%zC93HHseHU27Y{SSLfbziG56w z+gaO$vnAczEB+hgvWKPD%LzYH^_f*`_qB^WEVc&1u^{4<2I3i5`5J|`b)Qa0g4IJ! z_}qvBS04O75o!N7TLb7LbZvNxZ@9xECG*>@3eQ%B1&Aq53GmFM4Pyg_*_D4V98@TI zny8VT%CLJw4*k#boGGsiK-^KN6`z?mhAy?8Z;!wb$9crx9D$BOE2sZ=t^A!WgB|@E z_6(>J1zn;sFLl=Z^zNT1V;TQ$)q59&7!1F|FbhIM!W61kS>O8N->9bldw>4po_&4X zjhbboR4d(-r@&ikRIU75N_Gvt;?~}+ovQDlE>TMgLp)&mWpRngfC%VI| zr~dCZd>SIHOE!20ZaUhxVs~0|Joap0>VF!V8<~(0(JR5!0no#i=M8+NEKp@nGd?pS4UY2f1fldPuZ;M5Ypfc&%ET=?X*VF1Kb+ut~+jz4v}51%|~&A7TY znfrk6XxoInSFz#D=zNDsn>VT{>Bql8R)?iUZ+!+2?Bp!B8FhkO*2G0euxgW)V%%sP z@l6s!PAkz}zCzU^#Va-IG-m_3u)wfsCU!R+wi4({7RsV9aPSx05w_<)aeCOoAbz|j zE$wU!C$YOO?InpPnnXkiSh-lQ$-o7oYS`gY^{5n1P2@zD0BD&#rX#(~1PBa>I+xAZ zU>Opbv7#04jqg@{p(_VITWu+tMrcFV_GfzEG{jf(_kQTXvrH)ItxrL$Vb_@I9`}9v z*#YeY7E{_D^T)_o-js)$dp507U*&IAR&Iso_binx7nZaCJi28*oA`1vRh{MnTXgp4 zUb(QgD1v3%C{n9LH(wW`bR|_zIOCe(5LqJjlaF}nIE~zg+wwWfE|BF?u_LNDKR*>Z zvm*oPV_0*E?+@nuClT^RI#5@{!*A+GJ~ka^{ZPak8A76f&s@UY(z_&d0un_ z%s~7R+fY6vIWG0k8Eyhxnvz#OJyJ=ESu&e^q%MXRWbAbRZ8EtLJmme%gxB)%&@-(k zCl*haSidn?ia8;;PAh9+p|fm@v)*{3v%md%Qeav!R1PLG;~%1U+o-{{OQ3vl5ze{b zZZ6e;S8YCsc$QN4#6-PK?tomJXUTcim>i;!R3Y5n%WK;}g6LUVujzOplc=dg`1};? 
z4s%Q zw0YW$NpE6Rzm&B~l&-4QGrO#}K8f2S7Z*MRy3V2!C*KTI$X47e3_C}W=5ugnrHZjp3815DaCeWfIntWd*_j}yj&*^` zYhdfgBuRsbM%Y980+o(gwKrzLs@lDtG(?g<4ILeXg_74@woh@TcjSQfDXkA_al^|* zhr&uEcc5~j2^Ngbb;VGykuZF~nMc5B2r#4Jy5hLwgXBvV>H-ji!F1hN&-wMvHAKT& zq6Uc2(RF9`e@>4U)QP+?yoYW+J1|&(c1;7m3MX-S{SOh-Uv7hco63YjI?`tv72~DF z=EyXSHfDR5*O9wQ+#Oci8{Ck^bM^j7jLL5(nw@2Q z&r@nv4rM*#`_Y%k_vmAbN8}}A%c#)bq?Sa#e-%{yG9^0-hR zuj0Q7Qf%aT9Cs&A5v00UbTY1x$}hq5Yi!V=6W?E3bX5zvF<#ibvOf+5cJE+Ap_oxB zbTbsdkChgta_UAR1{J9n0M|%G% zRGvgEGDt9z8Ze5nYZl360lOT$mXU-*Itojw~veK@=#4xEBZ zFV8NM?do*&CO{yS%}dSUkl)(RZ1-1k{#AtBXobRN*Nu}5e?*juWf^CW8o2#VC-zm% zKRJbiTjOpK%u^zwYEyYTC%|o~E`}L@b+dMmgQ>~uc44WsCi373+*H(l6Xajl{@({7 z$ySLL>b;aGPX{8l%>htvws9#O()FTyZt@=k1KXLLIq#XNP)l zLMi?a*V6v5Rk<~>n2c~q<$$c6_)D&jZFkP_`+KkV|HBzo#3Ahnc(Fr94t?iRkXG2B zfyTWfrlZ8+Zf9r6v>udz%ae^A#~XdIJ#l=#LEybiUKH{V-xE<-sok%WJJ?-cD*KO* z)15HG|8wwve82wBfc#G%oc|vUh#lI72|8%ugD;zR=w3s{YXW$&%BN?X{7(N%y|Di# zn8m!vP5S+F;9jk{JV ziA@xjpYX+{$lCcY?yQkj*Qv+EAts--hxdTnLVjnpbc<&-*3pigDjpg@JJLpb)~R31 zQ!lW?C=#Bsj?5heOA{(Q9{Z-g&t2rMT*;p3nGcjb^Pou-kc{8pE17&grVy63o{DcO z{=??m5lTVjvon*T9{Np*#SAnj(9x%zV|vtcaSJ^q@Tbr4g; z%7PnwcD8G$mwR$cZ3#s+Z3i(O@Auop%55CPl5rQNCg!zRA(Y|@JO{1-c2$}vT`UcA zHn)G9DP}V}Wqij#NAT-YXg1p<8~jgzF-vI?uI~0@I?enCE&b9?3l`j0TzRN(I|ILu zldXRUZCh$m126TIZuw9SZX2C$sS;rhFs70E9+m{ORP(2Q79V479%uQD z>?tC1u%+)*=w%D}gD7`jz1Rxo0*uuh7?dJwS1I4@re#k-|Co+2^Dx}ODY)7{^dRRIev>?j%hwY1&(`C>T)FNrvaXJi| zFUK5t?i*ec6UJhV`yqA#@=Lxb2p*c&jXIPAGB>!VW?!NO{Ck7~+1(9yskL{b|Gnt^ zzu0^0ptiq%|C5#iZL#7Spv6j&;t;F?#ic-TC%8lK;1qWYE=AiyfZ!ISNN_E|-Q9!p zS?>Mq_s;Il-JQ8Rb9ZKE?mwCHM<(yQ&%9^OdB4u<`FP}pf?ut!^@)7%$;v8<#h{2^ z+a+#S2otKVnq6z4VFB~+S{Ns0dwq{}g>ia0GU8?}b_E(rP${V58zfM{Pxc=-_aB4Nxe87Nb)8G^cqbgrAqkLdko3hjWhE2k&e{Cm-{u@UB$?@GQ(zE4DKt$gpG**F%W`8=i zD}FlVR8TJ3?ip`u7%ShS)i+^g2z9%QUDt56P;q zn}j>a&XKH7i`pu;qiXmeF<2XZ+^g@pD0$J^Qi%%Jtf=^dtp;??x~IL8um2;)?uzW0 z7%07Kre5?H$XL)w%z#C>I;EV(Czhys!!y;k-jOUl`AUS87w^~3VZssg7Bd)wx&pV* z+{_Q)3+C3+l}-fyzI_QUpVH>J2`Cr6xXS9N?$OOEDg((a|i z!>sC+XB;11kgSWQ_VKhf?UdCnmqXe=7(1Kpt%+q@_Ghwvz~JSx7p$Mvv#V|{Eh}^@ zB!7seGtF7qmB$>KOoaAXA2ZIjL<`oxbYi5=3NjIft~iR6U=TEOfG!zTCZJ z{m4)JqpqPs+`(N5#BP4dtG#8i4>XHwrp2Is1|j?A*%odkgUPnB-_`quy9IvaoO|S^F47#!md!EV9zdv4|~%7Fij4 zVeG*0x#-68dV`B{csn#5Bs@*3-RDWBoP0*>fI4`796m80Vy65IP3GXbvtBLOZ){_W zEcskV42KsU$#=!^HJE)vd56kQMyu|5z29{vnN}ajV`cBD+2%+pP<3dev~ZLTL6ojO zA%9pp>~Ph0K!uP}-M&dMtM6v=1l*Z7Rw19>J?wO}QoXiz_?2Jj_G^>~0b$gM8{F;0 zeN`gnTEo&g=}*u~&d`~;Ovml*l#6gbQ-;OCaq`_1W2ez+3r1D&K8Sv(j%?_{i| z&+j+iZQrWI?lUNGZfNV6U*1c$H9(ImnGp9FSIx*nIYG`Sd#!RIjszhSME&h>^R``? 
zJn6m}>nqievq9ahP;R@^8oS*2C-1zDeJ!cJ`zi?WUEngXSZev}i#)wK#5#VmEAe#; zn}|_g+#tpiCiHarM`w$k;!GRvj!5Z>Z`$nYbaXuQ@|oRf;*;@eio6&VL){B@-Rw{i zlvfk4Xtmjfh{z40xl(wp?z>ZMfbc%wFOn>o@ z8x62o9p!4z1uG%M6jI=RuR2do;D}!@zre-uXZ83mRdl^l-IwijA%RbAUxbQXgTEAF zm_Q%|Hw*;#DQPjlLWOv6eQr{E%c9;PV$(70X3B0@QX>m94IV>Qdc4&=WG{@*N58gU z99v+<@npU;Ro9EP*V?uL^wGlgk*@H3#tcAx61wAh=nW~$R> z{SNVRq0muF!b9h_YNyHYjIusF`Ov5lRVtx-?s_tK9;Iei#X?m5qwUHdKb71m^VjOa z`|gC#C_%%_%&UGhfHqspMUZQL*uw+doSE5fZC6>(z6|HD@}k-s>!_`v`b=e&mWz8s zeBo<4(gOz2buWYtBkJrDd>SSE!>NAwz@J{FmwVLM!)8Vwd1*l@vvvcd8c0n!!|60s zcB)u5cAOUz)}-<3ZoSjEB!ul#l1}Mog&^HLK`&4>t4_Hs)gE1VxRotvmY~13+viKn zhv&5H$w)IBMD3O>Y~Y#6V5CIl!O(;w_fD#3y*;2##m)3X{~Njf%FP*F@T!a8TQllu z`}U7e-eoTc<|;wUbM0)|dn1F^`Y?Yo)J*re-AkT= zpN=su84@b1msjV>7dxU_(g%f0TH>~XI_#D^RUl-f)6A)G@>6NQP5dB4q z2J-9R8O-TTu)q-K8BSGBvrugR2_d!_FWUAZQ}aWN#ks|{{EKVFjSZ25<>xoafv6K5 z$56Ug8PVql6n{1i5_yJ^vZ&!t9;?%Il3t>xvF!vmavAE~URS{93E!X6Ej&hd-u>*2 z52V^+It^I7l`xXu<8`)p*9r_By7N)hf z5;7FU>CoSnJMl#H{eux|zWt&O=ymPY=$GwbP&`8M*l)})D)eP+$mJk{nQX4aJHeY9 zygB4(#-IN9(?|Db74Tk>dB~@4s3jK<7IHc)%JZgmfB50eaR1aO_QJj zBryq$sacW&nOl5Ues+ObhdQ;(4yw5`%0lWa5Bxp3*5&<{Jj(A}+0n;oLSuy55-Wf> z<21+B#F3S0)~k*<)31K@Wqo)#zxCJ?bKoBj6a2q6$dF3?pU;vkL%MVaXWhJ|8xmlf z;?|k3P#4n;F*$p-{y|_;tPy$A+1-@&YIYj)3MY`jApb)4O(%RjPHThQ`2DC=LSfRf zXoy6TsBQ_ZbB_XjVku1ta+|gBLY0U#6wfR_MV92&N#d-jqjVN{T{v+BP+QE*2hsCWSsI4 z-8j-?l!A~Q#dUKOLU~(0Mc2|)jmkxz>#TXfv=IxR4~-J2OhB$jwsMz+hE_>d<)@`d z3M8vgt94MumA)RsR4>DJ@W#Bgx!Fmbb8$&wsLhb}L65fFlSHt~{vP~7#e8b}5r+XN zFP8xb!2P;XfmPf+eFYyDQWFU^eq_NWe4bB*AC%?3*Hi>aRtqDK&xAcSg;BaO<8@Q8 zexQUAH!b2mLp&%$K%tT68Fu1xg2mWofai|+D*SAN_uKX)9?6`inY3j8xJuw+i}+s2 zhD%x_hgo)V-x}BP-^9PdP29c6K0@~NK`&}x8m3og{Tc$|TE#f@%|f1yFMa+?arB_( z+g5g>2Ca&v^sczWnetip=Abf5ykT`Nk)v>C1W3lkA_nqRhfG^V2Al9uywqog zcEz6MQhz8JK<<+0nSyDC&|8S-FN&CRea%eapU>i{UodTBh<$y!MP5U^{DOgC&G4OX zJGQf4^g?9qGjc5FZ8R-T$L-AY+sl+G{a zUGkiQPa+!*p?7uP8)`kxs`;HiBITIkY$>V5^j9(HTjD98roPr2p`D5@U8l&}MNTIX zhKJ)OO__-No{{iiOehSWF(XHh6UIm@!r~sUFWqZm?T+|vZ~JQzM))VYIX&}y^ZSr! 
zUY&(qWwp-&1qFWnN20amLM5weQmM-6OdX$0D~%l)pl!~Uwue$eEsTrHYitz5>m9a8 z=Yq#rFsF=Cv0PF4p#?T(xiQ3!E0vH)6rfq{0^C*G`XUO;86*ce9sV(i8FBn=H2BHt z!T zIkF?UX-1DKOxqa(#y>sjv==#{+xUBQB7o~*@dLuFHvFQ&yQLBgKj5p9z0O$hc#pp| zC*y~#H{beWVMGspA*NwjWF1>K^A4C=kUnTkEtUXp2Dwd#(bEf9%E+ZOJkDb{5Q&%L+iUve zi;u7ouqQ<9A;h0vxkq|m;BD7Vnc|@)Pb*V-{-k7eeYC< z0p?6w4l+~-r(?KvFH;*np7j3?v?!2E{w-pi!TV+)>N$jV;XKN<=M1nbH($X?sN6nTkx;UPDT?AUgaxJwW6~dcRxFVw{kw^jS4MBrneAzI)nqOEa^+en3BF z_{>C5_9I=Kt+GW$I@G)7$cM~qEQcjE@KO9cbYbonfRoxdP516~Oobj>J1+>3)&+vy zx!c*?_k3KTD(i8h9(O;|1J&1%c^xG@Kgc-Ks1|_mNU*IiDbR#IatGR&ZDo?1Ws(6T z`&b??TU3JJ4hLm!P4L0Jd9(<6t#pT|POwsZ;iJOLG#YRlJD|PaL+a!j{}uX(TaO9f z{OQQc%>vGHf5W*2ju-=lRYFML97=z}e-=co95`jFsdoZR507Hme4;EDjqlGr3t&~V3NC(Y!wN+25hbz+yWffJum0 z6?C`al09(8zyy8mn~SLpFDa~Qm$z>b%^lDV=_{(doe{s#CtBcbV1r)nE%h7(yxq`0${-rxF#hYL96iOCql8lZ~H81|b2+&d~ zO}*-*n(Es4skO?s12V9gPejrK@!x1t(0`K~I?bF$Pc@p-#&!D>5D#$n?rOPiB=v4# z%M?)9=VAaGHPVV=X#2j4hwLA=KjEUo^ux6eY&ROdkwexI=ynuqDe;-&Z!!@aGWNL= z(De&adrHp?uNJ^?v@P?YgI`eQ&WxFcyf}Vxj)$tl`1R5p9>kqYX%@HudYRz&#*(A~ zvTLwZ{zPoV@8`oRG$0zUNe_p6{T}Oh0taDf19D8*uU>dK zEcB{JTYy)^^oi8^R%{eN;l_!ca3~1Mqh2_AR;4cq%$z@hqhTp@%)hiaLrVyBlRdq7 zsrc-Jv~LVpMI4TrX9^z**iC% zzkDfEkvyv46~0)%pzAoycnTy5v<=pO?mUJOL(+!j(n74_@NTUBWW0AeV`2{FjDfi^ zA2JC~x0*=>T_uSIH*%B5yZ~hgNi=7Pf3~CnQOopUAzC-+R_|i;Xe~R>F5U%WmflDW z)%86n<{ipWV|oQVszGK8EphmrjyJzL3$Lu{KxqOIlJlHNS1Y6%xz$t_QwO@ zPO~ajkMdpUw?5GF0Ca;!&11Oz4dmdA&p|xHaPfe(%ah6c*~~z&zTA_-gi7fafz=lA zjHy?W`hqWxAF&s?$*Wr5IzHiDw-}UGo9)&8gVB8#GLCxH{1Y&Lc8N~_aeXnW)5uXP zG$LB%l_lO3JD{0K-ZR)o^GwN;iwpm-2LmW8G3xVmtPfY#$@-b_-pAN2zvt9jVsIGq z8pI0F(*l8{g8DYL<-FM#hsLNZCmQ+_v$E`iKFwoiI@w=gK}d=x4T%745v`s5 zP?9xzE0(1X!&YH#@2WK+ zF``KS4~(tsMvK8#$;v=1NCrDc6}C-JGVgxEPKma1-k(5_EW52X&6Dp91tG{?F2ss0 z@WiKdGs<9Tb##ShJj0d__I7brSoXYBhqP{ytw|t&CDD$?60TD3`VKaF?zOE~8nxTd z&}9+tVv)W@3XPV9j{rlWjt)sNm|8USN!8I05W!m2p8kjIzR6lzU@O}n?Z@SU)2vcE zI(~Ij+kpFZVBw{$>@(kiq=Au1e~pe)Fr6jNqoUyFqd#|41nwzpMI(}N8S7B0X9R`UjorI0`wTK-bt5~%Hb5<#D7%s+SWCfzF^b$$BA~c(W384nTc-OUQMa^7@xi%c&!WC zocEq`k-H)9kLwszl}tkFAq1%&YVYU^7)z<=Ws5g6}Qd9RB5#-kR4ne@{A|bNYbxp*!Y>_uoPr zQVI3otnxZ0Z%2-w=>z{<-$LI{^$wA`#aJ_1M~pV2GHI^UhYy0r`Q^^`$(fCUGs%Vh zsi45f%BsCm92tJBAnd^d9EvmV*%OJMbW&G8(J~QdI@I95^CCvR8F#OuKfW zJcxmNQ|Sl^(MR8hCo(**#1Q6Fx)RzDF4JfD>iEsWHpMBpL8u+)FQ^A3B+dcX;5wJZxvhQ(vp}}7#u?N3Wrm)zC2T{!nTUlltzUzqOIowW#2L;JoNb=xlTsQf7Z|axGgjrdS1FYvuh>xAaiif*_gWM0qF?|UWPBGYu+i5FG?A@D2OB5Hnmz5#TP`Alxlxc!UYQAq+&T?b zGc9sT0;2o8p)K6h%s%ezQ8zFyx~b*S39U0>?J(`O>m9a5Fg%-n>(^VcbY2iuNw`wh zfgG?LSCLDM5Wyyvte>t)+Sq6EA}fhGidi{940;}emBa7%*x8aAQ!4vTNd#q(G$zr! zEK*q+$nxWlxg_dj!ZBKV`qXN8brXd=WhoazbDc$I z`!Zq_YbdXA3x4_Sv83@*Vu0d!oEK{_FkU^~`?I{disIg)=XYLnK0Y>OC09TBra@Yz zDDk>VSp3CUk8m{tyz(^%A>F2XJ)PgR-yZIoZhy)4OQtnX>bZiV91NM}A6ZM;`hDeC13xkT`u+^+Rf{9sAXCgk}l-&4);kO)n16$PZsahwr;VOZ9kpCOgYqkRcqZu!j4Aax!!5bmV|hN%E^}*5Jmpy zMZ``&?M7>K65x#^pamweLh{EKVaEm$?fs5#V1U_Tyz04$*@vRNy|kJbzYR;cbLA{( zkyzCKsVQ7y;y5lpVXjNVR?njAPpxzZp=SXXc=7&L=x|1}!s!ZT+G5)01YNUnOx2yX z-uwsSyoH1UwAvFDR(A~7Z5Zf# zh~+5wye&%@oB4ZkLk#gVUIe3t1qN%>fKmha@Ee6gS zcPT0JW6muu=PxyIOG~d>?SY2zj@k#UeGvQ)l?Z zD^^B=(<(G+aPb~#&pPV-55^i=D9HQthF=a$a;Lm*onQ8H&TRN?+?OqR`su*VvaTaH9-y(7p3f(1` zP)%R_mb_84aQ8zx%ILihBPrxr(W57@zN`I`Oc13&Xk>EE;8s~>s!yq zUI4eVBLg$iuhc6Cb*tigg@uG%c+*~xZrrgxDbh$Si46Yzx7lX?EFl|Mp~e0ARJGnE`)8J)B3vn5`uJF_tX0m? 
z*D#s!^&rm~@=TRPBGz2rddum2Pg-WZzmDrw6btH9s{3(2r}5$-i@slJS{Kb_8%FbY zg&qj05=YFKexE4^WT%wAoKy^GE}WuhcnYMd5`*mLa@}CI*GcEt%Z#3Di46?wM!iub zltj*+q}qI~m#;aDh2=flx1wvH`{Rai$Hi9=&JTT8ha1MyCAUK~%?F-#1+ey&1Ym|W zI-b!|Mf5t8ZNnP{|DJpOun)&lLw%gVu;LUQn+RVDi~I_f(wko*{D33>J>3=?5k;&x zV`AsCNiKb;b}|pP3;r>qP;tCO@GU7~k4Ehamdpfx`ebzCpRF3=p39@5L4w;S2#9F@ zJ77Lnn96ehU_Dp2_zVrH`xkAd(CVTs<{$G%7&p0^vhB96E zo;du_+!mJ4vs^KtpkGTPN?1Cqzn*FQ&PnmE$nGBJCO-8jj=2;SJ;hc3fb$p0UoDyy z^!-#xOyD}7g>T&1+9z%?ZCu9jy^T_Miw5BrZpY^uWQCFXtyL#qsi@vnHM-hIpY9-( z_A-^N@3)2Nx*F$E?_kA!5+dEY%Z*w2%5V?Z+FUY!uES)}POA}E{>OF`)=2OVO<5x@ zWo>JyvN_a7*>={{#0#v0Xe`96u<1?;hF64yLS5uo;BoHqWoqhRV7`?oq5C9qHoL;Qc25eqox&t()$NQ0~j@9-$IEPgS|k0=c~8@AC!@y(1K&{E4`lXz0%!cQUIDxu_rb9tMjORYitWrBzF$nm$# zlPS0IAV-3*K%R5`^}jOA=vNyGm)YniLf65 z{oo9~kJFH;mV#llYW#HRerQ8;JMpIp>2uL+M+G6)Kf>-l=|uVAT22pX#`O(Ptoed}Fk&C8OFPSGJ0GiZI@hJrk)8u#VG>2R{-3|u zID~b{^N@A8B!UXW{fh+!Xr96=$A!yMvSYUcC5=fg5qT(M!hv&~8u6beR@Ugt{aW$3 zs3O*35g%mfzpXO1bm~*9zSN|8Vo(w4B#Gzya?FXF&sSZU3xoGXj~RCC?AJ;^4{(|( zfoH}GNFpR!w!R$Q&rKcb#16&UPFxOjE&^-COP@(8+TLufU9QLE$Ma$8b{|QVB-rjt zpU$LhS_R;EaQl6IW_wu%4~k57&t=%-Ij%jqDA@3^{5k3_p7TLfuXDPgA-|^fNPC>U zfU?;in~khxM=ttKRZ6Yd^IeizF~I)|@vNJbX)I@rFgMXT!R`gmQzBJ!{4}dDZS?6(pNvYa5n8@C725f#QAfs??>$`!zaxh-VYhW@S+MZB~;? zy&#PAyVD*G6IpS6pNq8-jW$)?X=RS3<2KC`ZOTYAEAl0IE5@XRA_gC%) zT2gfaQr7LMxsKBW)j5^0p>x_uw#W06bxBWK7~|{J3Iz(g=c5Hqko{&ALVUVY5QX3j zf|1IdFHLGL}g3{ywW_qPrP4yICh zXYoFt_gdn34-&OC7EV}$Iku*LTJZyRj|LMO+9gzA;fXm36qjX1R}YG0azwZJ6u=!p zFgtrzrL(I`A@oubR6pYUw??gF;yZM3HePH(lc)6~+k7};FduCrv_LGXKR9|P=h5Oxa1SMXvCgB_Fc8Hl@-sJ-kV~pLshILA zXNGakARm~8>sH|yB*Ui*WPrc$cnjq{G_(if?~QC5wpyiq^oh1+dQ$&n{+7a|`{aQ! zC#9?i)ml-|IoVZBr;y)_B7{zZG&s)9I~`ZEM)=}Kiw91M74;vC%RQtGhcn`lsw5A+t3pa zvTJ^&69XoYVEs%(i(`kn$wmV+clR3{ShAIBG)JdLZGe3p;O8KpwvW~+4WA!ljJ<$U* z&ba?FY}o&fZtD8wgz1N_yc?A}vc|{Wh2YtLFkXM6`FFU8#&7!7r9y^9r%;15{e4Y4 zfsx;630!=uizY{EXS$QM+mwGW6oO#C^IMf$q6_{;;)s;xGy~xE9*3^J8LIzaSe=Qd zwv=yjZiuy??ipNeoR?=wFfC^ww@$b>x>sa~**?U6L$OLlqzg-?ZRr)CGMrnUeM>!w zMuR<0sL{4vrFCGkoSw*usUQ^@a}ULv3fJ!#fIA%bRre3P?FH8^q@hb8oZvt?z z^j&E0Vbw~fK_(}d2AA?y1MkmQ`r?PT@ux3PcZB23_m0HrGHoMTgg;gWQPl^>vm>dI zo^v$&ph2H|nI3C--Ak+3walf~*~4FcpK}U#^z8hv^2>}Oy>{wPSV;>UYqC7$SRYrzA-xpdb+z8CvF~d!30Bf^>tEQux_i{k(QW*|clKwn z-db#QTk?PSSQZ^a1;1p58FLT3!rBxHt9kll5$R}CJ;yUBP5njeT0NJYXu49FuNne;TynTyC8u_eE9VA37v_3=+mWcZX?%t zo;n()1gR~$f*Hz-9PAdMX^%s7LTjYaNq9x>@N)NzurvJzpX~9;`ta4Fpe*u}P4trr z*P?`pWPu~FESHCG4sr;DY6iHE6PelmgRv?w6qQjH=CdfaaQkq;DWzeVQcbz)^{PHY zd5H-!RsA>Tg%nr{-BKegWN_|7b`$&l*5grAC@!8TD)jOL5`n(jJDl(HZtFQ-3OZNh z7AOm4)k+w$mGp9(le$W}MTx&y{Xq&x_4Pb%9=x1kubnuUVj$ur5G)%^T7>}H2uBx; z;~NcCscZ9C;aP?C*DyM>Yz`TE3J8LTtn6$UJlR&YHwJ_MFYJXPKgbHc6IcS{&y1=%& z1J7oAC$G2bn0$40c_Ti+;FmYa)98Bns0N|C*7xwE~~JYNn9KMzX9D6h369U%!1- z4zm@3s3SJnM)@nsZI0h##R`dL&%-x+kZfFAJY&h=Fwfw6x~0#*X2SJF95ShG&u_%(Kc}W9 zWg7cb0B*lo)x;VE0b7-kaibMeCHx0fx|qInd>nzdILWU>A-5q;=yImnH{#buhH_Ex zy1WfpyFq`W;TVt>!{i) z*;{C2k(~WpP>ZpkTx^tvg;oV>*zbzv%N$3jIhxTNop8U z#Vk^>fZib5Z5N@5U8Ay{=*B<(9KGk;;#y@qgrZof7MoOIj9MBiWz;{_m2r_fSZaS; z4T3bfPLu#}i01Zmf=TSA?et?VO31ph<}86|J6{QQPf->S92$C^4I@az8L7vLczv7=2mE8|oXL0X)lA4=7KhJn|R)IZgJn#Q3aRu6%?N z)+u`*Q7D=lb{qi?Wz8F)sIYpZxrQDyDLG!)Ya-i&8@8!Y%^%}r!Xc;h{XsKbaJL*!EtTQ40SeUsp?K0J409~UJW^O!;ph{3YVc$q*sxMoyt5&F;^Wgz~bzAgN%$Z~F_zC`TX z?py`7Mj$kU=C!~&7xj{pZ&>4*j=H%w$}&eW1JuD)Q>%VNiki4s}4 zVf!A3vhj&C;(VLVc~ zpPZ>CYp0abXLC3R(n7x{5?q4I@LKDFSu ztO%MDEL_4yN3+_n9+L6}?n+**Vi!2yj)`zNYOP&6(B=Qco*G-l)vMJ8pjFg*lRUgb zgBSMm^v~jQTjj)~HfpJ@(X_Msnv!QsD;9z2@Gd84FgV}2a@wuj;LPRdv5@>e6p4^Y 
zc=N}(Tg+jP$|tE(U}q|5FlEt!m!hs~fu*uOH+^=0GZ#Y;LtE_xB{!AdNXGGvT<_6<#$3IPX1wfX{rZuk1l@w?ZL-EIS-)4{f(XyZ?qbBzZY~)dT>6>SU^|mAQYcJzyIEji@uLU^-6DqP+fxQ;j(EQUXV`p6 zRLzVz`7@VpRt^{J(1RvVk=F(GOu-eV6RT$~qi&n6DY&&2w&U!x;f-O+n^-M{z}!3oa831s$s0QSkBF=q%zY>~)`@yZ#j zx9<=v_3d2|%RQN&mGFD{E}Vr_xVa<^-miFk@Bs=SK z63DG_zoowJ(ZM~D*&%c8nJ^&9wv&1&9W*09a@?S2vI`B;6!Vc2UVG~qK21@ZqaU^Pe zz$MDkN;E@n;uQ_qygTj{aU8Y7D#~hY1MLc#9r>Q)RDX_CwJpD{Pk&(Rvs~fX&{H!| z-dRh4H%Jg4fO*2w9yk(OwZI>gb|0?#O_GFVYWFd zmCZ=&fM!Vv`^-SEo~Cr;kGy~(Qgx z9ttX54H=4*egP^+^Mex*LXC@IHMifCh(#@gBf`Vm;*=4LqdA?O=@IFfo^CrRU%fr@ z9kmsiH7jjF4uh&b*oR_m8d~F1ZpW@O;by-B&X;dahc+wBzPt%~^EUs#GtKp%rLW%P z-qAS;Ash6I^P?w?Brbng?0s5hHr*1ss^MR{hr*(R_=kgc8z&WoI5$a=RuXgSJ#&WJ7aR>^- zvOlI*UBz4u+J?u#4lKm1XsP^=*yVPLm8xr2>QmEwRHUxQo(Zhyb>y&k(g&t7j&Wdo zAXkT@(0iAIy-SZ0)?=L$eR4aaRqq6ivOm{m_N=mK>y@UR2*FQW)b2gEXlhmc6$GUJ_lo-n2XDmZ>26CK3n7^_u?DO7gKdG%x`>x+5-&DVrf$S!R0n^Fr6?wT#xl_#AN;` z#z^noeko6xqvWsWoAhtvKS@kfemtmfK)DkxkDBdvB$Q@GPBQYzD4jnV3n){{oS(I& zH_wb>T>0cdn2aEsz;UiXP{8ZZk+0@IxLnKA8;)d>9#bgYJyRcIH&3t*&~|a&eYwl+ z(|xVwANX`qnKv_DzK5~0o1O<_%y%Q(L^mA2{MmAQl2`Yq`1~zD|g1>J3=%4Y` zHYYRC7dVKQcBkDU2FArDrtmFqhRka`qM}0OY}nlkY9XVGIIE2`6GW+jGsD;V?evZE zvz2P5)?9{Bg-chwp@=L)VEP{ycc!|TxF3yUp^^i8+)pJSp9tLJgu=%bL*~g$T3kHM zmZBTD<+%i(ZbB)&f6Qks%II#6N-TJ25J?$ek8~ubSmw~se$q*bC=6=GVr?GBWf$yV z?zoAYbf!<4_smB3j67D#C|;}sI5+i&PzW`)>0Tqs-^_uhE?{D*%nRjq>T$(!_Lak$ zr67Xv@aX?CmLo{x7|iJ@oL;5V4B*l>+v#I0Fa}@D{5Zmbu}*Jru1YY5#tQK{lBDJF z@q*sAPobL0((z!GE9{a17xR>dTdCehNBS^gO_tapL7VCdw_~q;abT+e%00&zhjTjao+_-4L4ojS0Z-}kcMXx)^M|)|S?pM(~ zy%RJ~*x2WA3B>_G^@{AZkkK%t;9u-#-r7o{7nu8 zxf017lOx^ZR<`553(MBKh*QSMJlb&upqFIMBar{YmK8r#r{geCMCa}0-8BttctC?jW;NXV5l-XpGab+N1 zvzuah%_LHWsJt07xP8gN!BiCTjoGpzwGE4WBC>-l?Tk@dcM7}VYR`Dj`_XrEXd12P z{HS%Yc+oNZHeM3C+kQNsj91IT6VNyz&=yPJj`oe6I?~#x7f5pP_Ih{VT=d*5Ha$=J z(3tkc2c}$the?X7#mg<^4Bz#8KuXKv#9TwnYMmc;LugtI$e}%aJilKu~F7B?UGZ#T2Xx{A!7kTw!pp{GUtt>BSFJ(-fZk z2`jbKSF``?URZlB|JNlH0d?F;r3yPMwCCYNp4q_^#P8+LDq>vwEnIe9a7hZ#it{ON zSZ(-jH{G-89=f~UTar}Z2fP3@zf;z1(V>3T@#qYoNoqBos!vVLv}d}-sd>Bguiuss zLR_Dq6(8j9-S%`Ql;IyvQK_;IA_+vY7dt9fJNY^>m8`KW6ZP7E&4E%Wv*@!6(aVjK z5AuIV)X0o1^pY1ylWn^vc58a|$AMw*a$b+y7&lj9?uL;cX>y zv0g?@u^q827A)f4Q>jgsAU2tCkW=lav5UqPSE%lCN570Q&QI~|yO)1zT~do=nR@>m z+8FQ}F*0#_sFox*xO<_7o6)MdF-LwrzDXYveHo)%(|aOuExGvO|Bbl!4r($Cw|zknRGNTvLN7{{-cdxU0t!kA5K17FP=r7zp$I6Q z(0fx+5FzwlMM~%$0)!TNFA+jj53}#gK6lRDXU@!-xwHRH=9_$Zzf9iuS!?~)-`u*^ zc-f2!Ik^Zj+&EvM$2DC&FQ$p5@>@PAx5T~*Gd zv%6CsQooJE6fLyTSfhVsbcd(yu1=uFd)Hyb`se#kB~WN|ANgbISi*t%ic0GUa;kov zC&?k)uH+7HzoZ_^9C1WyGjF@cQ1$8efocV&XEV938?p)NTCDrHy(12t|Gb^;C%rbM zhRvCIepUkD^F5!;!tDtqNV3rYCQmCDehTBNwuyr+j@=~x*-qp-+e7R19k*?*ix`bK z1_n{W3RwFb6yhr>wY$p0!~b=Kb(s-x=Hc3J!upAv_~9V~>+Tf8oRZa>B2_cj!vT=R zg0NV#_6BvBi?Ytx@cx-7;l9lL72~hg%M~q7D{dXumw7%Gad>GKVvjKrd-hiq zRsBC}kkW~Lhs(@BZ8Z7@pQe@%m0Ct{h)9GxL2qnG7rSmR=f6tIy1+S^0o%7%G-@NJ zrFu0V?(aL^81lpI9A6xvQyX?4tLvg{TFv;V9OPcsoT)T72LmJDj;!qr@|QcJACi*G z%HEQdnF*OW0~Wr&p&&RCf4X&iT-l@iwln=t;CI52FD#7QhcWmJzguPJA#vna2xH!* zy_XuuP~J#kISHKuu~-n90_Cc-1>2^*TQDHhF(RN1YC+aEkT6i|R z`9eYCG5Oi?8;cF74165ftS$qp-BWH+%kG}*d*Qq~3sI8Ln)&QSM@Fvf|4E^{1*HnE zDj?1-d2&Ys#?o)3?ffwWqGV$8Ax4Ze+{;fr?;p`Ec7wEZ{ucsJl2YK1E?lhk0*u@ast&BWj9m89=;5t8Y5+{a?zXQ{${-99H0Wd-E?j5^t>`R zj_~f-bnCy{)c(Kqz5iEkcMAKc*7Q$&=?pRrSz8PA3v3kG{RhK<4w9fCl`966C3&d) zw8(8r6?IQv8>H^I1(-*STHMZ-#vxGt$rj<8@S8F%n9KTeLdMOnF7U~-iXIHqa%)*` z@*UNlbdUc+?u^Ts(%`4-{dnz>2|xGlBPYU#*BaA3YB5he!<(o0i=O6Psa%zXM6T{Z zx4Kv5?u^F>n=SrYCKKK1DJ(T>$3xz&@7qUJ zGph^Q2CdE()W0XMI0LUVEH)Myf4xAwDT=3HkS~v-B<2RQ~o#l!-p{ zqi=W5)G%Q7FfY-v>oZMKpo|T`PevW6Xu9svDM|v_muZBxIKGdvg8H&n`ssmzQKFLO 
zlKx)9c`0r@+H8M*`(^mM!7NeL>D9F-Zb5YC{dNAW_Zl8KMCW+Uq;-kK6moJSUU~-d zkt;0>Ej29$J4)voFp5Cjb@`IV0+)N)*?U();u5oY^{=72&Az|kdctc>{kOv#yL_YR z>mS&mfGzuZ8coa==j#YXSC~1kF2Ph6K89NLQxr=uY>p(C`q8p{1(3Kv`uZ&{Hh zsgXRiu$ud+S+j9E{Z$N(EganZU}9dXfBL4N1EBCzIr-&3qJKop7ORE59aV_4=@zp5 zh_?G4u01RE!)93)EiT2|FxK_%;kfa1ACk64Z26^OyH|hZ*hr;W9#@y5&iQPq8h2_v zd*|CEl`6SkuQ+}IHLf)yk7wSFt{m`mPa<1SYiF4CrN&s!j6&1-hkkc*e_e{Fp*Rtwq z(d653!sq(we)aI0vJSe8qe^hBwkV@=o5gDr?Si4y`aFZWnF>zj-kxHyzb-6EEvhuk zCPcgfa^<9}r^ejceQ!G_Qf6b7t-6d$HC1wIlJwXXcmbmyn_yqI<+n>oTCC-O)8Rew z-VVH7v!!`58CPe6lXDwNh1OAJu#E9(g%K(m4=O%t2abVPQ97x#{}=TdL3acyR_2Zd zq~6cExQKE2Z3&|RzA^{i!g^3$yAIPn0=zLndr zs~cf^6Sx+)pLar6+Oc0vI)g;aetNf%RJ({^Qu7Nh)Pht zf^ygUQO+6YY`b$~bPsrCWJvftFasAesHRi>E#NBc5Sck=u4Xv5S}8;OozdzBoP3mT1OvF)ivc$p2EcJnQx7X>LL zskpSR<4%xVpL9fOto5u1p4e+(UN|b7KdtN9NkdkANr%@UKN&UX)+jw_j(Pbow=)qr%@gcFMlJ81_WPCPmJk%5^g4ZabF|YL zw=SPgEZ(%o%qJ!CssJLA6j%>mUoTwb(cIv#?=nS~3G1Dwvm`Z{EJOAf z*8H3hBW24QG?rO_)3QD$MTJ{*M0^BduQCp_EnbsY0vE2w2-zDOnMksobBPSEMR5@p z8~fmA^S)5f0?vX|nTt)BeR~hg>0)XJ(-t?fj1)a31^=uEJgXfu&b<$$%u-cO(XPy93^z3BVeNEZ zkOVfX{Y!*Y-If3HO)oqH-STXsyjAojbHgFNiafe(6GpyDUB{{($*JRu|M6n9%QC$F zHV!wStR3eRI`rL|R%iQL0Q)g{lR!8$^=IzymR#yN0tzf|!YuE$%BKAV%iSyo^tlmC z*pZY;a$ysqQ1ji#amo7}Q+Jv(O2Y5H@UZUs8{)&23@V1#qb|0+2U-|9A#0|Knq6D# zrPCu6kWBlwCI>KWy^2Q9w_@wZfr9Z%9_^*xJBSrGS#~q&*E#0xJ3w}K_^D6LTON$@@ zA@^THy5E!p5{FY>dL13DXhc+lShb_|BW+<+cl46^xdcVHk}cj(sB^4^qPDy+ky80V?UM#ZSDvo`#V zJ*iDD!wzg};Q9O0ie))KSnn$9?a*s|QJja^>K>KiXv0|RsMoRQ0J|G3M?E&+Z-vb? z8dgqZypv9`T2+0j%5}mx(Kn>kjmeCyCgByMQO0__<85@0j~F8teKF~JMBrgjkyU;J z*R7DUyA7%GEeIG)ldTXsABtA1GmzS>|G zM3o>7A0~1l9aij^7@H@zHS|gcv~#9+VeTV`Ef@zsgA%h4KhOVI+K|*kRFp!MaOo-LoHSxkfrD*oqh>f5@ znRZG^e?a}HiYde{UsLV1>DS6{cFEQ`%>c)qZo4UVUg&l%+Ifgh01rCVs~T@=TtCK? zqpY|F?wc`CAzTy7y)3oYqt(vqGv?J|FLpaCZ@+x9&JbSs2!ynVMreL`hxhQIoIJUL zd$2^#6Z*z!`zb6o63eZz0Rd?}o1`m}%%$MRb@mYtFE^}bnf(OgC)mi9hX0rl5#X$V zXon=Iro;=W6%b8P%!)FHBof4i>a}Z znFD)QsVNTXT9N0NHIt7>xk*{R=*msr!+1WcWN6fsM6e!_%wdRp=3U=3xh#=4l+c2U zZSW&EeRzQSit5y(*~c%ZIiB4;BF(2?4oEF+?^vK@6TuEZ<|VvWllrn+ln(-kZ-nZa zo6-8x!Nh|d2M4(qn93!x2SV$-=*8A;lYag_6ast5lU9KH%Iu23~V2S>Nx^uD6m zSML(BGH(oQ`Rfupy%}N=Ntlx!QKqmYDPi`--A6E#VG~+eI#{i#1CQ>6LGT zxe+|3o^q&p5q6ZUgV(o$g+ZMj15Q$=CC|xl7=N>Y)O!K(%&vZNNW|XdPe(_`G>t#NA`Qcq#?FJ8TZJ$pJLEsd3{w>oIO>D^?l=j18(-<`V<*s6o8 zKT1%&l6Q;WGG8>gVhfS2zRYJs8kO)rQ`o4z_{?`9+Sx*{)lde2L12^sa<^p4UwSL(ZVmKod} zb?7$@MYo%OmzVg*FxpR`pMF;5kV(Hxic++8yeszEhY#t~^zzU5&y*mi^SOBo(~?~C z0SLh^lkbYW{3Upa>2h7Ee34RcSXO8C@*U0wC+o3Z5W3eYzSXS&SqB~7x~Rpq8OmKv z3vmsVr}a%qHPsn$lN2dWC<+_OjC0D}ha0qw6>=7uCY$9Ny?#Wo7P=pXcFY= z>PVu9uBLgodYQ^7VU;WEpW{9Fr=AaQ(;;%J16oIdX*hN=UFR}}A@vaTCyz4P9F$51 z@P33qSA<+Ef+0mM-CQuCgj}QW*}kveWk|vdf)#**InpkHrrapfN}8HWu+_AvYGR~= zT)_~&Q997_${Tg{_QSDS)#uE5^Cu?aw%TIYP{`1bt)_BI4dFJrtsN(+FqnG%sa@qN zyvrT2>`{S-{sttn2)c3%FaBh#z3(p3VwCnxW_!VZKStJB`*pRh^Rsi3c?^>Tc1Fqi zX#8iGXj142#=3U_xwo`9JU~OIjRjSQ%o2PrZhwdruiNkGdl1*`qD`N3o+Ef8nxNfa z`hN^p=M(3z-loepb`SFp3)&@D8D#wi`xCzWA9#&iN=N@4WnBMN_1FKWpzG#MCg07n zIU(i`@<;Pbb4k{qnoj2~5I<&j?^8T8f^sbDQlm-Cax>5*Icb1hVafe3B*t zgbsHdEkY&UPJ5Vci<&@VD6k?f{2eXbC}gNZz@tJIr+AV`2trdte9B&t?GC6rp#EnOy}ZV zQ-IuVK>T!ln-Q+neRuz)7(nLY7aI!$uOrt`>74(FmXWOUs+B%&i*|We$0ns79JXEI z+MZHOX^JZEr!UJ=be@i~0yHAvgm|$5I?BMtJv9p9mz>IjWxqBUYg!Me9K}?^`#6MR z-a3@$om+wU&_2=q_iiYwU8Fvx1PJ|>X!%+#f^)0 zgXV7k{)t|1c6#Nry_vqF(wjQ1&VfC9k$(K_?BE~z0lBjR|0AD;z42dtYi{k1tBZD1+R>Z{wMy`Pb#&9j%cXDnFCgp7oqLPk_gkPuPPbc!q6cL- zF!ygB2QH08qoqdP*|Ke&IG||8FQCHbI6C7^*}(f|JDi%5hZe1R(16AAiGzdIZfi+1 z<=PxqM^>!N^TH9JMo%xPT||0AE*0%p)*6!q{az3Ef?=)O3k4on&6o&xn&8mmRF~FL 
[base85-encoded GIT binary patch data omitted]
zWT#S|RvwMvq0YIe{jB1vIAm2iSUoGbg3gbn07su8;Mn<^+dKkex$BN3W1$S3c#!?Ue0l34G!aO(Q*Z7}>!=Okm+A zk*lVg(-E;EjS(T1UGAE80&eK-Z6qeBFgUvtY$bK*RzD>w@vGD?+m}KsQ52US)+A+Buju8+Lbk!3BNIwV7v28eQ zYIn`4tT&~6!^FY0UB(%tI+Zqcxss68fq>u!%y?Q3%{a2KwIUE6^6 znfNShz^|X;5tPxeME*Ai6OlEE{e;ais$RDCg4EkMgNib95AB}-n?UO?E8@3YE?iXd zGH_zayml{n*Z2>OR=wB@UHBkSWQU7``{%bBE zMuc7f!D5*zwL?>Tq~a3D*t}Che@c}9U>((u*(|EMSJE*~8I`cRUDH+-)TMTh0Hpws zHZ}U~PU_SELw%lTSyn~pgxPXs({5LaHE~6?VDIafg9l?3iTAt3M{o%_t5(D*x3X2Q zTCRD$EOTU{n4CU`ydfkcHkTr4ddCvYmEiDeNb|KS2Rm2HDx}&h%$#yJkRc|#nu0Ym zFh(}E^irnDp;p;yoVbjedRE@GAU^2=V-D|I&(4pzIR!9(!goMHjCbK$p+Ss!guCYX z{p|W)c~Lu8UYDg&@)p*4ltRvidR`iwt%P6z49|GJhT2d3{yagN9 zYuuMrHm!*;jp2xwn!co_)MoEBv*uK@9>)6|XcVJ;(PufKSKpc~u{yqVxx2qdQ{#fz z)k89}t?AE}9Pfe1%-L^BVYW!t)KzDcu3%YM;`~|kMO8?Ghg(=6c-LHtXG+CRO(b5* zB_`c1H@#6PZ((M3a3N_Jq9pD%ii_*U^XGAMBR;CS#)gxNY04?v(64eGi>afwy$|_Z zH?W`&h66^Bl$dYA7U+YF{afx_^CGhRRs(hOLbjc^ z$?oZ19-U%I*Rq5P>EPN!MPIA-+=_^p z7F%QskvZYdKxaR$%aM`cUu8Jfb~G}yLuqppcJ~Qt$?}z zu61=*{2mKjrxjJMg8cMM%yx_rWfRd^QK?qQ&*{| zj{x_vc#rTO^5tLh2Z$+DdPBG4oZ-bqc?((g?1_*jjc!)$7feK-W%7dPI^8SmUMvQrZ& zagXLtbFOqE?ZfQU zFwxZXcZxeUKCsUz^HKAFLR4*%?iQ`^Tn6DrIEdsKmlx$(bxLB*b*J59F4&hiZML}S z7-n)GfV9RJK7CRv!5
+6mc@fkEbDIZ6o3Zw9l53is-rfI8??a*lr8@=@f%WtJI zF{2Bzb$E))fSd6Q6ffJsu6dl{*>p%cR_2$-q!fce6s(*O>P+xt@@J#uPgc&mbtoh(BE@tg zB#$M?coc#X!*WcYqV6p7w+ci>`FuB{w~q0Muq!#XDs8O9VYiC7aIUH5aG9CXGqFU1 z%&HvZ8wfG8ySSJ5#12Z(N8Cn4U)`QplY>_yG;e>(_sh@d5v{n+yO_ylR}QC?bKRhd%2QJSFnkk?Hh zjUT{&!+YL{N7F8k(s0V-;AW)jm@VsSdKqv}Q&B^`T7nqbc(Ivxi7JQF%FG0KAcJKa%<+O$C!8SNnU*N`LikG7AcO zY4LTtrYBvM?h_8R=r40Ai|rWn`Qw{$GrXfc#D?m|j|GOdEz3*I?R*iIPy%s+@$t(s zcI*11<4%oNDxk6D(1&|sS{AWg$lnD5E9|aCD9md4k+$$b8x5S?wu{-C_$20~o$5Cz zs$E#c#>y?D4!OIf`p3KVPW(L!R&+`%zX) z+q<$l!s}4EUsCR=jEXf4dI=R7u>Ux={sv>4j4Chh9wJN8Sg%14oCkDcecG%v-sg>SxK}PI?;ekELYodgScNDttJ8DMD_k!VL(uQiN3!N%O!{ z_|tD*A=&s^6AdF|F4dMWxHbginbPOw95f~DmT0oN8uZmwCAmQfd`k;L$b~L42J$>q7mfrBp)jVg7J}Y&N0LI_-9#qPA)M$#Yb)zRoDf zVm}J8>tr%?XilEZ1DW~2+b2HSkVBSbZAU3aSAJUM?j?HW|EnPboMzb=r|suUlRLMgBM|4YBdITLqJ_jLiK2heI{x7@=)fuVe8LN|bkB z` zGa75ndKnjxLv`J~AtJn}!p_brux^Pjf?8DO_fxEcJJEM?UP6#94)s|uKIL)RrCn5% zS;rEu9j6=hctgi4A%=XE(4svVRZX?Ru%W&DOk7Yh!o{^oiLW?2Q&n&R2%xb4=EFb% z5#X0Lq!BBvMgCn;Y1Kxa-N@2yWyj@KIx#kg>&EHOr&Uu2Up-HpC*za2IOoyUa)^rR zdK7s%ZT2lqa$y6~2G>iSQ@K-haS4{{JicZ#R|6o9N*Ey%3=iTg z$T5@x53@x2L}Huq-gS|4C?O?VVnCmW{gkM!Co`qxz!Kt6fHA9Jb)#t(kD`sWG9~YB zAwgBOJD)dFfjhM&ge4Q8xEuwE%AramQo^{@MIx#BW5a%^T#G8Q_@ZcHjc^7S8@deg zEmO~t068)-Vqs~Hl%CpG`?%EdrA;Gy8#X}o?0`E>FW18w({_5nO_#KYon^{(ERX#9 zk?%qcVfrjx|Zs6(h7aVR6zR+=uqwEMNB7#*s z|B*{sDHvoDwL@p%f`>fgqH}ae2#8As1O>L-fHe{naC38CVk8CpWA%Tc0+8 zIn&xyG%1+XLK??*`X!E$Rvh{ z)NShzF^xioU+DXU8Q}{ZYi8HJww88c*F}e)-X1=n=p7bg_eB_aKbEoJWNgSSG<1+l zW6SuFIp}(-+65V^a;5n&P;mV()6(PkoC5 zx7n_B0^X>wjh`PcS9h-OMA%!mh{Vsw5++W^WFpergIR4+P+V9XFJj3LC|@p~7^!l) z=VsQTPMmfG<)N`d0f+aW#0w}3V z(!ubu@yO6?r6w;Jx8_&-1&3jhFir#uxIF28q-?80K@L$56H$8SCs4Bn=I}TSz(NEG z4_cNi3d-0FFy!~tl!dt6Q@eDr;_+B9kNN>O;@5w{j@{?B^>Cmo0#G=X3Uapu^M`e1 z4QMZ#(9|_#5ie78j$}qQ3Hk#}5Ih`UqJF<(l}7^|z`O3act4c3s`qOvJ!;rP&5nMz zk(dJGee?f_iF5%Z)Vb0*%tV!dr5}jI@IC3&NndsMNpfG~7ME6bcO95m&GeG@Bv{c| zI@pFKMGH<$F0L~uJ5Mj%C@UkPM0KgM<1bwEa#N+9{!yv-I{hmUSdW|X49W)-3A5?!_Kf+CK>#(O7w zC&(KMR*bi$-QW=_-z#-dMOs)pm&=lxMG+xz>QDe8>IQb0PU5F!vDaB*IZZivVI`VL z-x43~%JF(5<>r)X(^n*C&nq`2ZrFUNFhAjjm2<`kkPu=xB9=LCUrNuYekRIQ zp48OD(w8kPWedgK?qn^1uyF?-`Xo_eM%qv0ac^ahR+o|`Njua_Bjf;Mj#z!oU#G1o zYJCWi6@~9LjybysNJKcN#!ky4UF8zz1d}BMV7~h;zfijoI-`)>PvD_5P;fDd6E!HS zZye@bv#K?l3+0q+4+I2pgWcchM(BGIwdZeR{WtdBGN{ey{}+Tp3lu9*TuKXtqAl*w z6oR`$&;|-2NFcZrE3{a!0>y$`a0#xZMT@&Tfl#ElR5ri4v$Ol(7dvzRbLZ}xeUq6y znas&^PR@DG`F;$wcxymdpB^~XR^wc!d>TUJ>d}-I@R^^MC&koX?FDR z7~1II#;_FoC*VaGmqW+_lhv_nTeSJ`aAj>YUlux{5fmC`%5+6Syl8f-hg5#$A=IZH zsps_KFykoyhLEe6Lr*r=*8A&jmoLlu*6x@+6Yzz&IGv6d_LIR(kA{pLCBi>l$-NKs z>z+k`w*jkSCA&`pZ(A%Zx(#X*L~6#|8c+boIgP{I)1*dO{Hpw;qk`ya5z!kZn6Txe zl!6LI=90n0YWS!7OTK+Sy>a7t(Pci>z2st~m1}4vP~+=wp7Y3jq(Fvr0f9rEh=(Gc zk$6$md5KMaH^&MVRDK>S5jX&&EW0*G*FItn$Lvd{u9$d9c+KFA{nlD~^qO%?oUz-F zb`^*qTT_W&r}N2IOFM{4WS)9>IM8Kp`>^x~xP^csr2kZJ^89$AUNVsiJkUlKHZU&He~2TyAZZhntd=Q^hr0G-S3MK;f-8JS4?sH^rsp0@{4Uem^@R2DS0K3HAr<{FzYv6Gxr?J5P~(lmpU$?Yr(zduEM%NB}H)cm(&j?9QHh#a0wn*GtWoJu8WP=!=SZLSs8R zZe{M{;6BFYMD~d*<#zJnZRMMrLsu8MMdG(qbp>BE0d3lcU5|#9m5Ej$E)r9&b#uDn zS#mQXJveCte(^JHAp|@}x1qE`i!kymxTJDsq*H&qlWcS`{b}Icm8^_=Z(}UMQZ4Af zp@I0t0hX_0hL1PLL_k&aB9d!}ZBvnpSv5gImhCbAV;nd-Vn9ydkzZUrO|!g290_$k z-1CxtrjHRlpAU?}FbuVixDg7L6qGcCf};`Y68fDUc8PV8QlnTK+oce>npb%*(hsJ! 
z7%Ln%Ga`vAj$EpuNuEV2JXZP{u*|X#dcmT=?#2fH6S<Ih-ywM z#d+Sshq3Y0vMn+G>0YjCn2$sQjligXKZuDejmiH{{`(cK!$s{?wIj>F4|Dp?J?p+5 zXr)h&1bvl>&<)Z{7}}*)Wb*zkFvwjiTPpk<-do6scI;3C>1jH>&5*K96?zI0NGxQL#2^Dt8$Ko4N*2vd!v+Dy zcRWcLmrp%F5tR09jYCOC+xurft_ba&L{p2Bddywe*{=(v*&12V0S(f z$v6)l`!*MO!iofi&wJ#lMmmHA8C63BMT{~lKM#|o#8yQl8qv|I&OWdmUG%xfBn+c~ z{}#K_f(U?~i=Kxe9V|x{;)Rdq4|5-Y2_*aAe;+=JLT0o7_@yakToZi*lwni@kzELr zaAKD^Q@OxN9PuIevS(S|q)h!~8%<{WFEF{DrxVL&urTAXntS3$5N2%v`SDIpe(W<~ z+{Z_U9}RvAJ60~2Z0n#*)wV{YQ>W} z#-)l@w%{4^Tv-$_V$8#pWNSrdIa_Gj_m4xr`zpb0n_C&{*6_$ujNRxg@P`W&8=oG&a7)xY4Ghg8-ds{oXT;>GfLO$GJfR|x4sU>VV z@zyDcB%dlL687pr#aH%^YK4~}OO_k8w;q3UfQ5cd+){s^7`?x!idb9{ae_c)1INXQ z+ATOA-{9T*`>A`7fMIk^7PrHrcHt=F`KhP6Q0kCmz-Mm{6woXA3~pmtr`Tna1oa#; ze-l?;e!;7FUo+)VV`R&DH&ND?wT3`NSxora415~EUOD|}cE8$tdP3%#iqUgTkw$pr zF$WB;`h2BXrhe=!NgqopSFf?p!~5d<*Nr#WDAPV8=e@S_)C(qjPBs=9?{mKFd=mHB zi#Ho~RW35JMlDlP+ie1ws4J;nhHGUOm5e1wHg4Z{;gHuf-Z9-F0ShNE=m{?70X13O9jvENbZk$K$@&^GpxMpSd#i#0%nWI3_yPn|_{*nprcQw0z2*^ zK9SS{J98DDC?<4`yiXTrc&>fXAi< zBEL)vSxAew4(x3fxYSPTKSUJ}XHvV}uXA~dApDgT9$#wlhepnSa7=XX%biAflZFW- zzoC_0QBi5$dLpx;`_HI5{xx$O;&q?;Ws?~|cd?Ao8P1D3%wLoFt!%F0jd$7P#v;-f zc=zjv7)|Uui`^{H#o2y-WcoS$A;2v9c#eccs;K_GvIqun zKKRQXfE%2l%pxUsYM6C~ZP}j?d51u+GEX&3Y3P{1@=L0UqE6F1iA?M&2X8XNSV%Bo z)pPqXVs({Gyc7kcsDl@q=%GOiQW8IG7DkGhatWrZh^n@_y(+Ept=j*h6dH$L(I+0c z<;ZYKaIgX<-T9^u+6Nl6%XJ!viHOH4p8~j>WaDu#N^~oRElQ^Tqlt1^gX!B=m192V znW<&B6j3e{qmH6DYEdHhGfCI8W=#_R>-EOehhMn(0eKW!wYfY*jRiZ zdy3Yu^=vmM zW}>Dj0PLFsh{0Agbc}?YlL_WS@Vn*Bj>5QN1OvS6=t=ghb+}`2*{z9UZ7~|!l=jiH zX_8{OX>52T^lZ=OE%`5IhszIVJdbE&yM^kvMwOL?*21@I*yeEK*X7EB5IIdGy zJ_?R&A{9Yx$5agI>2L5Y#dD>`W0tVF_4~WskZxy!<4BYEuEU+&kv-5aN;L?Sr6t|{CxU3ve;*yEjux@q{g&=7BW<{ zt`Rw(ZV4cPF+6#1j6O$`IU@q8(Y37A&p$kPqPVAP28024LZ^t4wva_Tav8w+P-95U zG1QXP9U>g?ObaYBy8hKL7P77y`K(Z?YAc8jD^4Bj!9jl~CK&0FdoB(woUw!RY7bGZluwMAbfO zbC%r%64pLlbD;Z)`an9zW@2sEjvoYn4ZK^K`(M`T|Hsn(zx1tOjSEGW(f(bLT$RQS zdUlV8ZQjYBE90Gwj5|xLM-YVxw-tKS@o5!18wI7;TYW_H(@OiQKU#S^DOX%p@n+-d ztRiLIy%d`spf)MYoUA=iuE6M_i`R2tt1Ydw^QSQZk(&Fj9aqgk807WY&_}UR+hG>- zAi&pOs%WOoBcJ1d&R^w0%QqvN=Kc;K)!;rzGxl8D7t<;AmzU`9br+7H-QBRp{AR)m zaDVC4`{ULhxn|swnR`Vf)-yUBz;bDrq{f+*R3su?Mp?L1ie`m77bisvy}!~8qP44& ztXE)|Hzj6mV!7&j)7`Kn=p)r&6!2xPb4|>&nf34i4+(p$!$l}d@I>Hc$MRuOi;UW+ z$k!P+!%t||!!-XTBh=&zhy90@8vHbY?8mV`em5odpX^w6PFw6wnSafgd-+|%9p>q> zC~nYF>6!f-fyuGDwpMtl_=)h|YuP8LOz~pa{%*^`%(-!K^Ne}!oKXYyxl`Wbi1=<9 zHLwb}KcfreURE*P#Al?qco&#TpnL62CjACxp)In6IM|)mf7>$n7NWU+`!3aAdB2r= z+(5_7MtpLCBkugL1W#gft^K3h7VEF%Q7OJqNX9TLCmP4OfM#qkhwKw$gq&qDlE>Q( znb{3Z+z<@ohHu4jy6TFG`DEG>IYnAx6BZ>Mqm-Xr=jaPb4hZGeq7mv9v?XR6gmo3`=RN$yzD+GugF?gVO@p8S~`rJ zN)zPQSU(TVYbX2@`^B|wcK)Yf+0r}i^6HIeTI=hQeL5HH zHuz6)o&SZ`23dC&{oZzVvE6|7OTL0 z6cVh>V`)Y!z7T5zW*imy{a940rzkhOq*TQedumGW`!v(RD42XlexRB5vc;1cTT<`o3A* zOOWwePQu}SA>ziOrwR^7f3ve$zastqY>HZI6CBfOVmP5u?De?S6(P;D)r#*;kvJ(g z(Wsb?&m9u5#Xu0e;_^})HRQkIA~sER?Pq@Ca~Mw)0>@#eojz7AqcHq$2@f9V z*u^+-$gZiaR(W|?UjSRx>Vfw&t66{rwPj0!kimd^cp zx)nO={+oWZd~(wqpEGpcpA#f=d&9UtHF7SHv~LE~$lZq&FX)&(SLA*-?h~5yKy?5! 
zVHUYTK03j_n5tS>lt2BBAvP8YqG5t{!-N_fvTl5=74nsDce7;&JAE#E#-NkU5YkM( zGN)}Q>I+s4%NY(;Q@FM*lyMzbVQC@4)z>IZCX5?%%`6$<7v!vbkF*WPP zA;)MyBWs-?iXG<~!}qy9!*44Y%n7xcV^|fQTgOna**=@$vVQI4bjugq%8BmUnr<>T zZ=&koTbdfXj?DRow^z~edVhf;PkL~inH04jL7WWPf~~~0|HG5_^-0=HtF*QB>;hG} zhNa@W?MFK~egQqU{m2fMt3#U{pn*q+2C#CfI`fYgECW4cM0hX~d1M*#^SQ-^PZvZu z{%g~QoSfz*rkAleWzyiB;Yj>Tjp7{=j$D43O~jc5hv`k<;mZt#FG7A zgA;GrF{Q;b@;}71WR3LYqb0;29QD3~2;&KBD0{$a4sQwcmihDdwE(TzKZKPhFQYge z&!BKz=4e-{FMt-vcj;I7r2Hu5@hV5JA|Ysv-O-&e3%?=pV#iNDtg9CUP=|m$(bb6w zbejF5sUANQU_#+*6F9orZ0160N!iD8a!v}{Ri;1<`Dc>=f}FftehtBpX_IKPlEU1@ z0pwnn=4GG7<%v1IYyyBSeJl^xWJ@!oEv0pse6o)2yB7a(F^5x&H85>Zg z=VyPdE9i#@`KV46ebBlf`#qw5q5eQ>JhqP0M0Yyi)boj3CrhAN352^{7(=M-UY0n~ ziMLxLd4!)Ey0g|A4JNrf(mrP4-9b?mhrYhw^zVUNub8Ai0GU7c!+d*-!xT& zzUsNrsYXNNu7R=xBIosX5}PRIZW7iK?7-v2s~+ z6vXSfv-wC1*Fd2y(I>-DV!;4r)7Xgu8jaF9*M)?nzPNCfb|MyF*ff8mVk@>1+ zJWs@J1DMe__(e-?P234m=V~=m`3cMCUYkqKNv%~383W<0qFU*Q)JOmDK6ixdv=~=N zzugHe$|#O&_*oQjh)^rZP86&1akY5g8?PI#9Id66|1w3aiOKcDtIpNk#-gdwK5@AP z0hy>CA~vf$1NoSa9_JwE&%Ykjy?k`7^g~4aBjB!BOJ^_?%YB@T9E|U0o7i)qTSRkO@~Cz#~fvV2{a9>H=B? zZ7He{8eULVL05D*X~_aToC%wW!ee9oDjEZ1x)kzvqiG(mk?BVd#IXHNoPvxA4#-j} zJv!Id(2nk~*bGegPN-?GQO4rLZ5Le3aE-^!d4{0I2Gt>){f)H*t$&oj@G z-f%)|d0Y<*h>&!F=G51>LnyyzB_UG-1J;}~WMFB8BixilN01$H&yC7XpYMHK`|P0k z1flLjwLsab()2`$@+{&W?)&}w=NAvR{hwVPMA)+=^bnK|Gs`9$3MKYoLd;jT*OL>w z0gS+UI;R0NnYTa6r>=XG&OHqN>Q>hu{o_g{{({ANuBN{&HD+|L6+Nw~P8cz7U-B2bKr~s4*_KLdNwH_toJeYAEf)9x%AOY$Q#^2;zETDIC@*4B5Juezn&nM` zCtWh$zx%tAVyyYmjT#{Z)nI16FAEI2vQqKjHc3gC%FF{ay?Vo)G?kUh$($BB_+8VW z8NZNrxA{87M6=A!pxgIu*-CP*gwo=6aps1X8Sy=Xpy|BQ*pJ_4bE2xbT?uM)6i(C{ z9q?&@&yrQF%p4@>0rYA$(P_Y02eTUFB%OoI`*P}@5?A_JBemD_N``q=NV8&R&A7rW zYKzzX_oY}CKj=Gp%3XrcmhA@L7RtcRlO0n+wV%cuKGK8OH{ZU=vtHIj;!H%6f9LL+J82sRU?cQ=V5sH8qMI)TC;C7^ynJe)DCM z0-w##Ecmx<*Mr)l$K;>d`}Vb*K^)1F`^`r;b+5aJh{O2zE4@BnB|}R?e^feuC7@IG z!7z-uZ7rx`MS(8%vmL39WQGZb8LHpCG_?YD2)^_X63IyY{KzRq_ceOV(#+C8g-o)n z*e*$fc>v-VOMUbgOcar=Q+P}>=mO?}xeCeqdt60^^~I)9>kzPYH z^1mA)?!DyG(B?&!;qk#Z&HQ`2!5rPUlRbNKJW19T$v$}&1bnYKi4Uaf)dt+slpY2s zf`XGooZ2#G)v*vZr}XB806UtttW#oHzw*|2Gt=@H{+=^SYsa(3-~#qP#C?{-!M$A* z?&taf*(+oh*n_J$y^^YFrpZssw*pleywPTwl+P?<9lk!mC*tpyJVwGgrp^1l8;e;- zRIj~!$S9i2(#oM^!eYI~b zqsuO1C32nR@)Onk$|H`YUS7{>z3#vK7zhVt%eIgcoVyX?QONiqt}Wzf(<|Ze#&3DD zS7@y5o3MO>bz)5rf6+4_=R2*!$TXX<*Mt2Ge|dchTr27V6gmvEXGCXP3dM)!zJi_G zPA3|kTt6m^+Shy&y7c28o^Wz&4sf;|dnih?2obG{ zXjwg~%0CG*`?4BSD3T+TxHNp6)ZTapsIhnf7885ih|O?XaAM%yW7XBLxRR6>o;vBG zI6go=+^nCQnbz;o(lf&Gwj7FRwgv6{jvLsBPE3b>tg5a&O2!Tfm^{$(DrxBqC^B-c zO8qkRWpRE`#)?7}LlBGnhX;I~J6HVu=jMc>T?N?nmkzg;M7Je6VvKz|uG8hGi3sr7 z#G7~ZCZ?B)J}p-0pdTwA(9;*w*X4;nt{FhX65z;KB0PE2w156oKr-mNck?yf^2(#) z&tLZ+3=A&5l09MmJ+ymnB8_F>waqU_5~DzM@03p`IQ-VPy5PTuzNvMz=*C#iTUS)5 zm}^<^{loi-sP%pIH#TcH>#%S6A0FG9k;feBd^#C&cVRi{iTsVkiEA4-jt zm0YYfRYiF>eQ|+wC)n)+8y?|~@&$S*X~O`DSbJ%VM6BEw{1edv2MDgmo6xc-;Sk~T zy?uc;GXZhZ4mtAE{MHqa>f2(p7EMfE%KY;qN%H$8+#FpedWSObrU?jiM!=$uhWiuM__?z^vk)91^etFY-4>Mde14l%%TK5CWNz8sNdf zMHw=rFF4tvpPOE;hr^K-X|B6 z!wcQ`CH0eBW{?@S40{0oy0e%5n_|i<(F1-N@t7JFcFuxATZAhE+l;?*T5_u=|XJkcDqf)gJ!F$G$!BmW(z29UhGcd zhzN2vN0-F>e9WpieYl{)1UoD$^qp1iE8~iWBPx=+M|qC{n{M%dse7DBXf!y7PYv3! 
z`0V}15@flHSna9dOjqGnor+pB8JK&=abV2ro>Gh!+^X=XAU7KbElbmQ7+o0~y(JdZ zeV(PrvacA@Cv~lo8c?2Uspf=TF0MlDcWwnl78`%ba@X!})jXk0Ct#hER*G@(dE=J- z$MWG;X{FcK8D^|I%UoHN*aG+;p7P`g^UhcgazS5mvt+h7lILA%F+!sQXdt1lerfd0 z0`FwFDF#$c>vC@tf(v~EdMn*V?wgWs>lDK3oh;2L&ngj z+CwO)8&&W4Y2C%@oo_4WB4WnGsM!AkSPS$(Zdm!dJ}sIek{I6n&)t@>HfE561aksU zr}e&6(Ta8O@Nf#0#ho5gP!Kq>xNssBQhmS9EBdzrT!RQDm|6WZpU)Na?kxp4!>^VFt)=N_Ib&;1}N7=EGgXrnbuG zG<*B5MhTM&GI{#{ESUCrnBiXs*{?v}TRrKIJM~(TF-V(vJo4v1n+F=yT_rL`!%E;k)+cbx$+)ruIAs1S zroD#hq~4tYvg0JSGnBOD7Xq?B(9@p;XpmmEw2;}15fYqH7kmmYwidgeUfD6NYr)8$ zB98KZsH&s`0l%l>P~%{Efa%IWix)NYzD;`QRJUJHs{&vqbh|D}(weeK3=PH1-`K-H zXx*$$<7S46#8V&ek%$1&B5L_P+ zk7xn9&|VR5V27Dk+2)tJ7~D8^$4>e)FxWE`>WTW0!x&##fqTHSm(Szek6!;I(UvM> z;=WBbG)Fo6WXO0PdtdME@q}+Z9gAd1ek$2L+#WC2Y!>{9UvHHMt?2rEM)aIKj2UG6 zes}1Z(4ioQYfln9XT8OrbF}~s+TU=o7thV`kM@rdND!Jf&{tywFHt#O4=jbZaS(CK zHS-E-1=OgOdiFx!VL>jJyWg&)CFsO+KT=yP+cmi}_6PdEum@uI4PFm1#l+b)ICnc! zrhgA-?WA_^Pns(iF{^@3Pf2-tZJ;VVg%`Xn_Ap*l1(>|z)M(y)0xFu+@jIp;L2C%HUCk4bCakU+^svo{IpRcDKRVyqgwt?Jx z$o+F*RA?czdDQPYTvXA#RZO`E{a&PFIj;<-9aVAm5@_j@2-MjZi^BtR{NWg-j=Zps z-pNC8RyssBeXlDPOuka&+T7ggBNpUn3=s5)`UYz!O`2#xT)Ys<~xO5$F37yiQFP5CbcSDU~OVQzKl%En1?MOhD2W48$g#CMJ{_)94Ee$Q! z{id!54t@j#Hd{W}o8sR`SgBUQIHG#)#=6xTH#%%gV2VpoSt<$T-r8~vwG6TstMc%0 zZXv;bcW1-({PC=liC9%xS=#T+_`l4}1U}$P7+{1-2hx#lu?BCMn3)~d7Y|MRakJw` z)hcmx-P3Q|aP<{|C1nxMa58(_rfbsbqdF&?q17i>)y=&Z@PSFmQhUXw)~te#Xt}~D zZ7F4=`0apQVlhJ`i2ziJ^B(0K^38DlJ|AA$Oi}={b)S|vQe7S5Kd$vl>{}B9#qoTC z@53MZg_Z+)K1{6lOHkExkV3TKM31G4XcDL-{VqR%^c6he_&h$i1RubEDHEhpF97h) z$npA`S}eJd77#g`_KGTY{Yvg@@>{_V`KkNS{?K>G>S?b8paH)}BO;Oq)RO&HLh1-| zjBXF{9R&3_)Gr*A{=D#ikNRaYiO`7C)JgM_N^pQOjLJU(V7exV#DRj+(o!!tn}rY5 zv@`^iYV=eLHJ)eH&a;5Y&pFo#R(KnfDiGPz66z2CT*T(42Y3u&y~GUPWTOyNE%Lxb?sJjcHCWhX9~vwS?af?9d5)i|ug8%QgI6U~v>JK`^?xgNG0ZFO^# z-ivuNBUjHH_cd~*v38p3V)#?)yPJFCu(iu*;xK`kWF5m5G-<`JVL}7ZfhT+`x8gqu zlPpB~S!Y)===?JdG{q0H$?fI7PYR__9g%|z7=2+Ja&0UgVJy)$EA(uGy!Ks79F4Vy zq3Dl(cF9!3On8F?BP%uVgJ|Y_-ZD&cz-}vINed`h?4?vR>YPNCz`!K@o|<(h#P1A1 z4SeYufeQ2Q@ov^#+TX`At1m{eV8kIT1R?Jp;7q@{8HkRf*T;=qJU210ye5}&FEVA1 z&F5dpP85CJ+&*m(;*NeR`3|MU&ge9mApDGcb#q&hL7IWy@QBgk#%*>46 zBJLtzKe3N0743B~&VZ)K%_2IJicNr2A+nOG@t+dbg~I!P;e?v*k5l7Sjg3r9%a~=! 
zQxB<-uMXD=Y*vPw&mpq>2gczU8)|L|D=sHG?#_`(NUghVeoj9LJk}P%+{kBp($;Ev z7VA?d%h~r|#{HrdOi$`6=hVw`UTUcAh#LDc_G@-_LVqL=^)5V*@34uA%uDZ8!|>xc zZi)fIjb8Rdq^eqD5U~;!)ql5s@jq{Nr82*F5uE8|N3dtnoewRk_2S|MXqy7KFF4ee>>@^lYtHpIumU zXb17_A9`LCS0zcSe577|Pn5=TmOLOI)F-t&Q0x%ax)Z*rg+xr>L2X(V%WO+MKd%zj znJ^4;gMYMOI4}Q#S=PTFKAe9Gt0*Wh=Q=ySx1dNY^uPr#G5(Vc3z)^{%;GzZNVhjR z;81YPL}{R`<5-VFkjgk*f1uZVwq3Gsr6dHed;QN$3=%!*1Mto*FKHKFa!*M?q=X4^ za4`_#Cy8%VUPy88nQ$rnIAN8>Ievw0u@!vMHv-byvxs_v$z>EtN3DOkXa6{| z!v^lG5{lgVZ!nQhX2zUZSc0ddK%}chQ|GO3>UxWEo9Rrnxsm3RYa#cDq2B-ST4iu$ zm0aTFZUm2Oph?|jBOc2wb9xeN_0TRBze?eax$^rVphgw{#OhLM*1@!oic;US-kaS< zq#x5rZkmG(&gq@aM2G^e7;fu~#688qs5kkL(GM)QfBT!EN~1>B_o{GLeCR%DLG zS?U>z^ax`NXtf+GD^h*upPaY~)mBpU^nT*p_FngQa`rEdOpVOo-9uUPv^VbP?=4QU z^dXppHn(o#B%A=z?_@G>W|e6C3;M zFa0^Ps?KchwWrAoc`0%r8L-}92Hn3wfqn9kfSi*ofnG)_21wHC{&Rg&@j4Y_@f;D& zOZ4?9l5}APX0wzztAQ=j_?5hRdI<9;B63F-nA)wNf0Rrq z)EZ^$*glUE-Jz}4t?S5-EIR9hFnxUv{go=iX|wHviKs1~nT-RITbQTvUNb;{JHLq) zyLs0=n!c<|!6g5(3m?9Xs}M*7KU#oF4?T4IpD&K-uv#R4UGlSw_$BkCz};_j1Hn3$ z^Kd8LVzCUwc@f{vSeT?@WI#Q*Sw*zH)`nk5CYsumppYh_#7d*-bSF?eJu8>?G*y*X zpunyHtu3{veRcwSEy)G|i_5ZaegVBRPzxV+o#F6u>xkZC7YPqBEr^pE&lvcak>f=LdWi* zHxQ2R4;Olt15;_aYI2J^7ocYMP1F~lCy7nugPGt^pWrn3-PNt%3trAOncM%I4Rgkw z?(BR>VJRuKlLiaft}nq%R)ZE4`%Drxu{pvuXi{d|iStw*$>teD-`ip~R#Nf9I0TzE zdm7cU-3zC(W6yZ_>L#XnyE!XK-!1C^3#{KO`k!AM|NGxCe)c&;N&iv6qseT>t{F8P zKnHlkdn*!*O8%?=e_i2NykwE9f}C@Z{`fQQAM@kSl!7N){+WX3*a}f;``C)e6#)BE z?czaMuLkdq8!a2U6@MWC7xBkD4VjfaGAB~;;}Kgi;Bsc-S4_@;RVPf z@3;~6Ey?q9*gw2`o$$+;5>T<2rDZ2Qy@5Ob{gq3Z1j?nv-;V%-Ygef_r%?s0G4)7W z@G9spuJtlACSwFMq8P0=Bif4eFWpMaT~AHJQ#SfkF~J3)bxQPd0C~!3S7d4hptjWX z9lS(%3(8*NudlO~$Udpn1?{Fcn}bnhNphFhP|iGB{1N7Dh{7I*BSgoUf6ln9KshE@ zr{EY(`}mRKYfFK^2gTVP#_y9}iL~u*{e&4E%zm5}2U)iZil5QG=nnW0=l(9h6)#C9 z!iVb|@;LJ~#0+hD_<4lVg~eA)xORjhu8`#s<$Gnx`|3ZX>5QkacV0%#I`FU;adcLb z8!cnKp+p4i6#L@Ojx3_JtP74V`BKnnCcneI6gDWszTct z$%wK+!CW|^pgBC`5Fdg@k~4f;Yft&ST3z3zl#U<8ghU-l;;+93gCLT3mu8iy2IWMH z;xKd6r`+PW(-FRKhCZ@1nt8lJpKOxSpR;F{uqHYK5mpspW!e6mS#r(@JqF+m8dDz= zr*&}($y8)?b0*uU6Kl(kkHPQf9)#bqWw%-LZ5K4^sR<~V35#xo$+83j=LxP0nT*PS zt?l~(Bgh>cfHi=R*Xb2I^wlpxZF%bDumJb9gnBn~8ohhI+T@4H5>8us!0e*g z)Y?z)LkZUh{xKE?t&-Qv$1(6IZi1Taln2NRb#u&=p(x|qWbk7DN3!rRf=HEL4=2w> z%US7-YxXjrX${e(C1f(@n}|8$Dmx6)VZNCqdJUA zjWAAN`N&)&bSa5NYV`3s2XT|;k;PQphZ%!z?fM58(wCH z0BsJrtgS4G>kZTCjCY46oKb21io~DaNqi(iUmu5&Fy3;`$21HSGtuiT9h4rWW!~+& zEh^^^4O2E>yQ4gbSi_Mo#gWO{RB(`;y-=Y_y1troDCmah(_-b%O+dK1}VWp22> zSrP&#*7GK33QBau-xy|CWA>u zK$&2J=ZHWU5@lLIxE+b!rg-1;j;ogD+n2&0FesJ!eh?BHT@HuZqT9b+dNE9K5G=3h zeue)fCDPuPnaw+3HliuA<91Gx8v&aK32#c>@(t`xJX_Hzs@Anmf0w%Qszo97%_|>D zT|f^D8yrEI-&NY+a;0M+oPY2JToTZD!Eo7;xB8(W>q#qIiesQi^D?+>&FL!KMc-50 zi@>L;6oj}FX}bpVQEKfo&lVueTtt;k-+IrfW6*IF=fj@{54$gjMPD>c>@%OIVaz}- znX6i^S4yhgZU?TT%R%r6<-*CPIH^eqe-Sc|e|X7>)zJClVsjmZH{q)PtFr*hg5RPH zyJ`VlxX8m6oay~e)8ClAYFqy*M77TMx1}kGCX@8UU$zkCB!bsLs)G#YJ=W6yw}+iC zH8J~oyyz1XQ_+rO6`&amUf_I6uO{aLzzflaQNC;1$6!*<#u_>l*@|aoyw1C?jYG*e zW2yUYa7>%z--iDAjkxows{I;Ao}0V3V&xGt{g%DuKE+e8r>$M7Nr*bopp4f^aVC3-^xX3@$fUf{{B`$ zb0ePBJI0-bGY;B7d!(5h$ogTp+&Z7KE zFSI|El*LWx(5Czhp!9BjWk-uQJ~=2JJ2#qslsFnD(cFv?e15}yaG^VAT}@GjD4GL> zmPQrDexB|MUMN)21#tG^CEOoY()PU=9U0rVS3zMBa8BqUDMYJ9e=WpvD=*l8D@T$`907u*ETrLzn$|g1In4*8u?0O zTk2=y#>|hI#j`vY#DS|HnD50BU%Jd9peNpub8BIw}(sN$k5GOwq8kp5zoq|a+3z8+qd_jUZy zE25wMlN>jiTFt)G@~?}Vu9|Knk;21t2;{+gbx!tPt(k_=L9AY(X9ntvL+J;k1?-ru z`@bc(x53;z$KA;=zk6D;4z*I0cMOFk!_NJ?$9tlcc{ z0q&G{+h#L|U4J?D;{S$?{Vt=X^Q!W4!r@!l0WnQ(#53DHjIU)~Bp$7DqX5X>x6p z+qA>ke~&#w6PByIQgrJR28-d5K`@${2=iOAG&>#1uJ zSCN2;ql}~s1C}~q`xc>xvSRbuw%=LbPOHW9Dtl-2j9W!KGq>lcumo$(&uv>C*R+S~ 
zS0)C=)89=uHA?KNDBZca1xK|5&sMZ})-XzUGA@xDEItAp8sa(S^^?MoCFR@2xY1=W zZ0RF;FK4U+o2f3ZjC^e#6Vm76MgE@J)=s%Aqb`XQIp-q`OBU6^S_);0CErnsKxy-= zp^@f9WTzQvj7sj9{rj!bm-M{vrj`X$(61yI5V$e_HNTEj*P+Xw^@+*Wr1ccqzYE;` zsApo9=SjyED*BwxNkY$AGy2$>S}DU=V;wvZHodOtcK{E1}Uk z$Skp%fqe^01~&n6se%4f89pN;BifXgW#vdSEE~YHuDxYcLEq0j3YU5LZA76DG4`Ke z1rvp992Q&RwCVD^>V9FE6rmsgTEKEdKzYR1`o4A-FBP+uH$t)W4sy+=2|uy*6&4)J z5_DJnNcnQ1{Nd(V3!bH-;ox2=Jbhxdz{eDT^81A3Dx!fd3!Vy>(Dq z@4GJwMOz9~aDo*mMT-->l;T!gg0;8@f?JUyr4S&vg%&5c1ueyc6bo*}2~b=Ml=kHN z+xy&m=G@&qv-iDoX70a9)|x!8JZnAA`;l_tb4oq!7sjldnP#pBJvzea`)eh+*k@jy?F4&lJ#=*15Kh$T^=(qpqLOg7k?(Mov?2!^vz{<4@uDhEwm6n$=e&Xj8b$su3 z`p~_#e*n9_Dcvl))zQ>0OzIcL#m5^{x)ye42Vd-Sbl>JPFBU%3>wf4h@fkXRcSXKM zwV<`YT7sr7G8azveDoRw-!jyCoPNbl3Y2{SxH@V!6L~e6RO@eC5NQiQDleLB<$eZ4 z8R0Q*3vrJXNYFyyI<6G^VB<)jlOm62^qRQ~E&e?2koC z)znwk!w#!FH}oH%vB>nQK(MZEqZ19LW*Gk(rp$!eH3X(25spR<8efP!W5oM@|JP5xWiE0ux@}oC7e9y4xmvoXsF>$dZ?^rvj1|j=A@mn;vx6^1Cn*M=)2rmmR zFe1=#+7&Sbu1NigQZsygz0~=e1^xJ`l2xnt;8(9NOMI(gJ77ealZ>`-VdvM^_Znuz zrm(IcyOM?TN;)?$kCQWXcFe%M40N-I{?I)hRBp%kayVu*U0Y2&wM<1%aJ{&&dA7KD zh|T@A`ntC2Z(oQBzoH}rb54?@R8pl4YQy(2*&5%U8-LvDx~y*3IBwElUg;fn>Qsd& zSUx4T;U&8XT~dzl<}Bqy-s+ljUN^QV&1FpGl_wSJF}s6z>Z`4r9t3_xqXH>uXHvIx zwxXPeOKwjd32%f_|}Q)r%H?brWbduRPGH@KH}!BwbsLqUK2{3e^} zH|X7V6}Pa(B~bPk@(<3qIacJHF01i&^Xmcmb;|oX*Z;fI^S>(v{C`R}{jZ;gb6LzH zQl9U@s$_jOj%7cH+BGaQVCbKRB1h=NE*!Alvfdw)r0*lFS*D=8o7e zbBSXjSE%TvJ@2Q9+hZ6iUTpV8?NA_kF| z(2mITrInOOlu#ae6?bckX}W$-vyJqY!Tg{DcVGhyE)7F@ zmy~6ulGu1Qy#9le5|MleS^tCM_sRMl1>FXS*g4I3-8lioU0_upKqQ#6K@HvSXZR`g zTqEXywIpHwM~`REho#l!=xACN6cagPiAaCU%Ow_1Vb`2KsF)~=|4jnS%@yj1HzAYu%@6*|81(Rfm5KEMTSoCVMVWp2 z2m6F3Q-Ha1l?~e?#k(a=vnwhG9e_o+6jrY}-$yn52cL)tzpj!RMJ+Xw_!P;n^(N(G z(r)xU7hQz?>=M_uT~(P^#y(`NBs3BEf+0JMaXn#d`aX1+X#4ZoYh+$6v2{vytqL7V z_4haYl_u;Q2$iEneH2JsW*Q~(K6mK;Owa7!ycK`H(lEl2VAdf^&eiupqIUnfYsS7L zdP2*qNr}C?Z*aFrr1C>yyaOry2qp)^*H2`xeXH$()Su_67aef26}}G>8_*@OT5i4x zr3v`r9-r4r7S%kdg6}z@JhMV!JM|!oYF`S%v{QLm%*$vogC<+TP zG~=tR>BY)ix3mqgdB*)vQ=+H@M}$_o&M2FZd^B14t&D*RzV&}OPH?cdYtmdCM%B$W zl1k%W_&IJ-g%)7RI49nL;^t~p(bLhN>E^XzQ!gmD;a=w3I%Z~vSqlGRyML9NH{INu zJxaKnWVB$}W?l^ALd2*z4x=OD?H!;ccCIS`#pRFq;UnWH*kH9&3zw6AL&MS=vsboH zdm)so|?--nbiM z6nj_7blh)(_%M}x7!1gk+}mgmna5DazlXcWUI2~gG7lXy=cL&`O$}i=JQl3GtPj&W z9Y|x}puEK-`PPRbC~5jfZFq9kCk!0vDcgrclvFWz?Bu1Si{F|%dm@iR=(Fv97IJ5F zazD)I4b6AeGbkJV&?InRr>LlS(Z{wUJ>22{e zML<`HeqG=z7-BRN+Rz;~9*cuB7{4nmHK?{k(7sIi>SY-z)DaO4pHm55>OiH|+cgca z1jrSED6fAg`6i!LfMxvSiI3KqT?wawM}nr}tWe)UgXV05$64H78M9j@HdI7y943^)UBbv9`2lX7fq#>)PMSv z*zidI-d#@dVtX(jAM@SJBY(>T@6_zmAAMOGJ4n;=OUVf9su-YUGP)uC8@eV7D{y;4 zD0B|dNz}O>M5# zH@F8}FroSW6;F^~tWG|fi0u+^d2bByB*vly3#|A)>&S_4iUa7MfDqr}9Q+{sYU_A- z_>V)hXb$vu&U!0mxV04hcl{+0;!Z`8;2AA|c9bM@2vOg(JBs#H=XtJ9v<~k`2~rn? 
zaA0zS*^N)@3L;oU#i}34IYqA|nE2s3B>bNQp!|E9{$G=`K9@1}B?5tcDRAW*DJ@;MW$n)Ol|96R(q?BM3lEsGg`do;@miePGw9(gXyd1uN1fpP_ETgd6|%Z zxfvzR*a+%_rVYon)|~pSkLy#xJX2CAe&0^)Z%4xO%R;O!?IN>16vfZ_<(ell2eZwb zxDK5Qp+r;nE;p->bpc~@7NM@(4oIuAer0Yo8ZGjDmH|Tid*@!)@pGgaazNTzt6ULFr`mh1k+a@@$K`L@4}w# z&Mr&wS)+{6Zf2insQnq_*rzkT#Rqpdn3&I!7%i1MIGNrhWeAlGJgMe9#~+(n;#yXc z*Jkgm6C@F9w7P6DjmXC&( z!V2NPsujkPdo!HEUuEMpw|RUIxV?8z_kckkt?komek{YOnMk@7T=oeKEsr=M*$gACm5{ERAq4IN7YQX9 zSFR$|{f$SFzRN*InG&{|;bkwW!HFMk(*xxtXGvWL)mcXzp%W!9Cyz_Y{R8mt67m8|}`t>>R{20hKx>;qKkOeL>1{^a@Xtl$%q)orn8U2PN*iqHR<{u0VGb zJ|^CjQt2;2hXG^M{+^l4{~q)|OxSQ$iCXIZ=c&Y^{o5C#rZnsI>tLF_fbcC!fgL-U zx%Hs)xAr?35@Z#3%!!5?nZy_pl(d!EC&-{^?k}fS;GGXrl1{i;*@2Sor}s}N2lt=t zDzE#s8$Dg-w(1e-+-!x4F#_#RWt3U%3sU^GCnt1JheP;Z{3HVMD#;!mX=1X$-vx4Y zD0LjD1uJQ!Z;NxL$fOl#!uniLWWbf|;~!yRf-y=z^}nA`1plhW{7o@>f(22MXtC|DybnFZ~iB}W97^Ocfd#t2BFy>|d z=t#z_>8fe3^lv*zj!A*-uSxMg3HpKJxJm(<&&HHL;6_Ul0q~EjB8Xjp0^B>r*JlW{ z{}MDa^a-rDAcx&XLH4WXjXz6q$?gix?c8cHAQ2iQNd5Pqv7xd3*E7t+Nu9f%-TCz~ zhk>(;5^uH0<~wXLOk|6J-BJ-Ae_i9m^PKGF$zH;fH8&@n&&@UA+hyVJ10OeI0AjIr zlM#BE3g5~i$=^9(yyWAx8&+zv+`FMb4k`bYPqBH4zyITe9#c}ER8U@0e2G0}c_n<# zV2a1oDkZ!Q6BsReNsQ)XXH51|PvOC48ou{MJVT~*Y%2`I>#3*9+d6+;cw~?p0%D75$+5BaVKTsN1`D@DjPlA4r6_arIYsS<2+f$4+x**X( zNyS22?QjUspVz6Svht1?_BrJJucXFRTAx&$S|l61^YdEJ^HRl>j;TZ30js-bH%aj@ zS?BNT9CXh`_N{jeydBgEGMbN`HRs%5feN&pHLYxf_yoz67HTS+{5N}Fg|{)KI=_2y zb6JB23)TKr-($57>$sooa&B*PWQS9#y9Tm)gU2p~k2jC+R8RlGdHpVH-s(4%#qFyP z+@vIQm~4SUR6EXKxtxIg6Ha1Q=18~_BJoD7%!{fjKc~SREmD!>3HvrXcOr?C_JDW< z$Ymo_P*_+pu@|=SHV<*kZy-misX&utrHubLvZ15-y?gzARuPXHErEsT0?9~4IzPq| z&Mo#1`KO>$(&G4*?bl|uu8p|u$+TGOzu`kRay-#!zP#s{~*LePur6O2ZhTP2u>-8`St^8-3o_8quKvM$_a z_e0jbfrC5sjkIiEcl|1;WBg5wOBeh5D{Z)KO6NPQoz$`xqaN(}T^iGQn*h)1oYOw` zXa&uV?5LDSDwi(BhqPtLJOYRviI0eLv58BxaTfF6eg7z3r6ahzQP!({$HA!L0Gg*Z z-XZ{XYB8gJVNJWYx6vqLHK4Is3aLK>OJGQ!xsVF6WRm3L>$K4fS$Ro9t-uH7B<}Ta zTo%VC`AKKX$AM%&H5pJ==jH?VYplxX=im#OFMnjVUusymPHKZV3c^ay-KEjBEi7xM zGa)GcC_8>N8qnUnsQZDK)%aiRM1^f+DGOdIo5Q!ns7h(fK@@Zae)wlZzNONP9UA z;!Qa*kcdmx*DSLi`t%UkMs~&97uYq7)*aKe*ePy_%Jdz8$QvTBf@KoupL`?!;;56(VQ!CkCW>Db>f6a!!;UXHTzKZ-j<+ zm8O<`+>Wz)#tp(I?Hd`!o4+XYQNc~@_)JS^s?v~fijYKGuat2tc|_<+D?8T9 zp{{?`k=IbAKP=S0%h7s~;>txa+)u#d_6H>KKp3l#BI@c!>x zVy)pSxUaGH)I|-MpnSJ>-36P=EH-JA3;F0CJ9i$wp{$%XOAG^)s&@+8*Y+}Zsz~;C zHKb44EU>YAq0ISP@G!IoctVG#e7fS@<&v`$7~Sr328l%LNyMGIM9Qn8qL-ezbphm< z?PhSw?pe>88Gp8OTDFOCqZmw2pO5j~64+1kWd*x;^5Pc5eMua&A+?g4N*-P!C3@5H zlq5S=h4F#UopPqqaTa5%8}a>GZxbq>%Z+GzxtKA;cc}9UUnDY@z8{mVP{nn1?|19j zN<8&y^}l-d2dCaK9i+Ewa!-xq8Q|z=VPkqO3(Hq33_om4Exrp3q}i*}Cb4!)$ccqi z3xb8-2fA501OUbk$$ zm(Jlg_8dg-jDG*ZZx3gg?@JpS`5Wy7Ly(&WgIKzx*Ak|+Xw2GKKnvcWq= zbuZ@auq^$0y_}GB#F(g^9`4dn6=kGdZg<(3J_ui2?@DZ+d6zcJ-ij@f5<7J7}>w!BxKkbHaf zQK%mfAu&Bs+xU8xYhTPrLv06{K`ZrI6bG4yY!tV&W3~;@6M-1| z0!!_JY}ZD@t)6OhH!Uo)5}3eg9kv2R9=3BDc{j&9IAsPAOQ$J*N`>dnGZw(G9n95= z$byb(gT84!w_uZz5^ox<;FaTek@87T&+90&DFsS&80C1dYg|N)FilVPA#<$j zX(zWxy-4qa&v}TdY&kg%Y4sj!rTaDqs7fNPbCJAFeE>`s^y4X1jOxpH2Zk_l@evV$ z#+Pg@w@Y>xN-Q^K7<9K($bimSCuD!bDLkvy0po^s^dZqL;!~VlSb9ax2~tzL9|n6Y z;AUVRldX7Tc45Kb{KP;2)C8VvakJrJe7Cy-P6v=~wq#Rr4lTcX{HV za9+p3tnkMU&n0oiWs*Li@Sv39AQJ~I+OQGY4P6ryI`8_9o-s6~`Dsz|Y}iJ6HAUnR z%7`Vlx0*<~OHDq683VXIscatLwiDu4r7?f&Yo$r@QS(kg-%-a2ijZ-HHLRtVea$hW zWOw!JM5o(jgPF!F$c!+xE;WB-%cK2~e)4Ybt6L8QWM701badSOa$FaGD$%tpq+9Fh z(e|vVMd||_*D-G(vbkmpU!8f$ic-q;i%0Y2Q@8TeRaLF@fPDvuocZX4D&eTPM*;+MHSQ_Rk_P#D=~~1 zi+d@h;JJvfOO2Q|K%?&SAs+p-pY3dm!jSD`2#j`)I^#w}IOfH`GPee;-jsZ4DU7#^ zg~RRJ#1}QYFjTJnv=}f$!i$Gq_JuW_`g*)aJ9DLGJ|Cib(p8+u_VSc( zEKt@aq8bCe!KzY@I#uPO%EDA-ryLA(k5E&7Y1}_3}OAFH89Sp8NG#Jr*zTUg`u3TmYFgB*@IfkQwo<4 
zojr}R`3KW0E1O>x$g3J!77SgE(j>S(AXu%3gQQkG8MR0Qpm?S-1#d{{<`I57Pd!Cm z58NN_^Tw)xrPik4O}7tg_HBo` z3L^9RC)Zc>ByiP8d(kLzpl#SYZbO2eJOifNB?a9V#tsWuzgo`b}BN^0ZE1pCx8i*x6 z{MaLtp3zLR)ao;Q-gRQ8I>-CZ7oza=_#V;p4^61vR-bR~16{<|ob2AEE~f6)4mbGp z?9N%A3winGA@#p3IQL#&ey@qE;I%%UUm`ha^kn7)S4)eH~zMh0FI`xp1z**tjFj5#&V+@^|<{~~H{5<)B<)3y&mA+`6qp>Xc?2~Pk$puQz zJDSAve{epgo@V=pAd(D3&cUNUeYC_101_?*Eb&WFQ!x)k;7u~NR+8=Iy-X;-B#2^Q zOq1_|rw!*U_ym489<|Xksi>LO(+g&tIQ&5fO)9->C5lOnTYL-;?!TkUk$X=n&lEx-VxJMHC=7O*#B%L>+ole%S%)$ zogcCbza;&DDzZ8zD|~9MG>o12(LgSn$qE?ySx~Q)!%p-sd57q%*g#NYAPSJp9G?d> zbWEK>(G%uY%hehTw9x6G_lqU9dQadz?6&2Z#+-$D4RiYq4`c&u^9$$KJ82|8;6t{< zRluK18uc>$`>!E{4L2U-WcCm(#!q22$%DsLTnA$IyT$qWWkpcYhUBS)hCQ;`>E>lJ zw1*rkT(Da9FB>8uu78b_=kG(~r-aDTV8>btlg~c=F=(^E%L$GT5p7&sZL?$22@{P` zkwauDk*rGucWSpNskHd(U`SDtO1-n*c;mW^#ZJ6%@r2tYx@0wmuXtEL_m^&i%M4 z9sN(^qijl}Br{d3`vThw@)GS;eUTI<`gLuy4a4|}t6(ZF$W-Vz!^H3o5m}Z4vDKwj z8tGypRAv+3h9dr0Eog&Vy7LW6sc`J5xbW1@dudrL6*8w!Rp#uKLdg_We{*=_Ea8-NG(>Ogp9EuEBr~c zc7?vKBeIkCpN!ye<&X9f9Z**OS4LQI&-RCf3D(Uq_%szIhTJsv)YLLX{i>YVzy8(w zJ9hk-L#xEGt_p? z96m>9aTKP^ys=>i<@cmZUC?kUFOGrw6+=zz5n^gIZ|LU%gt#)V-T@F5Gds^JMN$|a z!R@(OJ%x1o&9}tghli*&#y;Tpv3~RR`+=~$RAjt?jN zs|_+pW{*Ik#y$pA*fL?lM|nr0Q9?X@md=a>OYUD5<6(QC1@=B33@cU0H8C_{X+$Zg zDb8pkmFG{`o+?Vgw%Xh}X**Z$Q^l{@C_IH$l~2@~mu^H%mdBl>ruGA9W-PP!=W?Z7 zN2r1<>RRKZ+3IRnowEDT2gy3h=6n1qAtX2>DB;UfqsjSs1ihzcRU-()n=edgjNckd zBuj?ZK?Gm9<7@orMy^Svi6jWg56P)W8)2c&&P8B5l2q}P&SR~QZu9t%AkXuZr*~;J zFKWF}Uj?YWvArr&p*|B+!Mt6Kche$12=UdDf+8ynIItG$9NOH0;U)8P62jCI&}zT> zH-&OJiQwokCvGQuEDT#Uy zCwmAM@2nZ&g-AW>IxuM2FDYu$U09CsVTWvTqE-F!b7=3QF6PeAh@A+nU)Y3b5qhi`IYRY2g;GhA^D5JbmIIxsj@8ka}=^>Uly#Z1D0Ko#}f2-c$-(Hkm56t@~oPmQDXlngcJ zjU6@xsi?$Ump^*{3hT>Gvo59oT%kYbso!@JUX<+*xjo;%Da`)Ah}@5Vmmq2aJR=rnvFRK_1^u)koPM z*k6}Eqt)gOLc$%e#tIB<6I9=RhHx;upqL-wiW?$?*}s#>|Hv7amhhgE!D0GRlPk49 zt4WMyjDvDx>sBxcFW}a!chYr3(gq?@4|UD1 z!HTo%M`s7~-{<(NlYXgi{4D=M`!?(kj&a4M_eA5GgF}Br$Wid8!pIZnttuBvcB9hw9T$^c{8+(cdcgOefLqEO9Wi;Yz;VF^ z#{#$b(1e<^J@-jB*6KabCdkB(LCoOltMM z8iof3EESc7)q)5>oc1E@%Dl_UEC~a!JtexEJkA4x6R*0j3w1Xen zu=acZ4#Q@jYf^_nwA3hu zgAFm+hkcY$yM$Pa*1E-u@y7u3IU&(w7AF&n-@3Fc`LR~p<%e*l;P(!ubyn_UJ|uz3 z<5LE&YgKkLRO-)7OfK(-4-sx%xw@`YyImU~hPBuZ%087_3;Fu}V(Z6$Nnh_*?x%K~kRLHKt<;x!6>zZ@e)(X_cmHUL^rwlb$F?~Uj$Jd8O`(S2t9Ddj zNo8`XUk1oP%IsD0IeWF3i-48b2JASoF}K<$y|a3sL7Nq?)QR(c-OF8sqqr$Zn+K0g z(w^W`YTSa49f^hgqzJOiY%naXaJgZM~Kvnnio zyw`NA711Bf+kv~_f!pEk3eGZxiC6z9u~$s{)PzPBrjVDw;BVOCASzBY`@>krg5wLe z3lK?VbkoAKlHFZLP6BE5Zz0P5u}*-k1N^k=JBxPg&3Ggv$5cGk3n0fvkwjU@Ai(RMUAf&){0%!YwHl^HnjAikbXS;xTcO#T(Wy2uIV;m zC(k0#G2Rm9@FT2&od8Nu_zr0K$DForBdEgvb8?cPLy5mT@TXrN=I}%1;*5CC2X8+& zo6QB?tny)a?YOh&K}zlDGw0BWgvB?gcQ{v(`r zU8T9kN}9tj8;Bq zbALd#Cc3}!Kp8)Q+(!Mn@SH*;GH=Q`)ptd>MsI+A@n;@}6sOgJmk77#5zgUO<#4&9 zP1xIAIwl5bU?IHG>AJQDhenoaOc{q7hfHchsQ=rkt`}MhDGx{y%M#y>UZyt|8p@$R zj@qBgh9tqkgK2`&5)&yWYo?(~X;M3sI3kBX(ky%r4>w&{jHmw(@;A8DT4*p%KV%VzN`2s{7o{-k za86*OFEydp_w$pGKy@FAdL=}uXDh^y(FyOhS&n~`ZzEb8Eoo)7gI1WZ!=vrf;%X(` zm>V{q=TfcFy&JlgUmDWo@nUxCvIDan9t0|^@O-q$#Z{@Vxcurux1Tk%$V#%&R?2nZ zXrHZ>&AU(k(asM_o;xjwL%kc9S4;N{JJm@Rb}r5WQCE@a8*K0RWnnY= z>W*AC>9xRb@z3Yz!*qi=oHH z^bQ3NrDWOHtDdURtY^nU7C0sAGW&{RBJbrGIu?}`L-XMNkv%^*mcjsFEqp-iMvxv% zm7e|JMng#f|DleqJiCUC8D}6)AI-#&^>gNiIs6c;Q_(X8N;3^oHOFU;Pm4-iy@yDK z0D}BrCFzu(2ba$d6xRl?Ys>>C&vPcFtkUc_pp<2tDF(^4<4YLp6T_jGyiYJpii%N} zeP}b9eiNOnv*!2ikGkO2CcLs;sfA4RGe& zR!_66r3Y%f0dpYTm%H2ZPXS;xWAuGKa7QQDhiE7w4Lok*4-YG{k6SK%4A_&fKk}Za z4Ge_GFI>`IKf|XUYLIRllY>;mL~qd0N@jjzT*p}_uy8GG%b30XSkdPlghXjg<#G< z6_1>1tXadPuVf!MeIPxIy-&cXL#6ZF?(A&JKM$J?Q{hE-GcQs{Oz9SdnU|rb~r$?wx%CaKU_@ 
z$;N37p7JlI3p`EVFQ%lZ*O8ijT@6Y>tz+kbl<4+c#3@@2T@+UA*9X9;HO2haKy~g1 zM@sB{YnXWF=g*(+Kw`;=X1TeFzo4Z@13tpnVQU^L=UmBQW?Uv_{6h-upkZlJRxW$v zXD+No$3Z$LV)xU8Y^QUxMt^dfM7>%#yKP@J10O?+o%drB6DR8+?SjPbIv1 zim93q+SfbT*7kt=lHziLdDRPi8V#bggsd*o6^#fm>=c?yDuIq&qoodKo_8XC|EIC(ox*FGmP)#>{!;>t zh9Fj9byq7=*-f^7DZs^gU9+*C1mggkwu+&J@RLe}FEgQclt4{Wb)Y1<_FN~k%Sr?m z9X-Eb)>YIJToGj2`4w=kdq4GJsiE6!`KipdPOG`|9zc#R68+q!M!Op634@LbdpODgH}eYP3d9J-~0 z5hyM<%V`xlvI1-u-9Q~`Ywa5Jlwy9-oBAO?#(BL((93( z>vdNw(do+;Qlm0ca8!9-h(SU3JD)aacXlounI?SD-C{{}NAk|`xyi6`(SAJzn6cychb#SHg4I5yew!p@tglE?r0@v9;B()YNmHU=$IKV$ch{ z-7iY|C1tUvEw=Kot%;UA6(bbmX6ry-5YgR#y_-+|Aph4c|uPUYt!7Xm%k;tx~0llrytEs7200jy-=PT)B@fOj@kE@Jacq`}}iRGL8j zY$D_ZjFtp|d)U8eBOlb-HJe3=ZjOtT=W%h7@30#2SP|-!%t<}lKKWaVyRT-jTO`QhiS*Tf$D2=jhdZnqJJATl+!__sQK4*24#6%AUZlC}lFs z8oJ#+)amd4&_8GT(pj}gC=~l{2IuN&ILA@9ZDUJ2NpC7pje(o9u+q&`3`Pfv&l+MZ z3$l=Nvuk_h?QhVxz#wg^#i09vX@a18y>mj5Nm+l$I80O&77CEe4>n>aYVjq?&+ z%YXwrw%+fwBwCdg4OC*Yw6+7JkXB4m$87b?FI0HRzx9O)@Emy0kd4qvJ>30LHYw)6 zAJ&dB)2BC|Iom1;$V!}Dmtrc}6dHdVx`$qU84?e!lArW_!R$H+GBy6HRg*wzRiw>MMwdUQ=ry@V7{O zuwdrvTeA-M7;hP1Yfk zB-Lw@00LC8eSx)0&BRnM#}5A-c2ZZ;*4v=hfvjT~kXm8>=Vnj=qItIRfehkVu9lYZ zBqS4q<4Xhl%_YsgwaH$CoSB)D2rYh+J*aLqy1Boc5(1Rs22E!5GTgnmF;uj&aI}$f zZqJmSym_C6XQ~FqJdE8N9m^Fd|N44v+sooM6}ofq^9OsRL6bm!fN3zzqI@VG6mYjI zezA(0yIASF{<*%{!{}>t^Nq~h4f#gZR~-vBIA^Y3vxI%@Yz=kk=*uXxC#)uxBpVYn+ick8F}K6Y!o=6fpo{b{4BnDC*KjLvu{ zbcHbx%3nWUtY_dTfowfHl5h1ba#4rXl_njFRAT*lX=sc@X0oP69+Gv5rU{Z0-KYpGwL0D5b-b&9JOor3H5%z+%3;Kgw zgtW+?1H`3%q}Msg{I#^EBjJUwlRS!{gXwim@fVAYvclAWxZx=QrZMqTspf(s?~SXA z^_Cv2ReTxOb=rsBtx`PNQb_iS(8AiDPztBLIVWFK!`y+lQ=JKT=>epD z0#zbj4Ez`Z)s;}ChDb2RnjNz8xiN@1*}pBW+vEJX z`QxT+5k7OM)hjbx=aO?Or2K7VB5(F57W!%Y2j?3{Sp0eJudbD!X>W#q1STD7(#!CA z@W%h!r@sI5M?I}%b^|hJt95HLPyY8y1J$ax{(pR70f>tuK``QlJ(;rYX!|6Y zYt7UaJDn?^+Y<~^9QKboAD?j}ucp6Hb*6&KBUV!fXQ(8dIzllHePG{}mH4ehW7pdH z^oo>G+};A@|6uPeqvGnaeQyW^Nbn#*s*qp_7ThHS_ux=SAZP)F74DMY6c!-3ySrOR zCQOp1lYVEuz&=0^zuu<|^SIVZxTUOMszo4n=ja zVh=zzFrp%wnHLRfI>I>_%yG3u%=YmTnK+2H;|MHm{3mpIY35)>%x zcK?3j)|;IO2snbW@sv8!a}yY2GiI5}J19BLH$R5Ah`7c>L>sKtoa?hq)8I6G*|@hd zYKYb=FM!tQ^vY(;mfF0fdeCM+VFp!q<)b-~=uzbJ{l5oZ+r(eNa?3bOh zD}z$pm_rRNAjTXbb^=UjFcs7va>$clguCL~C|5KIcEyMYpExcL?6h65&6JK`nA_{v z?WbdIlJ`2o`nXT|uRe;eT6dnFu-!xG7u%icr~!G1G-jO5UHf&(Jbn1>1^jL(JI}wp z8P{joeZ!?TI;AOtH@=kv?Gj5yWP_ZsxeUXVi)2Jd%gwO~fZIbhnYXU*iyiVe7!ewab9q-?g@ zpa!ftb_)QU9Pm$!Qh~Fg<$NtGoA+KE9rnZho|oV{v4u3-wHqb1vUs+-*=~so40A`i zN|?1IH3!Za7>#15JX?;>=EtwBOmN~483ay9=r?!kcvz5k1Awg=UVLGu$f6?hKx_;|NzZYS53 zqW^K8B8gJPq;G3q7+7ub>~A+3MV@L5p}m%~CYz&25&2dQ4G?gQW||Ot=TQc}{}V;W zklCzlewJM3OK|g)GL+sSDy#S7xXeDHNln7MrA0@EEulch-(U>w1ahh=J5ijqgxn66A+JaTe&4=Szo}U?2XpGPWS~XJLxhL1 z1n3$av^FGJoZI!&>E@@WKfzyuI1Foy0X%pXR z#8$<;a{r04M17|JZuMs7Z0|x`{fGZgl(qBa8`)g)v)KC2dEzu{HQ#&T*Ofmcj7NSL zsa{0iX<^q_9~|_$zOkspocAkh-<&%~cu{f_hkaNasy?p%fxPLDQKh8b>AbohkK78r zU`~P0h*^w{sdtp9;h`;2yubyE+*1r;5;51GJnt*#x0Fz(TQ7)vy7_J#g}O!tvrZUb zh87ep-5GoixHaIsvpJNXvHVeL#3w%Gck*D(^iJm|3QheNbJGbv1%wi>R#I|S%<1(F zt!Xl9oDOGem^&S%8vyo+sYe*k_u2Qxc7eU0C{t8_dtz{1xm1*g&0ClyI*;SgW=(5} zbAoSba0Y0mhaTEI2#%`ZPh1lbEvI+lkdZjkS#1vyC)#}NX;(24^!g7&GyY;WHjC`s zYNyeheTv?ib9MXJsj1CC+H?}+GHt96-BR@ULjM{9e0itE`$2Pha!66>$}+5)@3EM+ zzo#uU3B-zv?d!=^#(o5h<5aQu!C3qyI6`p!Hwo z@LVC5(pQ`!!Qba$N2dK+|Oh2_&2aR0Q zQ$N=#ZS;$AyIH^a+>L?l-a(j8WBrW|ct3NZ)v(j%x}kwZ>>1`y z6asc%5v@xQdsakl<7rdx-sa}8U--GWl^dUyvOr!odqO!FvoxSl01iKUHTnAGts_VN zc>uH^u6Qp$hcU;fw9&;~Skk@c#+$C?9H|R24CAj60x@AKT&WyXx(PI$``IQS1y{vV zvoc`gyHPGuygD90Af3ru(a4^2yO<=jz(X25l##Lf(>6D$Rw6X{Ioqd$UY&|*gwZn} z^RLg>Z!80N>I~UqBHXs!jB$KBkWY5|L*(_hdFmlw_Ft2i!oc9QKxY4;N>g8rMBwnS 
z7$HpLO50*WyG0cL{!&Rh@__~KaPZSYT(tYScHUyk0a<%k;`C zN{#mGaj@-N*aU^Rov-RQ+YFRj`G;5)5cm|%O$=APQG6h z-FaL~^nIPwq~et8K_#Y@JsFa)>4u#1*r09e3N4Z;lFGV?niRRWw1Jin1DaE7V~-*? zCt;ikS{PBe;Vm^8hZ^c2c1Ybs#|It@S<6!x-cJ-M90$S8&IcKp=BA@JFPg`C&t(;* zR<>V1Tbw%Bd`va6AJ=Bl+1wEH@gaykQwiRB+T2Zc=gqw5sUDsm6}YM)%u-Uak(0mW zZ))?^ieY4g{?Ud%hRDbmD0jk{NSBgshgo_WL|(z^k0~WfnKs_D*%U0WS+!!KpwI1e zkT$e-w%>m~au00PVn|DRIDXKD{Bp}^X)U)p>~u3*v-fq%I0^ChmMUGF1l;`W9#tx- zNg6xHj;PyJzv^*{Ay|sYiS4qb-p0fo65R7315WKOMdugV6z@-w;8*6C28NfGQBHV$ z1Je$|Y3_}4K-YaEbdrZf_3~@^S*MWEFgc)zM4JW_CY3C<8>!6jXGS*wyjY^o!<>Dj zG$<4ZwtD>0N`umn>o%gTY8Q*ltVXrVRL!a9Fp4?I_wcA@=SY-8J7J#nBM+pG4C}ZP zvg{Xha7Ng`r_oYjIo>Ac35o2J^qZ$`+ME%2_aSe?YHCjW_C@pMvCgVhOCURJd3=qs zHZxQ32g~eT_QISN&)2xOE!y>YR23KtYt+!w&Qn`G77|$M+qOyrvV0S6XCUYy!i;9Ly>Y|wB23R_0SjVsXp1UMM#Kk!|g z-YMvY6y@-0+q@4Zv(}S;wX^kB8SL&lhYEK4=$auMT~bH>=k721iSqUS2t2$KTY8ev z+4V;f*<#x5)yT2lOgg1?;S?W&gk&fOY`(3j%(^-&j*l7;WP`j*XCDE)qndWzg;X4Y zA>2Y%AC{wf9iM-0iZtVF5IAe%tBh;7C#^wg9^IoUY}0C9B@%Rt zg?Y)w+?qGWnT;Hp+^9WRVp#Whhem7Z;xM~tCW66z$PWs^d`nzsD3%=UwhawLdg0Zn z`aAW_KX*^bAu>$*X8WI6rI52Q-fuVBcW^lPvGCNBWiZD?3+WXNHrD9;9E=R3Qr9yN zVS^=Kv02Bx3h{Y{u`@y?@KecLJQTbNJjcvi)tq-Z52F8zmmrCM;`$>+gz=xhx`gID zc=wxhQ;M8A1t2|YMJZg7cJ2M06W8XpTc4`6Imt2fu^(gX(M92=@-Km84k#`T&`4x} z_>fTl>GWm>gfbE$(<|Ii^i_Timr^fh*O>znC`e7^H_lunnr*O}n7yBu&FB^ez}E?? zFJD_bwR$zhG7j<#{w6aYkvFAeE^X_&oASigiQ4D(i>d%vQy@du} ze3RTBrr0;^P-@)`P(B&&Bq*v2GMe2@oDOLrA; z`#xM{aKx>Qs3UIjk3Q!As+i`V7hbaj-M0B1;$N44oRE?4qa0@m5+o>~`#OLF&__OXj8z#=numXQb`{kM8^7X?d$QodwP=bfV&BlG8}hI!`` z9?6RoWwR9|eBs3m=G>OyR_W+R3Nw&Iem8J8x+^+?1p0lu3w_hC`PLRiGY*y!oR; zN?!wrJK&M3y_l1LQI5Bc{hjd@8uF&j%jP2`*_lrt=uJ*yIC(mWV7}Qnxf5MC3Mppr zBh)U2yrahnLJh0DVdo96Bydy%-XA*qs5O_*L#sNC9ofho>^#IujlysBk>3}+g>M=C z19=+e1I0`Z4X3Pi#3PNOl?C3A4~eVaE-&+F0>46IZxi_Mq}4pKiKFY)HL6s0(vKe~ zUbJq|H*X4d-58r{2BE8Zf180uO&VtJzYl^!p!Th{icJS~6 z;O(75gM)SWh9c2stP)H~`eNGJ4hU{hugh)HGh=hY)v3 zF;N`@b`t0aBrhW|JEm%^J$jo*8kUH}{Nag=(4<{}u-{JUJiHG7QqcZ=Qf}MKb;118 zdU+tzXIEs=#$U&a!uJoKs{Ffe|MtJAj>X^UBiRbi{&;0^2s<|HJPZ26OQx@Qhu((V z!#m_?`}h0Z_jwdgmgQaA6eV?Vh{Ql(oF{KqrrV5@9AqsHz8m`X*m=FSj%u;dsXVkd zpS|#FYE`w2D(M{_ViDbX@+`Ve(WPS&LVUhua!8@sQd)l?TETtl5{KEK=x$N8}zC)aYmYhw(iX2h5y>Sx*gcx+L*?=aIOl?>U`X02D0sWkD~NK`$BhKN`3JOeIaKzjEl zJ0_UZD3`ViE66ujx&_&7q8c8^tbXoHH8&&?^8jsqMX&-afC@+JGXWXBGZ=fUHGOS8 zWl_CUVp5RD6^6j3SiCCqaDYnsviGtQrQJ$>UvE|)RF;|182%E`sLqD)78UkoOiT;a z#mC%v*!ZSZ-l`-{TblS-nJ6pc0klDPaj_7ca27c^2}o6{exhtz{-8+xiSmRuAB23O zo6Q)ZtA!l}k(LIMD2wWb3dP9?NG&A_>A#?fYJ&Sp&}^~{*j1J1m6R4`Jhbj-Dk15q zn0X`QuR?D(F0OmzzLzVPlcyb}uMf$OE*Sy7QsO&tyiNE=PU{T9tTe-jmM_p|~OfJi8un5Y{f)Ctk&(&2+3Zcb{W-*xe%y#{1xX>H}<4NP5tDNWLYEG$| zl5T2DV1Ce3x~`g-@@942kn!k6h0~b%qwbZo?tDR{bJIEV94VQbOMU-1POZj5SzqZY zj}nl9f<~XoOUL(+`L(6vlq?A{>_Q7N^sWb?Rd-!I!Rb9{7P9DZd*01@LcQ?9rw~w} zzNT77flwk3hv=tq9&yvu<<_*B4CqlzJox(b$x(eMYq+Gf#v%0oHVgXu=zWB(3fSMu~)v&$2qy;OYKVw z@-fJ}hG*J`E+f#K4n^)ToT|@;DiFYwWyd`?3^YRE8hbbv*ufiGM%DxlN3-R&a4SeQwx8Q@Vu7E<0tP_kK(EVRX*C$LcbU zEiSH=4Rjp^Eg_(=9M)z8wgMefnz?P$i{=`Wdqt1(5Z#i15?4uv&otB&34)WJ2+*NX zzOlkJ7B|eY+)aRThb5txPEi^yP^@N67SlS>01=o+9@E_g<=`HS+ zmB7f;d9-ldot*|~Yzz*QS2%}yI*N1Oa((tJ%z&kMBQ_Ii`i4fr22GIAM6^s14`y4@ zXLZBb;R-^Q{lqxQ33wLfCc82VXlbZ{-nApnnQNk0KcG{U;d@(qMDuM7l*z0g?FqJO zx#h^_ff<;<>J1-lh)hvQ2K?Z!k&*THkM z#u<%ib@4(q^r$#~QvnA-Z0PIaC{4CNVXcmogefZK9IUqi^^c4LnhqBzxE!WqFcg;D zt(oClOk#`jlr~0hip-SQ9tDUEHf_?cjWC}V*KO~JidmP(BFhGPx#@Ca4g>k?;aLy* z@F-Q}(N7-L4LewtN@|tT7f;44Q(HHL&#v#pEH%K}Ol?AU+{l1cq7opl&+g4S{hH67 zZ3Ai9y=a!}n&K2Ym;}~0XQ-ul%^A=;MpMyGV>$vs7=6=|GJdk*YBcoHjr4@Wm?Br@ zGVKkc<3ba9)@Dx<8M}On2oI>Auq5~160i1yW_Ew3zI=omK@N{ 
zmAAL8Y@Q2QAlZwOvTU~Xn&U5L8>ACk&Fr@QC1{6xu`VM#zB5laX$_9a-{l~z_FFP6 z=8sn_?Lm(QZE52076guCWCixz0Z%^;Dr-tfOAu-i1(}&ffR3;jDu(e#SD$=+Z>X?a z0GVN_pI=nA51yS@_+oAC*%Wc~ovud_0Qd@U*zX||$Dab9@ugIQvcd~8DsrrH%a7^_ z;N0dOb`OU4qj3B_`l7XV`aXItKiCwQvqOPevb9-}x#Zq7jGH*z&g%hzU>h zS#o-%9oA-icb~{JhG2xe#6A_a{xhjlUXJSF@D5Ui^N<(xy^8KMX=W(ypgR`(>d9^W z&&tyj7gG`^4RQC|08VG@If|9;HoE=CR)ut{5DhqaC4XUU6OtHQ}zfELtjV zEsz534qoQKKXD>YT^zFg8~Ao)p6&mRAF2ETx@F>J{j^&Dj0IZ3^7{^2LeyQH5eYEM%2IXS(T z*7sTsJBBS>8Q*y5qDR1 z3%)k<*C5q1B#$R0r4zjV}P?jpG*oGO33YCiEbf@6udpGPmu zA15pmvJbV(nhIY3L=oE^u4_UoIuI+FXZp715OlL_bvIN=Zpi)R1yI)5lSK8O_sY+}n-lYqs-#KEzz|sb5ix6BLX%mJe)5?Q+pYKOL?DNZY%F?oO-*=j z@9^-JY27(&ram09uLKuNHq6NJgk^@N$a~rn)_O%WAeX@-#-bz+)0z_U8p^~KW+arJ zWx&k3b;~^u-L1M+QlI-9+jap*`UhHdRc!~(4k{bGaI$`s>-+wpT*VchV~sK1t$w#-bcO^j=4Pbw>_a1uX{N#t-{am9W>V6JJv!mLkcZN0;2 z3$t0GPZ-8?AVXh2d9gw*9UdBFQ@qG>Fjo{+-M_lsJKgF4m;NBwb->}9{BeqH`fu)c zwB7RT2nnSvM~&OrnJ%MP7`7bFe4DWzVEzzh(4%1zoIl^vo6N) z6NNH0Du3Zb2IIlLe|=GqJ!5{ym&--AosA2r{o^F9K1E<4m*C1u?5)D5rbWBQTs=;e zrDdG!9Ft1he48CyDx?%NeYs^txx1HohSVS?Nyg>V4+r~5HqllnH>A-fp-hgg@!=Ov zM*d5WbgDhqVXP6S3bkeSNwqIug!6rb863T%!n5~~6=rgnaX#A31m+qBHej%2K|xt@ zaE4>p8<-An9R9h#fcR%}$8uPp0@*IJzBFBxit+UUDeOIJYl9a zJJ!FxlGn)$Ez%#l8E_le-J~;ZU`b@ayu-YKW)A+K65aRB zXOPY(u`Q`6FD@!JMy(m3MYeC*5+SBca&W?Tc3R&_WOS5x7WbhQ^SnFg+T%h7=r7tu zNz?&Mrh{KF8jNR?;vhj~T%v~YvE$68sex~S-DOHS3h{v{{czq#M)kg7$)rw6Vb^tP z`$wIwmCLm2ka)_FYsn1!nU<+1xUKdxbGs(Rj}_n7zY3a&nG{9Nj3a0aeoEysMd&nK zu*p!_K$$k0Th6F!Z5Yo9^(P8R)wzxOwXzZeiQ0tJvyDgO%%3@YO&x699*K{$?e`&p z+dSd>Pg|xYM_j|Igy!h7pBzNy}KmG50EIxcr&@C#s^v7X7*peFSCW4$tAU6+jsqw+$CZ9E#; ze&qMe=Ot5Ye^ZR(oHqT)tw$j={OLb~FZnB_1mjn`LINuilb}U|yrmdvazAuKh*;ju zAd};RliPGzyZw=o*3LUpVtZ$GE*XGeo&dlyCSM2Q=-B)UQCy+~<-!klRmA2~S*3OF zLm}Ywe<8Apu1uWxdo393eqN2&ukY)u-k?b-L50}zpYU(~(ck|RsX^iQ8!ebGMfLio zWkp3ud70&DTp*MBqh81e8)Xxd$X(!HG&p~+1*2|VFfk!U-E;oHP4HkxQ9&{Is|xDk ztR>ng3Q}%B60g8k`!Bj4eW9PfQY$GfMA{+c4X#%`ek=I#7m?lHX~B4@l^{eW$C08f z$EQZG8-!j`GJ<3P{7!-+3%~SwKP3 zf%eYlL#f+q)yDT7S0aI*0tHVcN;+f>NxB+#uu2G}yAdbOh}uSKc4P^X?n86;nFKe= zIv0(xv8f4JO3)YUKF~R)EplZS0v4o+9U<4H{>_Z&FQ$zBr2enK_#Y{FLeWYMy9hy+ zp>!S^KJ(LA-){9Ag7>%nM6uJpmo#5LWZ1FIt1QX}1d3EQR~F<3l$p3G57~6EeKL+w z6ien!p(Yy18YI(4nI; z9qNUBn5mgK{*_@R;_S-pPP`jp^;=0X)(^Zx!$59^;%Hn(nLT%y2pIITptc83mlg0f@1*iBg5`F(a5~Y62N%?b{35g8FHrY(8sXR(6dDq#; z53hO760x z{^U=QeVU~0`4zx!vZwUo?+g!VWe*4NJVosqqkzv-DcwvrzQc1$eh;0_{O0=C^A8*k44S?j$qoA<+Sb z50vy0n|S2Agj3;co1LRY^E7dt|>(a~U`>sk*2+; zP@9VBknpaOXpog{@LYid3n~5KrVG>@#lJx}r3_J$UqzP9F08F0 zqCZ&1ROoho@Yxm|n~r;AlTXy@BAP?F;1=3WB(ESYlWo<@Sa(vd#D8q_3GMhWjY5Xq8 zbGHKJf1B3+fz?mvY2xMc48AlOUz}&SX$qWC8ex++0tD6qq40`_xxcYm?UMw3go2R5 zb@*#vUCJ>V!zmXB!cJFro9e%0H7(`5bdh>aWWs}w#TT|Htc?CZ9Z(Sjtchh6efdY( zd{zT|LsX}EcCt7>-#aPu@mQ4>$1!R%AC5_n*1CQ6s{Z{Ky+zx^BEr>V?k5h5Kay=i z*Y;)@gUdAkO^6K%I78mj{O5oCMpVN7?0*xLsKa*ggvsdf+$;;{nQrH?npoZAoQWa8 zT3-+JIFnyYo8~ndC8F@Lv`YO{lPIu5vCR-?LL`ZLS@ZQpH#yn&{fz?7#cex^FH;h_ z;KJ-2?}D6yUBmlwas2!An3*AHXef_lRHvN2rCK#;8J66N5>aeoBvWm+amH3T8EjHf zU>0k)CVx#C)0EWEcMA6u<;R_-FrLTYcmYmR>~=MbgNyL2*fEWWl)oRU#~6eVg5k^O z8ka>oI7TFG!o}kYu9m|5ko{p|5i3M$3A$3s!qFD4qB%*mt+R2A0YFt?9=Z+FYKfwA zDGZd~p_B)(I!hMqG&MBIp9mJZVyocOMZdi!;mJ9CzcaWmN_W7OV#8f>w|XtN=6#P4 z#j3YQ0L?_WMi;Hs!-UNNJZ4p5fso>CQRrgu+TLWfGoWL~|I}s-*^bXxXE$|g)CAda zk4|HKmSVE$p`t>C+rmgGQdiv+k%(1uyP!K3h*B}%m3&N!2XJa20vg9?8{*HI2opG{ zG{~KJ{cat?N_rt>MLr%gq|%RmiJl>@V%U^ccPnseP;S4NcVOGtuf_Dv1V;Vn+D(%~ zX%@!uoyrNd@A`zaz-`!iP-biB(Xv%pE#JpuJ4ns5RPEsp1xF+77BlzAE<-(x1x8+3 z(mUl+Ow~nQhzFXNcb;QiXWUc2?MzD+SpAV3f2OMg5!JED39cxxe%!-O|8Joee}xZ zEn5+oH#WGR{5~Q@{xXG`)N6u(#X=?#wNZB**xB(EL+aUn`HJJ5T7*a~U 
zvkvCEa4T^pzn)2tMHySTm9j*O->S_X=a*+mm-7ZdvEJ@4>k!yh$EchOy8<}I<;i;m zk@TXDobg7A+5ET^1x!A{TMots#*qHV;JPGly%V@geo?Rjvbx!Q5ir(Nz30$v6VQ$B znbieY+!D|2^K3XugB6ba-)sZq*ko7Ov+jERlLdAo$1CuD7|heBSQN*~b# zn%>@>2*a&2kjLjFc$VRC#HEku5d4S>V!g6$lq<=~Peg#eB=^1v8x;_f_|ey^dZf)# zR8qu@Lozv`p@PCFn9J%gGI|mOL=p_`At|75(5&!^$gvhdv#nnE)F*h0>Le~SV8J^i z6Pu*P_hjp$Vax%_mvlfpLT7aVh4oBfRFm55>yYj5S0V|tb?m*&K|V(sitRmlr}VoSv2>2qJrf_cG~N}9{~ z-I=rVQ>I=6m9Dm>uH8cVVK*{dj;9jk(($h)bnG&-9r800ZSA&4kox4ifm{`c{l_fv z2D>gDHRa{P2FA$F3?ICEIK+CVu3qWjG&MviGY;mX&@AY+<=8C1RrHQz9wO>lf|Mqh zXXlor8K8$HAJ}w!!lC+%QBG#s%0W_v}HDwczJ%#kFnZb(WK)q}_cXsEVR?xz?Wyg~zbq4xL*Kk;>hHeUTlP?5`d>FV;rcAraPw=@yU^ z`XGTw3zrurj$w;^IJSz2*B#>XJN` zHfu;BeDEpF32E=sCFgR@KufI8wrrfn#&P`|dl!A()Sv$kw$qCL(N256rF}vYH(qss z7-AMbA-4?>ZVOvS$y`DC@!*#pQ91JxSh1X~A*Y2Uh|vY;!m@4dEbu(?5f)TpWu?wB zHm1%hbnBJ1p`fA@OxM_p5=dM(?9DgZW8rneFT~A`E$Mos$|RcDz$UBcT+lRT{V+Uq zC3#dR0|c@_C3>@jCS>f7iZ2?CIVCy9eI=aAChuU56hC#S41>Xh!WQX32P;IqzNMhh zJZV;?>8}|EE)2mPDQkM7bj#540$Yn`&p^jGL&g^8ik-Agx^$} z>_D&u&xSb8Q1(6!w0Swrs9A<>`OL?9SMpfd)jxaru zC6L)OrS6w~->JT>Anee}xBOb<`+Qor24EBelXo3ugJ3P~(WnGkOX#yf(i^44r`uKs z4ZB(Cb#7Q6CU|i1yj4vbc@grcJ)*rYN6Racv-6slii>OtGcYFGt9ma@@o5hn&}f<* zN0@F&vl$xdbig+*dy@|%T8ncFoLSie`I`^C?M-3){$2d{Fbppqa;nopqym#q#8B3% z$2mAZc|1a;q5i6wlk(XtG45FR=3ReX-J{Qsi6fso`pP&?!tWQ3G?2!>0xep!PQGf< z9;BP!P#ArI1lMe|Vm3$sSPxq@Y4G*#hmlQW|WU7e*c+ zmZM9wtSKzFu7zDhzZ?RcxfFT)WhjX&w;;ysrncH|isRIxjEjOKsH_-gyTzKV5ZL?Yi)4?2L`?Vh-a?NL}Ka z>FuAx6(Qji&zG5&Iw&Huf6=!QyX*>B*u1Q;3zx3lDkg1e9b2-~97oKa!*RK37zVfN z65~&5p$+`#IFY>@qUsv9~0B(0cV2Z&X1b42w2nPYPKndqg%0Rqqc0 zi_CwVxcF}H?GdZ;qC)3ukTs207R>z{gtacpBqwUv!F7)+JAM_Drn%K{*sJ1KE+DnY zdBPm{cXYVaSzg>o{e899&(^*6+vTo($!p;vs=g;2MvKwtv8@@zw2EvL*1-))s{OnW zO(#R^ynHq@T!+X;6hIQr$+?GbW7be;i6V2YwuWRO7;O9bNV~R-YIF1xAD_BnDj;8>% z<4Kvhp!tj58B&;}lGm`9@|C7yF1ykMq`m&AUr86fXJfX2wpiz+OQ9mSB7o#@C#TMr zGd8EDwDarS?|h|SY9rMC&^lP@m`E~BQceIRMzB}7_}lY3A-)b6s9Q&a4X96zMZ9P1 ztZX{h4EMNW+UEB(&+=H8wH$54%JKbzFgxQM0kUkHq_xN>eGeeNbil`C5?J~Ev3Jt* z<*G>z#87cHC9@2?a2eD)hcYb2*GS{0 zIwb|wimj+on_Uq;qt_m8sP5ni(AeD(H)u#_+4gpr(aai=3=B}qdoghQ(Z%%3q;DU?JFbi zJJ@mEPZUDI5m6Y2uxHk0Xv=-l4--Vnpt(v)N@BzlF+SAhR1kE;SDqZ;P?9}+kIizQcRUJ-nHVWTL6T@T1b3E%e$436Hc(2p<`Z4#C=Io>1&0XyhzA7wyTh{{ zO2`+SPBsYan77)Na<~8=NBW!GK%)vXei+WwfXjIh{GpI#UPFQ(nUO&oU44k1H9UO3sh`|&qCJ_8Ot6O0)<`iE4{{5Lc zQriO|c2|xpF7jpv_ra6lV@eDY6ef*AR{HKk!fUZ3eo_AW4s7$>vzF|K+o#ViO_jVR zB@Ux+#3g-^MScQY5|wStWyI+Wuoel~pC z!pv8wBc|(Z79S??>dzM4Kg-H>T1EwJz-`@rJk$c`X69t{7t~GIF3~o6PdQ6o`*Pu@ zqS4v=tuE?7Cn(-zJWW@2-w~YXh2vAARef9mxSx^>?ukqndvfcDOOLWR_kIZXWbAQY z>bIDCc3cH4pIyrkc0%c2OK?(V$=BDyfwWuj*MA#6p1dWDgnnBns&I|S@+F&~IwSB} z`!O=Q+}Ydx{h*f{(U|070Mb_!YRKU?awMYo2zQ_N3r*Y1fRh2+UqL?^-iuHIhZU6D z&O}9pLt?2ZQ5J^Ek!_sxkuU)xgmZ6F202i}eY?3}x^e z>HXkRS+U#8EF^s+VoS{@Y}$FGDgWnIKUtr8j_-o3^d|X<>2G?xS<-2HDh7@YPg8&I%+)pso*tx7-PbCc}XzeQZ42D$)@{` zFIKPM$$?g94~6Mc4!*bF+%@IJ<*b;`Y_FH=$w5$N&Xyw>!n!jby6Evq$p#y^2LAB` zn&Aa7c0JQn14nW4OqHh`S63_7(5)F$Cf65=T&%~&!m*J5>0j>tm%IPpnBAYB)L559 zXb}@U6cS1r|FXT!@Ijy@{Q%_=#|pl&gT%VIeNkQShZO7SA@|wYf;qzDD%`>Oi43n3 zxxTe``;AIqkJZ>IepqTMPb6Az*Fv30aE8wdOws5#TwcD~vV!VVTjKX+-~yj#2^H@j z^Uf_84BcQiUqwKFJZwIz*F4`YHftPxJfsTbNubVO(x>7h4A}?Y&pP6eTPLai)4UnR zGo1oS{ls^eQW5H>+AHzh*bH)bE1Yiwbt^`BeuA$+@(R}$C&&dDJ_r9-PRa-+&- zcZ;z)aQrg5!=@&B@Lqxe$b-9cmbSIb((j7ZwR8coXIdr0_WK7(cN}SHXkm^mf7D(g zBrX$qkza%ft~Ju&WC$u>E&NlthhRI3V>ey%dE1)|fi-7)gN#fAPm}3K18LGP%oAd}=;T%glq( zRq^}Xcl(ijRhx8x^DpD&KeB$J@Ylg_Sv#-89=TcaMuG0{f;KH}wt2lS_VMwgVc{pzN}P*hr))zeu_QRyJg@O1x!CNI1& zX_Iz#4xV9{wz4)5+Bi9DSuj_)=It8FXdBa!9xs$9U`H9yFDzNJ+?f20Z}`XC*TOB$ 
zd#5f08WSTtTwE0XhC~Y-RH)r_GZ`fBM4QJCDBf5On6MY%h8GBw6i1-$0v>Y+qmw4a zIqqg6HTN%wFR(1iVcj?!hK%8i6CG?7Z{gQ;bf$t({^0e@C*CnmpEUfe9JyG`-+iQ% zv`CH@(}Tp;$+p7)XZcA`O) zFAH4>K?C0licuUBxZU94g5I$u*$F6rJB>Rw-(df-M&1Q0BNKSbdG8cbo;GzeH@HBD ze@8Wb+iEdrTt}3#Qc{d4P)Q6aypJ}!gL#&AWo(L23shyl<6C(#Pk9szBek(Ee=f@` zJy8)mC-aUis-bA74Jc?c3to4OXJtpJ3kw=n+3yMnB(*4rR)*x5B5cFo8)WYwd8sE` z9Fje+AK=f5TtqFvoaz)vzlP`5WvV9lN8sU5r~%QmI%P-v`v90+6FMJm%tQ0lqr0GjG#Ya<03v&y7a;(-f z6hc$gtry_DLkos!kKBKv$lBBOJH4F_{`+PA->v%p{8zRzm87F`4Jil*#jJe18!dvq zt5?`mWOHDrUeN{rDjBH&I7#ypWgy|-c38)A{#ykLYY(V|7tcq?kwENi@sHk}OlpW$ z==Xy|Fz9P~+{Yng+q)4L3ObH40lt6) zku*cm6ePnMj4W-uyng;10i?x^XQRWTYzAx|+&fX$MCv(eA#1yk}Ujq$76R=dO00l~c(y0`0bUhn@^WBWfAD)_JJ>i?0|)kvJ=zdZjx)${+afJ9^O zH?&7t-QUNgGWVK_K11Bjz(R@UJjU#^By_aJXY9YLl=)RGZxnAejxtGi1l;d!5-`sr zJY|`($yt7pPI`_0SPk^oW0Tq~Y^M)?ebj}dBEsjMv;G!=q4dB0@Q*i^j&_Ucz?vZ$R*V>49=iP~n zU-j53bzSJRqWkUjOr!XZjbk@&#PxFt|tzW9)Utjg$N8csA_Vc_s z$8^6vRYkw(E)I#_$bP?+<+uLhX=A6=T7K(QTAt2+=SpdZx6jQvk@%B$wS+$(Q@!@{ z)^I5}IX>HL^#6IJwp!aM;>*I|ebEY^>1-D?F`rbKfS+SB+TxcTQIx?)HZiB|@L%W# z1#vZqjXqE)eyZj0Ex;9l_S~RO#Tu$=V(2NjmF4{XVdfGp7O}GQxLEr~g&0&lO)76j z`n2@0Z1txjA}GzGp)WBZxfMkG)Ce@TE{`>zd=8jN!dofAbmZRu)w3H9QE}Ldmn*t` z4%WU`vu=n%g0Q#)7awDs3%t)q(-<45*ydv71>bw3W&moz@#vpviqo~0{eTF)d63Dg z)VDntP@$mHmHtVrOoW(-Ydj;lzp0jfl|8Uim=XFokUKmSP zeGw9P()>19ZX;dlkcI+mn;3QDfd%4cu0kg8FR1)0}tF=K^a*UQKvri?wUwhT+ymV#u zdr#IJ_1wsm%>R*Rf{C%(+H6xV2Mw(tVn714Hu-KG$bhF%R)c-#SuOmM<&y;V`t!4$=QJjW|^V-KwtghRi-M;Lk#ac`q%O`iR zg5y2KgzQ$cO}atpOPpjdb`d`q$qn=rqu2jmlZRzBwZu@XT!qZ71jZF2irmaA1ZTc7tM^u8LG5T-L= z;LJbk3SfA{b5AWJYPrg7#@(v+6Nc^C|6=d0gW~G8eNikBf`t&A#x+=Qw?J@7a3{Dl z?$9_1?(P~aK;v#9xI2wY<8Hwla{Jrg+2@_Ad+NP9yI$3;y0usTv1;}jbBwWO&9&y7 zzxA7A2o{HDzwHVwPQljCTmEPCL)BXnfL{h*2k6C4<76JiF|(9 z8X|HvyKlHQyoYB)9r;OhEeReof${XlpbX}3xQgNJODZ8x_1P6?!Fg2evo5;PcF89k zZ-ZBp#DgYACD&5|Z7c{jWoh3^5fm-ZnVxw+mwO^C9?vxo6crKD@4Y{itu49E$^Jh6 zx-K9yiC(NECc;B0$Ll*C{+y!2`<*l{V>hog(vm;EY-jt5N`@l1Tivr%Azp-6@@wvm z*ZG{b@2_orOeGIh#|MxIH1q?KLMz0t4lE;`T>0{p#3c#7W%#b5K72Nfc@pV)=IJ~B zGU0qnOFU45qA{j9F`SK|ESnCUkRKItY!7NpNSGSU`pl!iqjdavVd~d6d=u8|H^d<| zKh#Ql3^S{|3Uo~3cqDj~fRbYzV{c~AgNy(?Y$|Raio-eR*3CM(kw$l@@A*JPuy0?0+i?iaSHzOzo9Qg%Wpb223yg|08#eD!b@U)!8h(nU52Q zf_$Cx^{_r6kA5Ki5IxF~DEVt~|IpXHt@Z$u^={4s&@OVt14(qa6D7S z?La?Y^9YvBIlbz{_1F`-Ia})I(a;oD@UV7bl;s?r#Hu%fs)c#*A=B`5{Qll#tMm0s zP_QPupEY+`U?3uZ9uTa%!?VPK7_;E@T3XYoMnQOpQt)e^6UF*r^H444t1QOa{wVYP zERtHQi|D>oa;>ygu2rfd7uU-a^=bzE?ii8C*xK%oJU>y7NM6N7MTbq{h?hybC)+cw z?d_87f1=p_{03*AT>3YGP;iLvyT-QXb<-X35%Y^*0n+;~r(*0B+$qjUzw+E43++~9 zr4l@G9mL9KS-NQrIBpRiR;tTT5YJM45s{f{DBgVWR$lf*#_NdEl`SR4%Mbzmb8pA4 zJpjdV(=Z1c-?_LH_9AXyVYC_3M6qlYgoq-NP+ej+FEM$vLjZHk0^7fmqbDzkit>~=5AiS{7l+FhE#olR;Apsk?vNraL!uv1tC}m5L`vT?#B5W zCm?%+&|ba5a?*vyJh>LO+7n8hl4K{Hf1oa8g%MZP-#xr|=a* zS{I7p*)i!5z@>#Wzv9-jr!$5lD4lX;K;-z>RXUb=#HXSXMN*E#ACX~F3!fQHV|oVl z6lBW8c@@>X&;$xQ4yO5ulzTVWFsU0XP)W@R%WvMwA*!8=?*V*Gwt~L;wvCK(%_v8{gkDZf}rZ103SaQ&^}4^#tG!xv)N(?#e2TOxtFw!1@mp@#j7iQwO0I ziPR;XITtI-38-f|m8;nxupjE=@>mq_wz^BXoH*&G>*p>9tpHZBd4bZHNK6Ok!!IR1 zKOJfQn7w2I)W7Mu-JR!nkLR3fDnzYSl5r{4B;Nh~x$p{#2t=BT9WZHzQr0#h;g+X zW^@-0818G(ehO{0b1RFoq>4pX`y-UxHN*{)rCVKQqIIrBo} zA%-AqtS^MTNMtc3oVI;(+7rZIqnz?FG+Z8;zR9+>;Yt4>vQo2TI z*GJ#xLpiXDmqN1jNu(MBkZwN;8QOT0>WlOudX`DNC25N4W|`8)*(xN-NCB`>NL67MG!* zKJ8>ngCFX00{e>gW1#zu8hCO%E=_wOtLO|E@#K)MGM<{`fsV3_wZXw>T}tfDppza# zp}|8+MLbZ4vf>!+Q$M-=Q{??p$qN(9Vf0mDC)a|e@*eW!(aK7UUiDWwczAWarHpKf zBVV-2gX0|jXj8^GirGGUqtTtFJssPG1+-nZAom$z7=L{yY9--yDn2yu%;kiGtnrah zj;$)aEp(Hlr}am-OX#$p+s`!J{5Km}F=fc6z;(1IHoBzCwpEtAL*E}?$R{P)IzJ)? 
z4%sHMK6TxGxUBbWK^E}#4}QQbq)U$S!fy=cP7HU8)I=~Ua>Wat{b;OOU{cMIAZRJS zTd3@!pHlIS1e5%Z+ofA;68bexO#&}mBJ|XW-{EkgACb(qL2`g;wbjvN=nIjg$B7kA zfBUaDgwd1Ixs8eAq+Jts7h3eqm2dZZ(>C>Rf*6@xV$w9w8TIMXxs=HAlHM!f1!K

VVJ0F&AQfd`Xy*VJFr@ZQEhutJ{hjrtG~aNAohhDgcS3T& zz|=D>?c;V+oe1YNwHmW4avq;^G}-*M_d!E5eS*;Fvu6w`t#uDMoBg&V?4HtOtu<_|DG|G8M+w4l|#!cM%th*rA`(uy=HX;-3ecmhu)US%kXUQuPveWz@tHpRYuhADR@c3o}rK+SNH zA|OKJfpkIk#tX2gN(Qtr)wa+-c_3C76jYg+C0~O0^tpo2>p(f$i};oc-A>73B;A~o zOBjt8BlV^3l6@)2L}*<;^?TT0DwEp%bNO0#NH!$3CkJ*$kZ`G!C%9gh5Gr(9Khm_r zDj8Jj)NxX^X)qK^l+)!;-)|^#m|E=*64i$gd6VIqTyi`(*cF6c(RdGR_64(LFQw|R zaZ@>M@Zvq%!V@*AY*PK@F^E59#-kP)XktG?#ivC0>kns9+^Cmp^ka^0=QHfRPxZ+U=iLV`^3NFi%s^XSw zk#F0_6c^#9cr%Luv-)B>(8x3OqWGgk+sNmMU#WF{W>X(W7E8d_5@_lu`(z#8eg~so zdzTl(-i21Yj=-B>9anau-h2oD8-MNK!%J$@O;}!Z!%mrQ^1A42EFVgAKz2Af>#h%b zvNF!&nd-_@z$KOO;zN&O(^OZh(9Dq_^l`m)MfkaTFjqES?aYVQ{FiKMuXUi7WR!h( zj;^))QMTWIa>qbSdn{#~E{$tv`USrTF8PV)V_9_V!tyo+n0_d5KChk7oNrkMuJHvJ z`ySU3CRL7$p{6X}&T9z9d6?pX3nU~cqHVZK8`QfLb_V>#OU*026xclunhuWYTx{c_ zMYC56{T}xWugr%ZE_xb-IDXg2$!m^H;QAaMLAoKms#>;3kXD;aBJNK%rd-1?D^=vJ z784$u5MK6g=7Tb|xSG#Yn57zJq`uS|q?+|B4c{l&bVKx$czxGmM6 zq|WNpWRPoV)F`P`cuLlwR8jgKbrFq>JMPIHb}EkXwj*$8xpbw|eD($FTFnAyQEj6r zC&IoQLgP(ixwVDAm>Kol6K1IN40uB?XF8ww>DeR}rqNv|Usw}X7+-yFk0>h6c=YqZA7v{j<>KqulMZ2eldGV%i2+9MqdmSf@3@NBQ%DTneC$?6ZcbC zsI32Ycgl({7M!$4Ag*=qeH8YP|$O{PI5hFYeROR@V{A)ehV z?T;QWH*Y)w4AD_$yi1X1;r}trTrd4FLvH8zn<3VaQ&gp@>e$3g=v$tWZ3`xkyxsmQ zWlm_dpxm>o@g+*Ccd5O&1uD#eKwpx1_mjtC z7TgkPqrXl!m?3`kC(E`_!9S~bfkRT@q^d}-bygS{_dGe0q%nzH)%~wkAE`m=VI6wTSl?YGjreS913JQjDZy$3ia{JJR!X0pI!g(|r{wopQLZ?W^DXExknstz{ zjfTR5Vo+7~d9t&Zn{$J@)RNEQ-`p5%?$!qsp$YJH2ekdBof({7i);O$X=zh z#9b3?SMF3MDD`dz*5o%IzOsSB*(rpNjPyz3mtmY7N+I*DrjPJykB1zW}l$O8Nxme}gtC?cyVNrmw_n=!`ZIenh? zA3l@}iSC>q+%5+UFr`L~g>5EUKM571Sn#}+Gr;8Z^Axz%8=4NYFkt~yrsG{G(*tL z4^0d+e0Sbe{tfL-5$AC3AJB@NXt62z#EN~wu~5b)OpBf)vsbXtGu{6;{zBJokvmUXO^aegOFx#R8zLo4;;i5R{+Td{rovBZ4{)Y7}d2?qyP-@t9z|4To zFs{Y0lhAO!Yw%KE>dkMGr(`Lh#KebEUsP%*WZ$L5(A2e{vT=4uLyvmIvNyT;l4+Kn z%vZcz_*ed{;q|VELHKm|>Onl;gm70H@Xi+l+5nRZ2(*r?IPdJNeg3>@7*W2ka5dGh zJ?x_DN8sY%&qKU|MRwg4sjCON)y?la^4Ag6BJ^C_(yH<8#$7b)8uCGk+i8hE^Y zmTTiJjT536DPz8GVxfqMr;UsIu8OodKH#|tM~+>LD>2+!r0ZW_y^C0VCX}mZadg4t zrrr8tU=R+*y9+n>4y%CVlBafC7;rQ?Sfsl)g)vQ?293TDJ9)eG(n6nYIad}fK0-Yn zW{zsPQ^goa0j?8tR4DozV;O{X1jIg}s8|wM)~$kTe=<~RjS7y-Y;(c{{s>kfkCM>c z8akd+f0-ALuCkb5>$IZ~$&=rFF~osYz})(cV}dS5Z=;|7@9FPE8)rUC8`B!S4A&Q+ zF%4vfbbH7NHn~~`xqEq#k@j4ZsO(ixjObahb6tuV8LYVEnmy+RdmKDM4KV@$%xOWm zRIJQb31UlHPlOxnFV3rqk~VAQo`}ABbG(Xd>+tHC`84f)|GY*{<4)f+I(6X(oAd^q zrWxhI>4dl!ni}mufch|egSm34s$JdZ_BZv{Gn!W2b%^VGx_})yxRLx;;PE!NC=%Ci zg6Q1+oAJvQbLaf3hhhREU%B#g5e~qQ2pIQTXYhPx;osBYDyQP)UUFu*R+i@t_HAWJnQdF)Hk>Es%jx*u99y7T!0 zNaJMv9{||-AHac+SCS4alSS0;6SWltaebxaS$=$5VD_;xIw+9pR*Akk9rP1l4|arF zmTv7dF$-&(_m}h!h#g&j{iT|?q|o+s{@H*EibtwFbfnzmj%Q{zLnFTP#loy3&*xJq z>1N?-Duy^T;^rTK_WT0Dk>lX*evqoZTgZ=7sW64+>zv}f`u<;!%j{BT8^~{x++uia zhHxB#&@5B$Vu+!Fs-$2iw~9R-q-%oYFlb!tVXJeXg_epSlRFm=H?OquZ_sU#*J(=7 zIZ84{|5~kZ-%VG6qNNetO$BjbS9Tp3*zn-lKm$L~2c^S}MuP;6E$e-|=lsNKI7gRW zM;($v9U@bJGglAB8jrw=s_^+RE|*Xf84S&kb2%HFpX<37j3DrSYYQFP_2HoX&-rcK zYO7%1`#ij<4BZ`?N3VF*2QNw@To7614`Ku7od75Z>!= z^TD4)xoGWS0Cu@F#}R`92a($0i}>F>?7TX&u_L*YR?k@?J!YUH41%EjddGZ=LOl$T zLsq?-cZY#M_UBv@0E{x!?f$n5n=kS9>A}FR12lJwki^bqHZcIcq4lxiERS z)w5+LxmG)1Vog~UoXWW^N&HBpP4xcd!%CQRd56?IQai8rY1lwK>NXFralrvF$SA=y zC`R2?EFrW^c}b7f-j&(!FsIHhasfMkTGDPl<@z=P$_+!tl z#v_!G`7AP9H9mG(@GN%TPZvU}6Hlf}hq{>JHCGcH>^Y5>jnbO^%RTKlwL%ikm(-pw}z`l?4QqS^WcGbZIqFF3Vte80iR_kwr_F zVYl8s?l>_P&iIn0oNg;?ZS6UTZ_XA!*avAADah=&ktzDd8Br zm9IyxN&H(+(kXDsP%A~2IWz&--n@fkzB2Xb5iuMmUs9i(9dPb7-vCM)CG`|1dkp@a zE^M*}2UV8sy$&(!+Ej(9aoU3pmHQ+pG9S#s%dqYxB@Jf1@HNetyk&L`A$yxB#z&#z z%(kZrz9L7GaB5nWz?)FQu6tUpmiHu{M-jC-?Dl3M3omM+M4SFUjWib>Ap_t5A_hMhLJ^kk_%>u#wRT2~@)kZ0 
z0;>&sF8B?8yPdF3760HWii#u0Bzj?4vY)uq^0!CA^9X@bRM=EKapZ7RCHHkq^u2k- zFgug0YvNOkEo zVao7=5KRx1Rm*k@lmyJk4JF9FY#3w0@ouOL;>@}d+N^%5ZVw%EZWB~ca)#Jkc71;+}Hon z6~P%m@N3;|y9{W7h&MzQLJxo5_QnmMyi7-om zZ7MTIOYH|~Ad@5|E!j&|{=V8aP0GxoF^+i8bo!FK;hiGbpG)JF$ez*4z|&rp?aY^Q zYEO!76Wt-JLoDi_UL$IlPo9#THYYyI>rmES2`TfFEJ~fhwVmz3g%o;{3=i8&Xxd;k zOO-!ge$@aPI&{qS>g)_;$mON_B?RfH@d$qM{cw)I3;C^CxT}(|BzqXy%A=%SIMFSj zqM*vVqKo*ZDdW^d_o(snAsM}mcqm2khbd44Dl?Q$v@A@pp0t$k87xp>G`Hmd=9;~G zEfMCD@M2??xYF-%6hH4>-KQLkw7yD9N452*6=HQPMhWeRn*eHf4#yxiau zxsJ?b_#4qVQyxKsaCXKNKwScjZPVCkfD84Blqms>P`*yO;}LZgecO&ZUkzZ2+CKnI zj3ZvVX!A`I1gVqw{%j3>!mDHIvm*mdOWU`21~ifQeX5Ba!0T$(unmO2dBg{j`0^gt z=j*mwBnZQqu(Wu(*3dvi`K@CqYvGNU*ie2ln2F1E&>78|8aaDmxc7RRM9Myn3o4wVh_P>mNNRSWT6W;(8=5LDF1=B9jcH5J=f z4=KxVtf+niCX&jt`fU8-9N57ZGKGNd3ZJA-4no2*4yRWWpR|l9FTX*$7-O);y>h)S z(b}_D`_Uzxb8Ea$8(lDI#L2GM_#`Aw0IzTWgLi_08+}5$D>2XajiaW5`l%6zw!z)3 zJ^vY1DsuuqEE}XGSQc5Wyyht>o%)R`6e`9uIK~~p5UG)?RY9+n+>+Cf$!m^o!_ zId$83&f1weRe=z}7&SZJ@9tiaujkG~T4xs(6hhYsIn%SZ*d_A#^Z)Mpd{}X8 z0iaf0O?f(VGz`rT{Nre~Oi0X7ka3B+Uw1_wha+07$@lj!bhZqWB9=J6lsVB6xLQ&T ziHyY9GJI!{3f#73%0&@vuRr;H?2_c@)2^#027@DHOlWJ{NSr)SPyxRqCb-J>g$2jp zZYizv4!mPBnh)9((5k)6wl3V`@a7eN{i)RfzHe8mTRjb}&Vp`Z0eq1$X;lK*C=hzQ zMM`N|h_OoW@>XT1DAce|LTqjS15ilAi%tu=9du@A63|ya9^mt5J^f?22R`n(l5>VP zOzW!de*Pu)ICtx?d89T4i)rQu=ztXO16loblzV(f3EWV9*L+@GX@@%&oW~7oDv~>d(N{SHKpogX9iDF845$J9v(-rtme?IAU zc)f<=JHM~3VsOPF0z^jbArq;`Sp@$8|6UK&q#-kM`eR*2@~pxQ3Z=*X?!-!8N#>`C z<}kYc%@b@>rFkl#qv%AZp@DQFoN;Dx)h$_nf#w~OuT=&)7UL-02vH9|1^e51C0g6& zA>NX5O+!{F$2N;^C3$Pk?=gjcm!Nf`uY3K;DZ!`skow%UVd?$}_P+;+8WggXOQYW?;Cxp|s#6{SY(2jHx!LlUL7n?ix~`Q| zmG08+ZzHi_x36OlMmOcuM7_~KBCPJgeIy49K;FbJ$K8HyN6WKOPtCwg`vj=nx6(4s zMl5fp_-#_GR58VuC9OLCm7M%G!Al;-lqn_xhXd>ZNYC6Pb))Kl+3)8DX?-kEg3nAS zM!GSar=qBWTiNUpEvdO%c?;(#9_5iZvcbde`;NB$-RWPs zC}P}YFl9JNMR5^qre{B&s?{zt|5Y=Rw|ElqQQvl&Cr#}!2ZHMliK~0lh_6p-qoM9B zQoIo2k00F(Bwq*Z7B|9lSRoHma-~ayB~-=S+?>zM=;W_yU3b@le*ogSKmw#S=z>%B zB(gV9h$QsEPR*{e(a@7$`4X>HfNa(e#OmxhHW0XB6Z%2cfX^hy79N=eI>Pzz;-=mXP>b~kQ^Bjgzi?SW7qe-zi zA#d1$2CuBy-j-s!#xX{Ih@t1M7Al@ma4~J8JAExsmz$g|;$fG>#2rlLmwHLx_vo>O zd%Nw%Z#@rjjhSwD6=I`bx>iTN^lGn2^4;Z+`1($otsnW-9|m`!u5$5tx)11I2`5io zRKKj|=7*(^lU^n!H)j?G2yU3uG-)A2XB!&yXANGi6KSb6)PeAAgL5}zfa2J|V+fS^ zee7Ry{eZy0rWLjd65q5Eqn?F9Z2PF48i?_RRHNy7C14vEeUh7j`cj%227TdD=nbbq zGBn*jVT-K}>i(}(o08s)K+sUrDRvxktTX$5e{bcn0;A`lZ|@^iM>gvD`ZT15Fd<}wwOR83X-C_$G9cI zC4!zXCQ)gK`Kvnqp87g`Yj`M3zMdyoV2{B64_l)?NR@sKNNT87tw&yu7ZH?6=ymf- zFh)Gl@w;t;c_88SJD+Jhk#dtu%xeuQn0d=MYvQI|T{asB)i1B~#OH~EL;PRcRn8R+ zraHfT?e1P{woKKHd}fY5-CT6E2s2h$A<|+9>WTPT9aqT%fMS;4eHQp=BSAGIJe5ux zx4gG#ucKFc@Guz2OSnjLgSV1PiW%f!{*cf20gh-&)WOnJa^>A1vHKsUv|4aGBzV)GH%Y+vBJc#f>oDwjSj&QHp_F{I#U$A2J%z>s zUe25F0A^lQFmcrcA`Lcs`SwVbkuBe^Z89QjX-c5lprPfNy9a-+%{y~B8Tw)q3bV*S zYN?S|@GSipsfvJeI?Ayfu#e+lV_&zs`PVk=W=nP4Q@q>C>}(q?e#d#bx2Hd^R~8OI zS0*-hxu?40EXNLS@E)IhI5Nb^m8kdQe0iaYwY@iJEYS0JW_ONu;j%Uu2#X)~v; zx71aN_g3!9ZsAVigCAfqEjaF&9mG6~-mlK%RtuJ0Mq*5rfw3}Y!nM<_oaW+G>Tj!8wYOIu@*t^(U z04*#(Rl&z(9-0|0z9M!AHN>bn&(Oa5ntN)ih;A(e?`)S7$bVC48O4a9ImFU*=+r&@ zWdili+WNoS?f+a1Gyy8|Z^;sa!aZ4Z|DAWeo?7j-EjTc;M8~J(OAd z#v`h~ng?k>t|iK;&-|nO*vgOO`C%YcfCSqgH>h3AHY6*6nRwG~R^HwLXuc&Kkh$-i znp7GRLV$H=kR2%O>RWyuG{pHP`pD4+ikVX#LFCMOR> zg8oCvq0SwSdo}k>)v9K6W9A7}a&UY!Qm5+eTX+LK2qpl4%?pbZZ_3=__Lm+tQ)xp1 zb^23sC*Z?S3a8Ymk+YEELuV>R55@@Pap{2SB&qCG`C5HQ#4*Cp3&*d+WV0%v_ntbi z&7`!M(8WKyP(>)|!Hk!YQc;<7S5X9XzJ02S%7njMq;^YlG09^w9Ig8@1)@XNf{=8_ z!9aW4>xHfHk~!_aH7jNL4LXXyRJeprMF;$`Qp^uFzDx6YfEXQ_iJfQ|lvf%uQRks$ z{6#{YD#e3PKK$QL2)2(w7{eb)P3m7f6GQ)!iX*D2zqH$a-(z@G#_DT$*;eZ30*}sA(x$;SzwNDTq15Ah#N||xxtWDp{LWo 
zKh@xrRopjO1!y_A@mZiKvzuU_^lT38KGIA1?G%M;jqG74lfL@;F3Ybk`q!!s=tonn z4birwecOOFd!?q3=?oGLmr$i=2dZzK-_kUW$-mCEyGH&#`{NVJx}_9u&>H=51h%AE;r&(Wz|uep#MX3uyfB~J4pDmC*3;wMVlZx2KsMg@6UCLCdo1@VfiT|uyx2|og$BQ^D4cen8pM9a*kUQbNbH0ZNkQk|Fz6+@b0 zz=TIIua;U7456Wb!Y|t1>gdyoSfd@;;>m{*aA#|o$p855*5EcTjod!Xtsb_Kv* zQQxk2HkO5ODJ0j%%ydkgVK&%{+Ec$|wpQvtiC}f59Qiddu%SgQY>Dh>ipn0uSOpPgC~OVvh#CY-YK!y%`69QH&H-0aKfqdA%LB@7gB^ z=Z!gCA~l#lHqW#)v*O%4_-}6qS7GaGIv-4xT>Ne zVK`W%ZyqE%Ln@U&F*XaCfh!Xw>?V0Zrf#Ey8(O{%V!}-%o<`Ol@{k^QKW2(tUT0C< z{qwvvM^}4^F-0EpDaDTI!Lkto1&sqF{4FXB14JE4rk|-e3%}Y^BHDS{svaL9`m*a7 zghOmwHg3SP@V0z_d-JnXD1$u&*&;Rgo?HT){SUy}zD;X-!rfPn--L6GC|Zdt5xN@o z4vXoVxsFFGZLftwWtL;pOd~u7r?I+%D-O+UQ}oDUkZ}WOo6F{IQ+yPpc>2fv7bTL9 z_UENIgvCLb%=sK72@aJp8(*-!lG0$G>51JUdK{@BUQmCYU8p^?%#Grqp*`Uitl!?0C@a{=7VD z^)2lC(-4gNoYHXKqC}k0UFra|f!f~A_3Dy1o3bq{(M$7^YMQ*)(<(w3tQ?=^{sXY3 z9cZe1um^R6<+vHR#}3*j9;U~H6s7J(MqXC=ZPqm4`c~CHoySwE=RZ|kty9LYXgyn} znplJUSUe}6t(~x{ENYPrh76|V*2}!J-i)-(Ss{*`yl*j|F?L3L5ihfH2MZGL-TI59 zGXa%7u{&j{E`VN93l_T74e4m%+ic&Z@}g#%XOz#r?hCt}apge{1rigVVRo5uE_;{z= z`h^$%^z!B>8(ZK)$aIQe8w>|hi(%C<3DkpV|uh*B>u-uRLhrVzxTQ>+|x8f%Xuh2 z7oAzCv0JBmtR7+(Y#U|q{{h&CivI&RZA`nzy45>Rhcrp!C*LOhyv>n>420Emikd&_ zcS-M~gLuK6tw<#5I;$N~JS4oENf<0NwgiC8-1^eR#`0Cg0Sfk2hrkTdXXKEPQ0J0I z%&jlx*I2zSsprD}F68EOcXxLoOEbU^AAi-WVF&~Q58u8o*TFo2d_6xIw9*YFtjtJu zDC`&kdK(U0m)k%-pUF=H(Hs8&D9!FV_jej*+8>>~>;z)=SZkgQ*;gkIRYG5WdL(KW zZf94YCWRF5r1UJDR9X?6AGt*q@fuq1F5TXWI%nO`wp0dZ$(bwFGTx7zX*v}}IQ9zy zhm5xt_}lQHY`Z1K^bF)7OIhm(3i1yfIo z^-t&Y$8@w0&s5KtRuk>}w)6Vso7lvS?Ww!n`g{(^cio8k|u8)9lc3WX&}_$A5zxnpd~u~ljjY`E$5JC=@1Rv z!}r*5MYH680D*d~fBF#ON$`?IJGpmU!$|aOtg_{MAW@5Kot%^2`Ga(-m&swk(+Ud&$Lk1;y+qoN6X*lI}C29?x zk1p%prZD^Ja1rxlph#Kru?P4bmOTLOwyYdN8SXUd*UsG=CwyDwNdCqZWc2GEBQ>Y; zxJQR>#wW3Ur+D+pg05|-MF-eLmW8XWhLS?a?p2b!hxn%#eT8uo67y1EJ+8>EW0Jd?b!(a`5 zd9ySws-Mfpe$F9Tgv?^HX=Cs~q-|c^tcj@37X-J`m@=QdxC{#6Ir5oh#Lrufa!Wj# zg^l{Ou(GFb1hlLPb7(W&iuM!Xi#`*TA-40LldZ21`?wO{=ahd(mvT8l-erz?xvr;Q z$T9>n`di_uG3{iYK3JA3rJ+TUEPWTuSDZ&yMO?6%P42@_?9SQtNk0f#QiM9KH-1G! 
zMw(f%8-wjbsy6-NOJ{4c8&SOX6cQhzDaA=sf8ygedx%4|Km#wh-wd>VNq;kQ8=Q&A z7WF&|{@uU$9ESvdQ#}V02WNjdOo7 zuEWp-sf63k-)?U6!^^(&-!+v_e{B;0o&75ivHi@P9gzaB>C8KWO$U+;s# zYItcmuC9{1c5ct`n{$0kM2HfY6NY1f^hITLcNChXk`y6HT(1yAisCwcq>$u}AU-=g zUPfiA;cUwJ#-th>mOtLQE z|4`=vxk1Vlj;k3pFi#I0D9m0OMyi^_s*0HxUxK4wYBd8ENaJ}}4^qS@YfPKe52zu~ z5nyrJmHjij?_9+Z#&dkgFN^+P4~n>jwcn)FP7tSyK2V~|D$JyqlP^DkoPmOiRY7rY z&Ej$n?R4v2zdhW3=pX(a=6PzyNtc+SA4MY0k`PkJ`pwX&YhEyz_QPbw;SKk@hd_=P z`l<&^iHS+d+J4&LQa~bi>BF2-Z5LHamm(wvCYxg=tjYYSuppkM;B3+8Ac_iC)bGnw zIFypJJPKs=F>0k6eakohVdl`1-%$3sgGl{OuGgvsciGaDljC&<(M~Y` zcc5)+bVc6SmkFmXNeT_U5wKQWlu#Rg5<=7h4hd%gd#cw2Ifr9=?S5NPVIzBRq^H-3 zcz81Pc+OHU-X5b{>p5$CMBs9Pfs*X6PaQ~^IHg<$+e;r0t>`<5XDa!iqBu0!>fJI7 z$@gi^uQm%=f;zt(;`R~yzvMpZ9c0BRr!IAN=IQQ2Dg^Wkrz^h|fSU0;A;L(`L`LFu znEbx1N?#k!&5NFY(|aNl2?qH+sP&A;8PkF}^O&OeVjI2|9qkSMnKPkRarc_Q6W8L< zjZcl17SM#pv;Qx)-ukWS|BwGgDU}kX8KHnQA~|9pDlLqTQ6eBQVlcWS1O&!_As{uR zb98qoqq}o-cL|E`vvZyE`TlawAF%7%53hJWUXT0zc59JRn=t6GywKoY#p9reumKshS#dgs!>te&CnR*k?T=t~=EW8V_Pg8M~rj6h2sXSdD$h zxE2hQ!k^>qZcBM(tfl6_`SSbG!YI!;nWkZmDFmKQOO0lGE(*QkS`o0_biq{=r%p@y z0U#UM!BbR=YxsZ$-+l&fOxpaKau!7L`^L1UZukL1+z?0cb4pi1A;8n4{ONW)g$_>l zxkp~S;w7`uIVao}U+-r4KTMb<%Sq1^r%blf(K>>TYiF;)#qE&gMjXNmOQr^KuBgnA zD&lu~;i`SI+3-!$8MhTdDdIZiwFA}_ zL|h9Ejb_^n4yz@^Iw$%1v2oWUGDkK0nyK9q{pki-E34f2(wMvvR|@FsKF%vMw}iL* zX&UkPy)AD#GLiJev%lQ;f|XX_d1dxS)}K?AyT2hi8wM} zoNH}?lq}|4*&=%~)fct?cz&0;&Vb;xb?E**bPi~ye0Q=(2jx_7D_SOA-ViIrP>*8y&irieU`QIOy1dCe&R|BPryb*1*INQynLb15r%&;RYIxNj< z@G^4V#c;Mv?~-Ymv??3(}1dZ>GzeB$zns(Ud)X*Rb2cJ{Wd(p6pv zU%3-v#A+v=Hwlrs*Dl}#X)(}A&4vc-#_e|35;V_hl+K97J1aXtj66R{y74FX5L6*h zGCn>b9f5Q`XyhK}Hp2T*wie!gDheq>hD%CK^oXXI!yo`u7iA0;0pTrAJ_ERDSNE5=~kV8?p~m$DUxuW zx>IlL-{%I+4y2C*4z6xs0ix)EU9~2B2HkLS3QQw*z?>m~q_AUcbWH`w%F)0F4=Qx=>*XB1AR`@7*qaQ^`#Q1-sJ7Sodn zihbR29sQc7???VIl=CW3RqQehxJ~TEw@*=4x6D(JRq)%hb|?mekQ^A%8KnAly(%=V z4CVq@p9d&UIgAbI^3*4e?6hy7PM_Str;v5#{=zk;M+bRGa8xl(f&_glEyT` zx2CQ==QY5~4Yb;E%ut^??xMI%2Gq`9y_bfe;Obw}R(1J4;NYYSURrub)nROt3`M0a9)&j^Km5e**ztcLx|{QJfp|7uIy z`rgkQs4&yKB1FwDbbGNLU3N{3!8b+)+q*&P;QaC`4bwRavC|tcT*TAA;|L2A4!8qd z5~jsHk6y8&6~jy{UdcQS*2D9qv%o78O=gd$<%4=7or z*8xa)*Z9or^0S!*%&o{i7p{KQf^cnCNqrIb7D8GiA7P5H@psG7)`KQJ_v$iMpr)6y zI>>y`^0;DFkmm9hi6pCC%VyuM$_I^h#%Ew+x)XYd8f=lThP@-kB^4Ijl`y$?L_M8O z<)MQ%lHbig`f1Jtd^D>$Q2-4D?KGRE?NaBO<*^qxX&4Q3Kc`5Ftl1nvy(UpNCHVAk zSVVc2E^M`|#JL@Rbq2QnB6D^MmoQoF^Xk?O!EIH`Z#jdBl4Tf(!q#VG_P>=?1C>0% zSbC0-KKn*RXD7UAp+~D>wN@ysO9YM}wPI0VbL+HlFh6upF45O$T4hU7*IJ5oh6N~) zPCkC{haQ`(_RMaQ)?E~+#g8trZI)_gu_l@rj<}d9L9-E8L-<$gMVi!X z>k@x=pGmWRLH=N~aNVn*w`l#EKarRhR`z8=YEV#?x}F^o&@a>F#O&NQYGAmuMoTSW z@f=tW`p$meEGhmUyKqxYb1l__>pSSg{>7Za8)J>nnSTMl=--`!l}&0EY4j!PHK&KK zU!C?Ic6=cJaCy0pZcUbf%#oVbRbvs~lCK3s*4tc1O|0CHOZ#gEZo)B-6#{om zzeD7uhLh8KVLkD~h=H`NLRWa|c01ZL4hEo99kE0|uq*Ra?eYfa-O`NNmsUfLDvw51 z?kom9!d|?(C7JJmpIl)x=G25G+bk_ITUv z4uiSV?Ehcja@qWTBGhr&c&hknb}dQ%t@k@c`1k(>(*Ab>d(dak6@R8;Hiacz;Je|O z?YHVNXGR&8PPtadY;EQu0``a1W^)nZjY3`w7Z!@d#I%1{9vUR?Dbj7H!-^1;QI;U1#)-|#v&4q3V6#Lg~bbAyna<*sdc8b1pvaAe70RU zigT@TyQ49KS%l-Rv0>WK8th=)H!1$mw;2%A#sCS>kkHe(N zA)ipn?gsu*nN4=DgDRn+=(UWEbljOTxY0`ZLueSo{E3b_)NU}Gm%}dEily_$Ttw=j zJ=fH0*U{3QOq$Aw5D^mTa^HKJ(`>vL&P)CrTi%>}TE8U5Rd*<}#`y;sSz_{@CS}W@ z!~__@WIHsHad)THi9yclvtRjQ(M$i6!R;}l0Zu0Y`@e@8@Z8cD>@7{FBHbuv`nJg|-Qt6kt zMRkjze=9?Gtcz=Z0s%HY5eQsy!=>>C_JyA_CH1-s5Lf!#^xFE8uQR?Ez&rnlCy*sJ z&gN)4(R%TZL`cR6<4)K@!Z$w{&{#bF>poyQz`SJmhkxOEx2S86_F8WSM-U;(+gMav z^{M3J2ghHs;xQR{ck*0fcStrPC+r%A;YEx?v1s(?7j<+tV$Tk_Wp4%4GP1K)(`#7i z6lqp7`J4DU^8`eBEY-;`g#!oEt;ilO0z7m?C#zis`mVH*ecQ6U)nwU@T|4BVord%c zmz?EzgVM_>fN!xnbB^aseWw0>yLveV3GYC-<#DBE^Qg{ux%L@J%%wiG$DzY) 
z^fjm7yq4nr0l~`*#|$_~CZ4~DYd~qL;`PIyM%V5gv}B-;G7JJhl}8`o!w(NCO>%!r zW#@4p$`*{dq68LOge#89xW-&4{xP}O)Qa6IW-JY^`Cb8m_CHLYGwPyw-t(Z)q~@nf4Z zHX*mfsWzyDm}RSvIkiq7zZZ-iF~*P)fy(Mvlh4vP*T!~wyu2Iyzo~<>bj@m?_p%v# zm*vGPU-BEyi{Rgb&z}7+9$}8RxSO+4^?Y!B1)=Qe-Gng9WzU4 zRzZG)(v?0?R;oh0J1_w2=y#2+TK@XR82+qHJ7Y~ErcT=~LT*ZtTvwZ}Z;L<{TMzWy z_V6-2-^FIHc>{-mmlm%(i%L_sHf<+p67<5Cxvj;8pb~Nq&H#9q9@$so7Hjf^S;C94 zu_o_brQHIEeWn$Zj9CR|Ivt|g%N=)s@*<4$A?2Il={N71(d0b|bS+o&$pp*7-yBvc z;jYqci2xzFTUGjC3-bk#xP}w0?1+_ZOt{h^5ia>2@;e{zA)Iv!lvifoN%SG4F^czB z3wOU<&=l({VufMeBm3jWf13Ik|;^U;SPb&AwYQvS~#w^4o zj!UQ0YgeFPqPu=x+P|n_-EN_cxp)g>U04aKxXFi)k5kGYB?He`D4*-XzSDD&t)hZJ zd>o1`3kp8nljC^>LK1%YM zwPBdVCp@JgFl6#7I{g$hlvNU*H+CUJ?(Inu0Bf5qZJn!i39nbXRhyQc(s#+I>Wd?P z-SalIy$eV6%qR6uqehKcI^aKo&Q~UA%76=bvB7b}?o;i*y8GQ`B&&@W>UwD93?)~` z=Wm$MT}2J6%RYXi=d>3U= z|KxWKU}XiUu%jCYZe6=pGvdKe@d8~wuJ59;zgN~2FrKHH6a0F?UEVVb8$o=#bU$9&9Br0$$w%4I4Q zs~KPg=_fU~-koX43~a{#I(FS#(^NHw?)>47NDT~G0_aQZ@D;z8Gac_scINc|)p*-- z!9gTMRqV7Ds<*gfzb0ty0k+nzNw#c8j_rAOUDPr$?*7Ponb10oi+`Md;rzV`O7T|1 zrn2J&CexKUqZR{I;2H)65uOpEXk;tfcr5@JIH#o`+*DYBMZv6oC$SLY34Xj+R92+` z=IT#a-7V&NmNQ5NZD~QLRqlyE#`>PQJ&z5AoQig4XsjLh&!lR}MSE^bIRUi`06O ziK@1|Z#vRy2K}pKOH+e=^I*bss|uxxH$HL+C(&I2FU_*;gp!LzU5m>;q=|qAsVRdo z6db2~%-Bq_gF4VlmSYX=rnm{UX}zB_jrisbjHM6w>{m`9gZ#&l6SiOcrV+KmudS%nA0@R}kdd;$2^N663B!~@56OPHf2NcLO!;2LOilON z?;JMBnVG@UdpBV~!B5i-&2et7!6Bk(dI{IQPvZw;LWa_RaxtiFGks$YC8oW|jAw+S z^iL`#ld>yKFE;1>Y|f$__;xheXmaj4cec>OM4_{Vlfyoo&(Wi|=BuT(AX?-&^QpKH zhhKLzhaIDk=RI~R{R^k=ogy_sVEvx`5*%s{2MIs!J)j6BNEWu0lJYVqWq4ku>giut ze@rGIX)N^ZdYY9@^XWi1~Oiseas0u zN*wm?xKRfFX$2WRv?<2@sTTYgRdA88`MB=e&v$tnI7t^%l=7qc-I`nbg95}A;-XUR zWk~q8(f;R-)QD8m?E#SVrb`?3-mUtx!a6hS$KxaHC|;G-Sa!mODdD8K;o%H_dz=@NYR{esd%<827<#$p|LRx9SHp*_16w_aj{HJ<`7$j0rsN_+9ILa)aLf3m@{u>k*^1V(ag%ar{O~^}=25_v1;0?^DrrD#iIP5faxsh*{{=%>)oAKo#mp2@jmh7JSz zrMd<>ulK@L`ZE9IaI*Xid6n4d-ddE%i1$%jFmRPxGWK2aJD#%r^2;w5F~PX1zO{>^ zlfD2(}l2WjP@klV_dGRx3#jVjo-)enP>klK8*w+#@j4`^iI0=tZg#!$Uh)cskq2 z?^q(j$y*^^w}uhd#PUKxW+kR+1VIy5dFMi&eyN`RjDo7(RfI^!piqw=Xzm6)`>4Fo zgvGqd%bw$F`{C)ogIcEczaFlh15|%ij-R%u6iyF?q}03`8^wDlUd0HlaAbstm<0!i zj#9Op)c#L{lIWRVX<$FlQuimwHoXJD!dx{YcERxv)y-g>pIi8;}%?ByFCNlg@{@|`Nc|fZ; z4(=mmU`Y;BB>Oq+$lmzFnwCysU@dVehC6;lphC$Y-WQ)7H)`R=_$?HxK{}Fs#8W!3>mB$Rn zbdEI+J=0O51Bwr^|0(k2$#xPo-HM=RdtcH;2_{}8M`bADipyS>osHFIqU>H+`B^WqN3$YsMLC#{nQt zu^`tOZ+ksDBkYRYdxF;0klQ)~(FHj9S?A66#DR&jlL5(Pvj+}l@fv}TE<{m_q^#)T znv3C-*$;pE^~T#+HDB$~1#VQqKN|HR4-yjnn6@*&S^s${A@8D<@7Sh`K2mb+`pWe} z1GXZ-@T6P7iFmAmN{6;tfraMIG5**4t#+dLAdsGofRnq;NRPBz?c4}Sb)J%mE~g+S zW-?~(GGbc$I)dP;hE-V~^4-7T`sW&zKTS z8NwNwHq}Zq0C7 z@A;Tdu``8-$KBhj%8v=*=+K$K^S>gcjMLk`c6p{F>GU5zp_kg`bnowmG42)7AS37O zje1NhUQj2o8gs}^KR&n*f%jh|9ZawtEJRo;j>zOP$P(kgk8Hf;#{Ye)uDNngLsID& zcodVgz1b!YztVQNj;X;eQcevAdy=Zlo)#%C)X_u$tt2~X`zcOef2`#1$H z8=kPezc3z|;~b-#S+~FIgM=&XsCr6!Xgjh4Zh4RXq&Y{%KV(S!8!QO9TC55hY{9Iy)ltY;B-!0a}*Gs#q13U*+$7w!7wz z#FjBV)NLjL7R+5xg`enlW;=b1UsIA3xo##*MyW?Ex~(+2uFzN_DLmkg&tjm_tdKE; z)S$i8HD|(giThSV%Pfxu3-c=KJD@T_)U(S6$R0Xu=r9%t*!{EoJ-^v}Eud!|{#8#y zCLcDtO}^R;BN0Wwd$I4roWY@6yAvZ5wFVB(K8R|B*T9H;<+(%h#Juxq;dT-;OH+28 z%6Dt;$VLi+r+vF*73L6Kq0HC-(TH02@2YGa@cX7f*!41vM8piu~Z_BFCsU2 zUnXqM7yA_PeabJ6JehoR4v6m^pLx}R3!!>L#b9RGF#w`f!Iz~!L^*u5wtDu`*}pBf z!M0t~7vYykwZ4F<^3L)^jfuXns$-01Y0P%|Axn6L44_o5ianXpGc*#?(H$FYYLjkp z4*n9c`6Io){39F-Jiw&vx$kM4_6bosIT@I#hgs;#utn1K`ypc+o9v#U9DKcoo-VJX zii|ZJf|T9{{tNTprR}O?dbsnkq|k=u2%1;mSVrBe{AO#L2svj)X7)SY3>H#6q5gW` z-(^sjF;XNR%Yc*QV884IC5vi^>y!zJD$D2FA5KiKy8tfyMDjgf?-+~4ge^7({}j8+ z@-AJ_FK#=Q`B6JIVkm0TIv5uAQ%7%EVwzOu=5+Na?T{jCoORp62JpN7^)L|*w 
zj6R})R(`p{ArcKNSScZB2I2x{WLTt|5F)S|mO$1G@_~&Ssuu6yZ1lO*-+9oSJsG_# zV2_|gGO|B;&>c(WaY~P68n~-6an8(3nghQa5PX|ESmq+ngQXy{l1-R%vdB7^3E<=5 z$%(!&9gF4pDa|@eTBg8qdwtaAQC{#{dN0a~gG;z~=elCEtg}bq0%$y8E#$OL!Rj1q z!eWA22#+OtXQuqaDOLWp1BJ1Mwzhq)OZS`cchxdh)-ciyOAwgh0$?3>50+I{ZJMcW zMl_jN0d%(NnGw4rJ!8M#Yb!r7fEde)Rz*pQ)k%O*1U`;wi>6dreIv{CFK*xKjSm$I zR(1-A=)HY!ENX-^9c&h)#A;H$mTTK*m|eU|Q>)2pNQ?LtAb-mz9l4}?>Zf%3>5Z53 z4or0cFIqGyXeTo~`IpUK8*p?e;6$4+nSAEOFnbqWeI2y2+2exN*h1DQF30(O z#ia|gzM6XP*)0`O+IPmU)H}HC+1*6$F+ln|HlZX=`BSbfQ-1}sL(1Ap>^~={ljQ}H z>el{97uf&zZIa28C$DYJneKObj)Nu2|Fml(X^j<}=wsKr(jaOK=w-u%3T>^7L8mzgI`-s zI|)WVmv(*vS+KVHGP&plosXQtkNdWVcGK3$ttH40-3i zL*4QH!_UEu0YXCG4B2SbqmYJj4v!ub*)j)e9gf6(*v?GSMg!#!f6k1pylZ8%p46vb zSh`{m(yuAEeqT@owSh`*_85wHZ2qhX3dp#2m(K-9+?t!+<1*u_T5xVwlzpW=^wdQ2 zA<95}2kq~~_BO;nO>K+l6s;)V= zyK7=uuc!C*``mWnm-oA@OoSQ9p@{f&0@A7*zH;#VvAl}f0Kn<&P}h7ja_lFJkmyTe zOb4?v5jiD^6tJ(xCix2&(F(V8c`>T4DZ9)m+jzshd|p^&Jt6j&_si12_$EV@d4I8| ziq3g;)7`SJqfMv`jC(A|=z!9s0*C?|g!bY7rkC>^dUG?#<=@K9IZZZEM(q=eeOx!* ziY#EKkK=;Bh0j`IfR~$U`?t}WG39mp^~ObeW$xndp&6(d6|7k09G1AVU{i>cfx(oy zTmET@`#V~i@R<5X4>eE8@?Ry%BO$QY412Fe04GIjhLP74PDElKQ|kV>+xq>mMq9?; zMP~$eu#iZfk1*U6-Ub#y|IS@)OO0R`j&B*A{Hi=MY1&Yu<~2XBFl^44+|)Yyx7zx~ zwPlfI49|t}e#sD}H6qh#`F`bUF?`ooaZW?- zO-abW_4{&!@c2qQkbhi9#;}v36+x@3v?5j7BZ~#Dvj?6MJv~r3%Bra=Z`ds?2#Jpy zATL|AO34jxX6Wr?o%=%vYC7F^C@iZ_X7o~RQzp3pb^tE`fn>4Zyeh~F7wV@4+rqzC z=b=w2JC^xh0_HaHlFrZREH6UAkeyuINdOrNtcWFP6BF#SK71VXq z?Q>Sw-poR+IBgOnONR3I2nupswg$wmH62IOEd+i)Q96I?oNHzOJRCUR<)-Rh>U;(^ zv_2Xsw6NT#E^&lnxR7|k97me=gW9iu0o^-^MFMpiP%~wp-$oA3uSBHzigFkogVK9u zGD})y3Y6(;Y=NW=ch4M~gq$cmOD8Gn#>`$s1PI!N%SRyRdYDFRIKex`vkUw$Y35 zIAyJx99=#)R3EZl!Mudhg@RE|a>=P){gMomsHM+PbfwBEM9F3pCrL@Adu3z)|08iL zb*h}>=00=Z-PJQF0R!<$H$92}&xC4J=nUolkKhjZw_hsPLq|-|s;ruxJZ}3j=&b5k zz%n1&+kHlGZ9e7ksrA>o!H4aH{t-g*7drP`fs6+&&eWT@+J+G_R%FrS2)oH#&F=E14%#92&w5AL#Ucs;l(4KkG-Fio zsm1M?`IGs(^yH;lEsGc24TGL95|jK{K%e+eW@H|ch(s^{Gp-k2D; z5fyYE{LcS;XZky26zJ}C4#&@Nq5|{>iF0WUB)2+4SOUHys6V`{F94*PH@>{frYzpvM061L`smvu8pqH#?XRL#6QtM7)fA_T6ON zRcq32SzfqI4|{R?Z;+oq?n?XJ%aW;k7Huo^kq@&AZ3m5;vxu$d+suaH!r(WR&WSSl zP~-UenJG3hI9F~?8#qDES66_uPxSoA+ej-igNmwKnT0;u!GY_(;5H?$2;x9mndXlJ z|Mo1>P~B)UH~gOcJfFS?hiB!YA2D(!kPpM>Y}bd2g;hHL$hBOmW<~K5E`(q4R;L$q zrj^$W&N>_ruDvchODQg*Uxp#+GY?BsWBby{&_a=bUmeJ^3 zTU};F^edI?(if=jpa$l2nEz+vtIcvW#Pyz6FCCq*f%-E)G=y|ex?K8e26J75KyL1| z@qpk+k`e1w{XOiX(Ll?8dzz_q(j=$p*G9G9)}(;!gmdpp?5&eYl2joYc|nrwmLD}# zW`;QCVKRQDBtx7Z!dt>pTnZ_jXIPc_^{s8c*4Py==FuTinLNcYerf01NZ@)C*eXr}$Mu9ury+b}0uuW< zDNdeVJ|^ZILcn#{fGT&1-=9*m&ubLsmF*@+I%D0cC+rV$tfCt!mr&sLD$I&W)V^s= z#c0xis06TeArwBtS}?KXvGc6puWaI(12iYr@OLaHt*cJNFu49AV=qG6ejs3c`gex9 zHks-4PuwaaRsgsI@#a8S`lnoAJUCkPehFlqON3MFEaF4IFHGNlj;pxat1G8;Nm41Y zt#;s>e{?fotg~k*{p_i*dfTTK`VR;y9c2X+cSiojR%W%KMnHV6u4dO>e}6gJ7;sd^ z*hkcWPTwx#v@zc+jjj3>?kDsFO7Y?5(%pp^!IsbeGhAN2kJZR)y2l6m#|EC5=vZD< zXQ*iWmNzP2AW4?MHBJF{I+_@sw5d^9S&FlF)-z#d8m74JDF&`JYw3$f)Smy`YV9tG zR_O^`Bl(=YP3BfRy2tkPQk2)y9bD+BxR80KNc79Zsja5j%s1(B^hg(r8t3%a)$+{?Q0{F|=iU*bJhMU(EE&TGYW)!CPH zIZh4pJo(vkm6bjU&7@9w4T5{V)KmTnYtE_BD$q1cQtA&qJR+YQEJVBj7{w!CuY-Uf zDaM{hbMDtM^TWSvY|5C59lowHKTj%VIa3-HW};MjjfH`ix+XblS3G1dC6y)AifrRY zP(J4O;n$H`7yTv|T_!Y3(kwg}-&TvZmW{4u8H*s*0w2W%uccxASdK+}) zw`jTzz(+FVO8B;&N@^#4XiuaWA1vNu4*`9}m79Q&hkl8t#!iVd4~>?FZMbAM1p(x7 zqcAD9s6H$e`BZ*`&23<(*Q9=IUZsh{5v_n_{Mf`hbI4+F0y317s`cd+SzAVlli<~y zv2&rHS8bwjlgQRB0;$|!VS7irw0yUj+BY?hUiKGOn z>a?q1#7Xi_&}T2=6a{O;w+Z`zRFP$zR&7S+V@EhYA;ZF-SL?x{IFC#2v9fEI!m-cU z&l5w|)%iD)1b|OwfN@9Oa_R0Jz1!TOzu|d5ro14jkksz?vp+-#I>^#}$q8qlJp2nB 
zez)9wGP>K$*r+q3YA~%b-QWGdc^WNg%iTu0-101mJ^?=n=19^7^_dCwWhYeeZ+^qGZ_oBon&V38b}2@u!Nyl z-J^EV4?}%7OkbM@$}BY|T6YMVM>7&!-L-hYTo<2!ZjdWZesp`MtxpRQf7Puj z7=!E`DdHx?;|_jgMXBalYgLY`U6WKI7=^fl2#yT>#+CsNQ2hv6&kPjG2Ig3qOpQPU z!O^qk$Y_0Mj3CRkQ~4i%f9ov>-{jJ0M8lU8iC>>{PFa%X{WkT{Qo9*qBRTQ8gFWk; z_i#0FW|KAR>Aq!o>ylxkrWZ?Hr^dy{B-!DTQ=iu5Gbx$IJ;j44Qhh zB3{}#n=U@BiBlkr_<&4#=2MG54Y+>1!uNZ}66e+VhQLdABXKpgbY-UnIywtNUa65q zg=%N?zENl#{z zqdKLlUlo$icLRM6LrJQ2H^zqmVSpzJ`@yqi`I{Tq&dw(1L7_;$g0cSyG*lf?GEVwk zk5(C}hXgQw>NlO<3lo(+^n=Ox;7~nNF;iW*-h|qYjU5a|Y2V2RU{$ucn6yx#klnvm zjEce}YhkJ1NZEV7CLHP!hNrIwQ89MNi!SYNH7!AfXswx|iZW%t5Zr@Dat9w7>Nf-D zJ=EDqp5KByRB}FrIwGu?t|wJnJ3cfs5OO!&RCweC7-$ip#D7k15Out@}e#hW4HtCvBH`@>*P zoRw|!Z?G{7QKDGAJOV@Lm+f!j-#xjy5G(IuVe@dcJ{*$pQ@Sy`V&Z4N9p#JCVpr=| z%|zj-dz_g{p$G)dt>AmAz4*o6gu6QM`1cT()08=eB0@3w+0VEVJ3*4lHzluS6 zGcPW>hNyJqmygj?P?xS&I>hqHHc6&r`Ml~Q%ZNF2C0LK3e7!+?#R(DBP;{ibGIDBv zz`$5JdFc6?fe0mnFYmznHtqGD2UovM?O7@Kbyem z)=s2={pCQnlFT;C%u>l}CZjt-TGVuMRQvw?!`b;_gJfsHn4d9?``xKH(P4uApB2C3 zOB^L^=P}7eIrTugn*L+ysclIYvo1!zFBG|2D4hYpL7+Io|)j5yE?f+D%NmWU7=xLZpYrRlxQE z(M;j15gU4=v%Ot6U)g`qxmuwPg8RNN-BNO+m5lmj_d9G}Q~af`(SuJaHUaTptMn62 z1mnx#eInF-T%S742(`Pm6I3SCu3MfH2i5*+;6!Grj8DC)h}j5P zrZX!F?RQU8@LN=m+@n#1&d)b~`K*`DV~la;fR^#uU#TvjKP{W$wn+o8Wd+m`gzIEh zb_3xmk3{$)p9gNq5P#Cqv`!rMxrz07G&{soB^#x!ZBsP?!t??7w1LXPLc4p_8!$(z zWrhCEDAJ*LhCiyRZyggN4FTdIDl>I*9L&Lc7^J0%KC|gcVx#Q!s-LmF6_m?;h(sx! zK$*c@GD#veTk6@3dN_;@sncUTI(fz0cY4L|&TJp)3HrtnI2=7+<&!T3kF?vVocHEO z&mF}NS%1s>X1elEFXq93$|G6YW1=$KsXad~9c>?L#Z{Cuw0Z(=K>PMB7mL(|VO0Hx z(|&B?uMy5mlR2>&;%&|uqNO_THpWs)3fNxDkq32u50*`c1>6+22%+Lz5k`x=DNjSt zd)2diX+;~&iKe}rmc6f+JG?-aF&Z2fKvyB!IkMc!Cb~EZ*rqp|bx}GjpK(WjY zV9ln8a9C{SePp`+ZOF9vrEw}D5@eWe1u~Q|#dTan<}Kahvi^>q-lz%Q4>R#gBX9J+ zQQ6)$r>WJAR32NGWsSWDV9p9rL?71BXR^tbPB*Z!QB^ zRsHKPXY8qYtmt4vi&o87wH3%?Zt3HK;mxr}l*|w+?Io)!ubykE2*$mZ?ZmTYA^pT6 zEV1c1fYDv2MbgTodCAzk&-8GF@W^ms1Nt@?D3|9ut=7$GcZ;ns=t}gj@{16N6yzxW z!RaPl+GlTV&2?mnt+$C+Ri%mZiS4^G;>%rr#3OO?K}V{m3Xq<0_;-3Zi$!hL!G5v3 zGrlEY&j|*7zRsD#01c<6b|d(o#+0AmU#@A^-?CtA((u7AMa8!q;icrUyniF4+Ok@b zZjqv$h8U3u9kaq(Sx7ivADb8QbL8(x%4?~cjr7a@8te5zZyU(SLBMWqi_nAtnY*i= z$?j3@ui)zGnKyH6&xq2)<7-0`x?khSHxT8~vMv)BsTRM?-rCm_%mcR_D}}|xl;(`) zJPBkZ1IG^T?|#MuGHMg&Ba;>aiYLHcMW2)A8@^wY+DH7 zTlr(+Ctj7sBZk+epC*i6Z3ArV#(aYRtS+;TWWs&xS?-b-QcHT$SZu>EXbO4iJ{<@f^qAmi24GDZP0 zlU-!alf#{l?>0ALT*3*D5=Rst}OHMHt_)r=CXi{c0HLtCaM@wmZ>zoVd3=~8Ithf2B`wJ z7mjry^KD6}PJLS4V3hY_?>gxgycyt27-CpmaM8q-eXi!8cUOJI;Q-~XA4C0t)-F3= zNHWe5>%%|(wp9(9aa?I^ZhqSESZ4jeM5*@Fcfff{S9|>R?VeFeoiUDs%^53X!@_(nc7b9C}5#!wp@YfONj5u9o0l^|i*LFEoSC}By zvd8nhlkMZEv*DwXL~y2=Xnr!Y7e0CnG_MJG({v#+?sP4=a=wk57UXeK;(PuQ5W36v z;8|XF$fMW9vW^t7q#SktfiSGt%vV~2XgJTFJ+Zax>C={>FwDh{8_hu=&-izMb=~{9 z#%>cHLAgu`(v3zlH@VwblQA_ik@?eUHnt6@9M33Lu^7E$G%b<+V)(6J1L~pNMu@>- z{;@=#W6eMPcaOh*gj&apP@MHLNR2pNo0k3YDrsG0F*e0nbJ#(Zn0k;wRGMnBX1CA` zHbyz6A!~zdPrZf&iT?QcsG6tR8 zMPz4synViBmIWv{cRV;C*n9Ll&(_#^hK!}3IV`)(LAqCSOZ{nG{NWKu0e7t}1K8O5 zr^)~6Fd;$i`t;2vYx=k$g)+r^Jws-Hu8VUij2yJxlhhW4Z6*j(*im{wovp(#E>=1(=p%eZj`Mjqi+)Uck=b-r(fL$PkbIw4QlWwdQ*r=F zLqrY$v;!U)=Y~2<{EjoNik`*PEU{Mm)^JYyPxls@0Tt(HAE~3J&k^-5)Z!j!!#VdB zFE*|;MaNj0)7B2FlCRJuw2D28pAgv0X;L~<-&9TnskHbdFHyrk&?vZjbN=AYlw_S0 zEmIb)udAxpD0{4yGdcZ#adp;jP5)uv20>9NMd=X=h;-)$h)79EcS$p1FuEipL>Mrn z$LJazqd`I$-Cfe7y9K|`o*(Y}IG*Qk*l~RJzOL)_I?r5J!$|5G(|@Vgn8j(TzbieV zc2RycDkc{dxwz4IN~*d${jibV?Ua8a?e1{>yoz2jVOtsASIUe0>naK=NYCaXY zt=ihE9V@1?Z>P@eni$uX*WkU$-yl+&0dBZR zGUAGfgapk-XixZzbHq|W)9C!6hd@0tHyP#Cv(=VO(%?m$>WA*I2qkBr5?yH6F%%v9 zL$&#MAaO&@w>Ya*#JT5+^)-Rc6DVHRe#%p9<75&D)RCQ&q1n-)RfZerx_y~0Qf^LtqC!@kB_ 
zUHyHqYkpyg!Bew1RpZmV_mf`yL$Z0l;86vBegT;N#PSR9Jo&S_SdsDAeH*Y5OL+l6 z@=0}Xk4C=|TT$Z`AsJDbh)WhD^X|vTsLus2FXA4K_lqi>4Sod{&G4<)oD094DsHeD z?*e_%ZeC)eKc6Z*8)0Iaos>?%)rK8+%BOeAT+w6WQl~vPZzv?)SMX%scDsf3Xz2c? z);e*#a4xUY93Rt_)t1THvZY3^;tE zM;ZRz8q-k7qMtP~`+0J6swOnt_3grhr=Fv5|AG}7A#HDiLu1pg^U??VEA?KOfse9i zh1K3Pj_GBmuw?90J~Q|bq!OTk#g)%r|$ zx3Kwl$q|K7IoZ!GsRv`qTTcxwUXZz4)=4 z=!>yaxV4)U`1fjVXW`{%r2}XG0w%+Sf<#j3ZU>gT-)cUFYh(@W1`g{s$~rTW`{wi)iyDm0=Ce)W1U4o;j~9Hcryu?h~m-= zP|*pZg`I2hq(BQU{DUIYKCePB0iBb_LKrTg{u!1oI669%CtYfmZ*8l&b^IW!=z0I3 zf-$-+kgBf!CQ&%2zV`+UVEys#c-m{^Ukc)-b3({?~)Y36(`T4fSc&Eyf%_SvjGFc9|A-RDJnh6yWmoWjSkk0QX!>Fh1>e%~uu< zVXfye`4g{MDFlAQBOgfV#FBn+D(Z%j`CjQQJ;I6+us1ygHHL&soG960c%M9)R%3JU zF)H9#-iDtQBN^cj7IbWZ$ z))`OTWgxL`JSQGk@raje>IPY(I5)Fq3;UoL#X4biIRB{|@;hBFQO7>;=ldECu0ihE zlSZ;sJ>5yVA~Jp=`P$iajBT9lGLV1i%=3L`_--!(YeA-;Q{)41r*b4cwTZAo{~}8> zJlBbELqwo0+8VV)Z+jGczkCN-!12awgsTsV7=uw~k`6wKP@~1=4s{PIowStO#YizM1W7imH>zVF zc@CH6zM0tw5^#wEEXxhD6DA=#o1mexA8msx5TXD+;16fsS7r9=G>ka3?xtX3DaVlD zo99^dX|nuP)k&jVk1sXQu#Vl2J%xD!-K}w{&`VC;#Uk77PJ2Qt@y++fR%wslcJcML z@P!|$l2a^NE=&w#D{CunM@Z&+aKH@I>_(6BV*?U#% zM!)bZu+unyonoV1iMUZoNn1Hx2?rYReCpGU78NXzIz|0E?}`cvFwSq}7M`Ht#2Z@~ z57)4$_B>3~6rA0BldG64E>OY449vl?r=f{m!)0N;5SKOMNgLnyqgZ!I&PLEmDU9(U z>u19-0EEg^{#p1zLgcG_3UaONcMKS%QuEDu8N z5GyYxVDGLT8nnQoa+-F5JgD5{F%ewA9syG0x3Y?|!lj;}Av8Q4q&ygw+^xxBHx&mo zC+U`A-?(r6K;Az_{U;@bui#=~$w>0QJY76)`hiVanvPG_3u{%l9x8=oxOey?)yZkQ ze)c#M@rF*p@+m2FKTQdbsDbin0gI!hUmH$7)4>qv?VXZtz* zCjJUF0{sV+x}!P>CRo>jO53riA5{`Ln)Z|25)J<{c)^WiT_cPXyWW149cvaO{0xF9 z;J|~!l0^o3g6&5xwbNk^@LahUsd%pne#Os_FZGFE?sVObAFS~_9H#y)`=S4@$;pZu z3t~k0`6S5$qMRc{+?V(ZmlUt~=1&7G?UJ!a+nmodQROZhsGnJsczEZt~smtV`3$oWx-wXorF2V zxH+-N53${1V-CuL3OF+!*?r;ZA9X)IQPS2d*OH27^SQ2dW)Hy}JcAWv4HJ{jf}%+m zWG@~_l0+m&0!RXuZdj6rcypU2Z*rQ`{s|J%Uo@gP8SZjOuXtcMVR_CwY;OcyuGHRhFxn6n`mw)&zSf zZ>~TzM9M=Z>elgh&i0P2$6$GpC3t5&_%LB^cqOP(VpoZiSaJ8>P4i2;iHA+IU$kEn zVeNR-;K{G`ZgrpTs&S#7cRL*z4)c$Jv?4r0P9jwL#wZpzhbh-y`q(B=yxYX@@SFw& z?b@!nuO38FQ12d1y%#P(c)%svvnHtNXTD92`Fua|IGZNWTGc<5E(JUiz64ki(10NV zUCRMxqefZ_`MVXu)$-tqm@NjlWo;wUuny@nGSx{lA{#Dq{-f3KPrgOL^~%Deh^EfC zpitz$W8M+lb#InN;w__va2u}?SZQ&|TxPomENJnfM}y(PdXjs+WA8}V?-mu0qu7i^ ztpl3k=E9KU=pwDTmF5|fhm_nask5^UZ5N-T^VY{SJzs9%IU@)sw zhWc1YSZCkWe|QI8x8AF4Z_uVzxHSE2T?Uw6gbQ7dnljim+KzFDDdt7E#rwmk=#nN= zN{(iP^gs0|M(oGN-cN@r(H{DO{;7OE{@Yy&tD;b!GnRL?#Qc&9Ul@Um^|--&8IoV1 zgwS0v=woAXL)-Z8g}bgqeD&$doc3(dJM;{3yi|=Jab^8w=pnxNmC0b}6T)YTgp`>{ z>P6qmDAEHNQo7;@!060u(?fLOoXZb!?FdY}q57kl^Er zBmn=S`ZB4v)#c;?-!rt7lrzZ-?S&nSQ%e|R^wgWe1fpRA6PlCL#5{VIRDo&9%khD( zPNkEIY3O_A!Rl+8GizihM222D91q`A&eF zVrH2=>pJr8R3D_%V3WnQjuIpuk)H8&>a@4=`JQA9oTpGGCdCe=^xS z*iZXGIi*vUiqB<{Hb{YwAhI}{ytv@3-zyKS`}me~tB9fjeAi$lbNBbcEFwj#7`asw zI3>opnH*Ood=yKEQ+uyhoBZQDPkQ^RYT~{1+4pY{usQ_bF3w7($Wo79M@CE`Ur%;= zN2D((%+q<-)4lk#!yfN`XhK^BIu;EzA`5btFz2l)^wO=#|9JLkCi2^pzC|+G%|L+y z>3FZ5gF26y?`re@?L9RbA>ZJl`sa)IP^A$QtC&?iX&^g0uh1Cf`;wt1H{>^5yy#A& zap}CA#oi;y<4mh#mWiVnM#I2m9P?nkg%H+}fQ0_RYEYJXKAbU`nskhQqqVpJ#w+|9 zDH1a;8WFGVbG3)eq}H843K;WT-u4?`@Hk6#J+bq*Iwc6`z%~sz;N8KngckU_ZNlhj z9@Y8S57$;UHrh)s>S2cT06E&32}0z?--nyyhu$9rVXxzm72cWwi>1HxiE%#jyT1TKoFL3r@@ zWqri)m4-tL;Y-r&LKKQ1;6kWpMwvBrBUIttIYH3^+|T|?lzX&Kcjvk0*qpO}UXT8@ z^=gADD-V@ppGSeIXLD^YcVb_u|3f($-%4x_?Rs#1pvcN{R$jyJTNepl%3+H$4aAnh zj5o&}z&VH(;us=3w`;T7EXMaha&BdX8b5Oz<$bQw9jm2)SK zbp{GkNkWegOFiBzw^v>|ff&P!BnaKY=4{>$KPeH)P}Kd_XgDCDP>EKwn!Mt2q9iGf zPXFA~+l`bWKOu`;r2OVk+^DI0=bY=v`QFLrDlZy}9Lhu!HO`SWE2 zSRL1pzK~OcqV;riM%BdPw{#w*^sJ4`E{@_8fm=x2rks&NORi?LV1vv8mj}r+?iMAu zT`Wf-&GKV%QU|8DZuGE4K+{0r#6S9FkiqCneIB=f;v8oah1_V^A7U#jcWGstCHNgo 
zP8Pt}1hUwzr?PA(=FqJTj|eCzWX>&_tWV_~dA?9k8lJ?*N3!tTha4EEkpj9jckj*> zc7BMCrSt0L;8^Xs;b8HdC1M+fuq~3nmDx-BuW`z;8_h4xMm*EE(Yq~9BN1+d))C3A zb=Zk`ztgHx7qL{1{93)~MOqE@Gx}|twAF;h=kW&W_z&|MH20hLkiU~-*cpx@0$;3F zj@|ZoRDDP07FT@v-8QZzPZAPRWw!x^Et{-HdeN|iceSdGAB?4M?ODRICoxKwPD$N_ zI4I*n76gEQMZC-ONF+Hs<=q%2sGfia^j^|%Gqrl^dBj~fZp~1&G^{AjUO8m6b>e

}iW|ow@Gp-Ghmj(+Q-@ zADeft#IVKI5zG^ayI97>fVo4TgRWM?(UH;gADV)0IIKjsh|oZYkL{Z1&yX_`PXZ#4 z_k&r1*y_YC;iM($7&E3FXJ<`+uR_c6)iu z2wSGKr&RrLURU)&uxnMdQeWu9%nw4GDHQkJ4PK*!@O@5n(Ye zCU{&|sG%xv0<3rHvA8!xMB0p+h?~FF_rC@MW^2>1N*DKuIJ1wVG8($(o+6+rB5Sop zmhk-KDrcE*7c%#J**uJ_4*r=H9J{HSZhAT9(7fGG)|Mi5VtI_gfC_D2zg`2a{Lf+Yf7-yW z!oiul3$R|u<#6*e04Iv#fxCur4DhirLVV07 z$DPv({^(Gy?R)N){6tc5tZwA`urmqEw>CcCud>Zg`aQni9VkU5#T_%edpDSrRyK~xZ6eU@@#Jz@oL%dHFJ z$P=Gkr)FweTTk)h00#c*q{7rf05BbYE3%%KzeM9L9%9J6Inf+g0ZI-mKd8hQgl3|c zjtrRV8mK5=c#*-%OAf)XhsOd&vXe$zZGy4k>yR=VWqT_Kk!SrTO}nqK*4(4SjpM@D zvsQsHHcW7SEE2NRQJ3|u#I7i_()oka;;27@!>D_=1>F-8+qFnjLhdI1PB-0Iw*+NoX8iCbgix8&6gm+AGC*?I%p~ zJ@r0F1)chr&}=wupqmyI2LmZB^0+r-6-*Q8{Z>vZqlI1(+$RUh1Kc80`vw$zb@GGT z1>SlwARnMgiX!xT&ca^RZ_&U2-sJK$mo!DQ8gk)6lm^T_(?K)hnC27FBavbMjR$hW zRJEbtd1uK<_-ve)?DIl|fbYqK$5 z@_JrQ!k1qJ!vJZ^(B>+5SS|rTTxn6gdq@v^Bv%tS`6YFBcdKtDab!6`jjfMfRDn+_ z?Mpr_z4~GMS8gTd!2b8@G(A^cda0X5BS)}sQ|GdmW2wtsMu)ObtydgEB=p=kAPaL8W#sb`3_7HlT z{R%uogA{cHDfs>Uxs7s^qBD@yoh6F^!p;Ma*jGi(XT?R_*kZ%ObQc5uKjM7@58uQO z0XMgw^rvKJPIS>7(k%<^W~)XPQkha(tq%8G>{5cOIo##-oRbX`d}0S*t1VAW z3|JF-fcQSnr?KOSTeg2E&Bww}$r^utXt}E!eyD?y*}I5q*47TBxSD$=HM=#yV;AUv&DL5on#s1?4pS>3p&-oj5JXKjSe87_suCllKg7 z!`gY}RPXR!%`SILt<87xWFfD}0sP9RTYW4l19p$c1iz_$?~_)1E>*KCXPKUvp!e+5 zCJOZH&dMKWkH_11-m$7ZBPH$nv1CF8SF&h$IHA0mrR3A3I}!Ny)z7v2NDsW&;whTaxmJ60y?|Av}4-jhG@O=hKrf z{F-+iQS+tf&cW_(u{3fCJhH52ycPUl>dJ4De>8Q*n&Zypos$=|h`lGfwv>U7ci{Z^ z43XxCAJn^_sVP3P?Vdr91DjTwW1+e(Pwk zx6{p1p4l17E_^s^;zJ5K^hf74-};@E)!pY*N0$}=ciEtZq&&dZ9V`?J5zU?T;rg$>L!wf&QFJnWA}7tQPwI*(Qm%n4r$wdxz7%V17B9#4 z;iQ{br68d**8Whrmtck&ZV^n%%7Q!0-XgO>TI6)k_ zGaw>=xz+#C^+%usE6b~x5%xd(+W)w~qTpC%h8{W=<(K?Vy@+I>h*2z%cVEJ7=Q^p` zp66j%nY*+D52QK0Bvw#iH$qflT^xs@J~?c3X)c$%Qs%xNj^3{M6hyO`j_bd7?;Ire zl`>a2D}}c#cR#nBmrUr}nevKzunRgOJ=kAp=!M&#Ar9~JGI2coJtp@F^rLe0JZouQ zIYs(5aUMKj?3uS8g~B4!Cne?$8z^qvyx7DjOoFu0Xwe(>9S1Y7NPrd8G}P$J^Ib`+ zpA#3HB}XZ0H2NEG$KUJx>k17{S=^$Y&EWbW#Tmi?Ss-*=c-=+sAf^0fE4 z%yO36JQo-lp1$H0$KpNEm~Brkz2!BQd6&8)LTcG2mH?_2e)mthBqm@8+rA!i&Xw+i zlYog*&0Q2!=uIwKxMlue-FK>vH!}Q*pcwbJLhxR=oFwynme1;5u)dg2H~3z!TZhs3 zNT9I8`bBHHF=8FBam6jZb;k(Ht#=Fh4mdlI7RbqqooV*tLi9wx_%!Rt2KA<)Zy|GI zxIA^+Z|`gNaV{vuQAq2@R0v6IU%^EaSY0~18?ewQc2FYn#>m-q?7dRmMvd04oMwO% z+hEj0#QPh`{YBGwpo@$PkXO0z&SuI>N_s$EXvdM3-k1_p`@31m;1PjwC_aW}k-Uh7 z=HM%Qe-b-GCs9806vbH>F{%A=+Zw;KLn1#EAD65D)WY|bIzyPwVw`rRu>5PgQ5JuM z3$1vN1|fdPL7Zn_#WelzW*S^ApYjt%oZ^YKL+41zeqbEep7{eFI)&9$G|GrLupO0C z##0tBDsWo=iPj*GV z(7dW`_*J^#F_@s{3Wsb(2^z36`^LH!|7oFFuUZYPTs7SdZRqAA)zNJJqo$$Ez_nHa zROO9%JfQ6D1bedkSPFNPJfZl3{SU7?X{LXVVceu4F=j{O8Q(3|ux9h~gou#CE)9b+ z2j%mccQ+~kGw()`U%u}i!QQ#C-gR)uyacJ&I4tPeymY54?W3nxMvpapMVer#?izQ2 zi7|e$8ry%$o$4ic=e6o9-Ws!kvALy5t7TcQ$`%qk#I2ODh=fU!{`#%M>dz zup)iC*0{v5PD+RwCp}fPX!MXwKx3z?N1Tm(O{(g&z^~TGQ_+ha_KLdeF`qftIdMq>BGo3X!0_13f)zXLdVt%T@uYM0f|Y zA1NXRc9=c{Pq~|bkE+}K92G@zOgkR%qAY7;C^-$l|9-&Y1lzsGkfkQ)TRkoqVNmF# zg`ovngCp=eLC@Z8SV>5X5bJu)UMHxCRJdaJ662OCt`6*ci#)q>QG0Ga&)zS(xY4eb z+cV~$o4;QM>72;OZZBx`?6gqwkZ8)w1LAlS4tOjHpvLTk5!g|Sut{Fvip?QCb=2hz zDitd$?kR*A%i{Zjk692O$!7h_RKr;po@Hpvp!g+t>eCatv&RX8GCB{8xjII;4zJ8|YFLpH%=Bjb~SpWR87J~nGZ3-3Qoev(sp7)Tg*M*ngIs_MGV zmhUCLVO0r&W--0E(`??`-YG#@;&xv66i@AkS9TOuTN`7}-DtK;j+SoedY;88%>{j@ zCPvFd35^*W^w~Q)eBVpGIv}xnyuf?`f!2p?pG_3ss_1Qh=dCP@R#Q=(-}Up!WB{=Z z+x;++^=}XkRi5uWh;wg$J1Ryh>oVoRH%^{v#`YzV_%;5r%h4hPM0MXvVi#i*XjjY` zpkFlFKpcX0n5Ucfvs1cHa)s+T(?V0b#=G3$_`ZK{X(LiJw(dTpw5HA3E`2)=$XKI7 z-s?cvZL${8Adf+Zt1HS}~Tt3dkgMhtK#wvG33xK6vVHYQF~=nbt( zC&U@=eKeMmS1Bo3#0+rFFE*m{#2K-L{#=G^gd5j#1${-l)vJG^bS{sGkLD?IBlK0= zQO?$sPClCB*yYYu^2=D%z 
z!t@z!+o_|5>AEL7cIlaiE-EHlF5r6PI&<0E$*#NoYcARn4`-#QX5z0O-u*xM6a2Et z57uQ_;3vOeeftt-NLf-AZ|NxOIdf@c(ldsvUuCgFOFd+Me732-fAhm_Sc+UbQX&I7 z?3iv167b9;$rNppujyWkvrG9NRtd`W9ahS?h`%wh81f=`93-*b`r0br$J&ZDFAB%z zx-b+Col)NcNCl2iI{XuX8n$eAUMZlG~~%-KsV-8gGai33bOKNV?v z=^EYIyWW+Dl`!Jjp-i{$^Oiaj()dk7>yw=Cb~pjE12CzV;hh~5xPL+RVBlK=YY#!2 zB0_Gbk4@_<{SFESZ%h@s2)r(L^H?n@s4lv$#Apd1iv+u^JJweD4v9^hs+&*ha#`1I zd10`?b_)lLiMN*Um6t+C9}BO~iho&CVd&edl)89v@W&ZDAECn@K`6#X@VAVLaLTzk z8hPO^ovW`BEa?8An-*NXo?XU2?d~zhNN9bU zTp^uXl%mM5?TrxX!)oiY)L;ONtNsgYzL60wr01Gw4Zfri7Q%DG%jT##^5m;W@z4=Z z9}S_+l&`hYh8y!?l*akccfgwW36!}`={~SKEB2aSp8Tk!Qy1nJon}9>wQH1?R{&42 za}Ez#Z?;S>UGg=a)2x$OSE6)K$d!?rebJ2ah;d?iEaVk?FTp)~nE~ zEWE-?=#-+gOV!uynMkf8@vn5)v2AFX+%BZP zram<$QW&QAxy@`5&v<9@Z69K7O;B0esVWLFFNAH9_W5P;NKA^z)7al&2dOTG-=kvz z!LyUljs0EO>&Eo?x6x?^$A*h>L7$MJmP5Uy7&%T_rqZ!!TLKrQpj(VtZ|<~W`gB%D zP3aWhPIT81wq4G}y1Y4N=aj{j_qh!52ptJP;(9q+s-4wEaq^5i4L3Hw7eY1?kuT4f zXoF?^A@oS9YIwCUxMgrz%}+LLglJ4|W0aTF4dpa#L$lbG!SXG>`=pR27K<@6Q=3D)T=ftq@-gYoK_@nV>{PNsx zoKS7{nZ$8SN`GeuL}KNk8`z*@k;I$Bm{4oLUQV9Czp7uRrCS57|Q+jnK4FY&!-;l(#RiYh(*DEKqv^zJyuH99+qmCz(E6j`KSWxqH={K(+{;>CL=rM0 z4o1${xdbZh?8TYbF?`rG)%pRH&rXD^{+Ka)C^%Gzs?bvj*R@gOB8)47ze=N%u6B{M zfWrJJ>B|FH8fI~eQSC7^wqk@L$+*aYYYLYQH-W7L%@Sf`!;i1XekEtplk!q@(aPzc zT(}%_QlY)UYJ=BfanMPl)qzP1#++!M&aTVvZ)|<@cI^obBkN%5{@#&ZMF8j= zBNK|5D68>oaFzLd*=aWq{h3Evv!we4x3qt3ZmJCU@^mJl(PgP^h%sB3*0z@*yf}&3{;I$Fk2Ej*NBtD0ANk(u4@=>bN-ZfC5^A zWdwRxNUpXUG{`;J+ER!Q-egR#UBn#lM-IwEEKmH2&tH5pO!LQw2&;kRb@QVl^d zK^(vZA95ZdwRbAuHp7@-`;k_(yZDDd-oE@J!9QgXy~+MLPGzV~2LU()WZ|*S$gAL*g1>(Sk^umM{o|kQ~KG*n^%ECp$*d*E#Ug% zKYiVtED}#^dECVRiVrLLpHO$JNqLWXo=;Lq8i037%yy}@KCt*Cl(Uuh0z%&r0xqO( zaPK)D2d^>e!bR@W(8miRpszRD4M+e!ZI3ch_1_uQNg1UfS!3e0+J(2ywmzo*gb00IK+syLWK=9!od;UnCSl zNX23kx*-Om#^QxzPlpRCP44cE;ged;d9-wSXg!7_J@ZFQ_&Cjg7)T16gLlqja&OZf zd+x0f){K#&UrQM%woXXyqap(kYcRunxOPpQSK8xpj&~00Rbeok?j&B8@LABB?j}O2EzMvfdy&da{C+sm;`oo|0<)(Y&vZH*eMj?{&k}HN z6=NrjDcQ82FTC<8Ek-VRB!#UOc(_K31}`13GW!2hDFXHoYjjbI{xNg(+EDO%O?mTZ7oN zj4MDaP`Bvg)Z$96TlAB>gmQ{HYlO(~;z3bdJd^rw=+7suvc<eE;!B)CrOjVX z*9=#p0=_gk?vZ*DMa=tcGH9>yz*n_d-RPXwfq1MEv+P6=mawpeEgnY8qN&Tu@#nU0 zrmAZM){BNEXwdsT20>*wI3TU}4ZZizjI9TUI9OSiFR&P#5@2!&icM`ta`}wXf2;8pwCmNsrn}}q ztlj4kkwHK%ozg`_m6q9ifkL9mE%k8auO~_)q8xH6IDgFJ3z~fm&D}=((YeA(k9PIA zE<+u$KFCn%_JNu+>O^mPp($*@%52+MC>XriL&NWfKoS)b@8Swwo`sp?(;t6M(;5qz zKesAgce&~Z2-LxXpZT>ApsrKoGCV~~YjqtZ?Nn?i)y}T?lO>w4(%BHD^AWS}hXVyQ zBR=D`#*VbAdpv6VQI9X9epv?;e1!;fIxz4*HhosqH>~D}d_68M8hT989XRT>m(;am z;6iy^Ur?jflq(*V5#KdVw*TVHolHU8gx+xHHkJ#;EBdkF8(UtAT`HXs<2XLJ%Fp^) z(7${AFJrp5Kf5wt$n=bBne$W;h;?EX(@hc=7Hxfs@XiO>zdJ2tM{?$n7zSE5x@GA)mq`NyQ=S(ENtgu60r=zL9ZDSXJ z?%oHulS6b5CZueD+TE%LMiIHWGm2C*w>kf)eOi2RSly6G%CX;4^7JqoRvu6TlYS!d zUZY-^>3OS43)m;SusE7i@-Z$v7x^`@?LWLVHz~K_2UC>-Q$s$D?GBt^HkjQFnu6j= zJT%Bo9GeI$P^hq%b5y>J6Ll2KKuQ4hL(cWiY)i_mojiZ3ZZDro+X|7s%9}m?a%(94 z+9xbvTx?gMprSGl#s6Hp!8&AUq!-6;h6$bu%J@|9$+}lGP-g@>t8oMo#85P4R8NS5 z%HtI6O*JZQQON^}Z01RcA|J*dRkhp!7a^v->!_&lyNp8U5=2G&-e4MhN%5#Hv4=zi@b=haxSzq>!&4QIW?Kdm?^Q?*Kf@W5>_yW~ z*LCiNL(2|$VY}kT1y0BYtgXweV1$7q3H`(YjK6Jm_6}72qTz+>5R`}ffzvQtAIMF_ z%;<}E7QQ!8Y98UIP~BAVrTZT_RZzy(Ot*f^*Xn<*xRk|WP~t_+9_{||&NdsWu>NMt zfo_Fhacxwz?sWh02usC3>Y;&cRQkN5BfIVh$f*D-YD^OtN`%b4y)?^CA2M;XXTf}tZ3G@ zgnbL*e74;d7@CXelSpmaOGXzh(w_{Qd?;sbfeT70$Zoo6=+krQvev{2N@qG#Nqxxp z^)u@ZC^p7uEFu$yPF&ix2rd7?cP#{*?@6H+mT{12YXFh`{^RX+0{z8>Lj;cN=vGgg zR6wL9GSpa1UPZ-a_|kSlmGRpkPz>$rA4R6e`$5d3ixu9tJA)pHkLl%Ak1^H_z-5T~ z1RRfMvj_ySlTJi3>|_$#)tCuqFga8PDt6t0w8d{?NZbS;teGZ6ghGe^`RbMw4}DbO z5DI|81-IupRTxw_RhV;}c&!>cNLgqIW)N0QCb(x=sdR0Fp6r(il9x3R$r$bh0Tw0f 
zrkDHUqqwSav%(Xf)_P6})GjCeh4C|SvZG$)LYZ&dKQ#?9p+nt6)GM&RR>L zAc>_}ug0n4GUd#Q4nO(7uAcdG7j5n$5caftZMnK_w?B~mOJRfYU_M5Z^|@57Wf@dRn+{^ zVgS4m?H1-eoQBCop4buc4(^q=->)o+2&nd>ZvA{-r=4b=34Xf%^)(SL=sN;=e_Eoe zNz7Rxb@wBwzxcQFEly|tL4MHX|5>BpefU85GNe4+j%p)~;ii(PO3=2csS^vME_XVJ z@bw)wbChX9B04bZcbkuw3bW06RRJXO#A>Qr(bqiQiEpFT`)pOe+EMvJXxADn z_(%iaNS!ArXPv3UNweMiAhuxmoOt=EUopyvru%2{NbYccY+Yramp7U6ehpg*3{m|n zk`dX;!}3u@Fd)4L9n-h+y?Nl1Mk3J54hbx^%t4BONNB;keEN4tRhL5HHESc8ve~)N z$60AFiNX6t&H`G4(^EN3sIRY};Y*E>f^OCZ)}+cwVWP~|(;Ay(3zH6$9`SQ7ni!_k z4Ng-LUJ4Eg4y(tfyr2hdERd&H+A;%tvtGBA%**e)Nu@thOc&=N6@6QS#UJ{c*c;5M z#<(mpe*Bda8W7n>z{}_+wY>DLcov8U(m7Q z{|Z6s>>d0VuP$hPAQ@U&l3RFf2OsSkReB|_J>8z!8UE*u-%Cy1lh-eGHC`C@y>dug zS>hIKARQsY5ck}R0Oz|fL`vkW(5@?yXH-W4c`lNE#ZMT!OzR+8(>V;q8SM4+)Nd1W zis5>XgvIazUFARS8mtxw@*}W>$UUm)($%vF7owV8Xr_*s+ApiT2jOhuR`s^|2*_W4 zhilN483q}>n@Z>e?xZ*Iq1`lK3K_MVvWsmM0$D|9>ESrbZ%8OpUQ+0gcH{XatqZG% z99s1N;NNe*MuO(r z0^9#SVid3et-<-6CcQh08fJ3_6q+A*Cuu|;H)eWe}8G&RA_D9tvesLV=nqaO6` z;#`aLnMFjz*^-cq)QcRbN|mhms^Rfw z;-jAtjTcN0U{#S3vq-|8J-{I{&=jX zdZVS*jkeY0J*KHCbP2D(bmIcM)sL>*ZBxYdSCxW>3+ws)NXq6z=u7g+N%6$S7+jnO zSgepX@JGrvNk4&omt3#Y!%u}}APgNJKf3Z$x89-NQDMmT0r-=gGiFB~{av(M^213n z_>~X`jE3&Zl|C?i!z&nMXUf=E0BJh3Exud!-3O}b%j*jA(zMAyN;%wYcMxerBNy2&;M-VC~6xPG9$9?5&TuyVj zbkra@ufgA1k19F(R=Iatg&=Q)H3X(B)@i_{nk?OW%WSKjvaz@_!K>%WkK zG%Io@bf(*E>`T)PSOn?|5x-7YEdp{5c!|8E&G1KP58EBvmMUx|oCVbkAJ%iILAF{Qtdy2r zYg*0J_j@nZZSn{?aM9u0fUD+$*~u2-{Sl1fZ2HzkzcjaQQNw8%zwckXlYnDfFxK5?gVFWcL>2f$lwwf+!+S91fJj$+}#H!xVyVM1b24=oO#~&ea`o-d(R*D zuDjM*=XU?m)!n;l@7mMVRrBjDTgQS@-Yeg4%Qj7F4g{ZWd|v=jnIihwItMzRW{E6A z>rnW<|GZ#ZeY@$fZt8xQWjuNFM5QfQXhCnaxIEdI&eKDc+uu&?pnp1tNVj6E{~=_B z)!$xPHM?GJfuUr#xx~B5&43F%;I|whE;fTHSYDeN#aw99wOpk6tdV!3^?|E3xTxx& z5Gh|kAecI$Hfz}A1ADi#BR@&Z009+&5Qzia-lSP7b|o`+`m*E9v`JEYUJT>7K7!3N zT4ZLynRIc)tdQ9GqnA;UjapPRa`Nh`Omc>i8GO!&oX;^Y;ig1dw3(d>V3A%A-Ut%Z_2^PiS3U5-VU88B#Zad(unHXH(yx80!!8GvM z&MYt3w3c|zNyP^>B8Q&EfUF)#5D)6a!FHtNE*X$BD*W`Hbcdmk>|%1WxUW ztMv{_=jch1Oqn7=w3fvd*B#tR$g3SZ87=mJhpvA}nI)x$?Jjaym(r8kH?zJ>TE}>B zV`rWC3!Vf{@U-DtmwR+RpyMW`(W$;o};hfpx9Oj9`LwwEt)kTZ6 zli|s*K3(yGm~F^XaT0g_wj!|!2(=Sc8_bQ+3677ew28D@tipSB%2FCHSL9~boG>u4F; zS|d_{rElB_kP0fvisPP04%2oGN~h-f&k5jP-FmZng-jopXU*Yj)=imSjZxVG7i9z?Zdv_H0UG1fN=uSws{|KY1@(mf2dEj!ew%5-#E+ZmKk?F}CVb1r}L zIZ7WoWL!`;lgsvd`T6$T`PM+R_+bm!$mTfYyXT2xN#mt= z`Bwy@YxU}BwR+6V!BP!KPnRxua+-gF=M(Z=1)8mBH~%j1iEbmJps=;iycsm!PM`o8 zz6*F7e*GT9Wh-h9WxJTgHThig*A(vYc)HR^=LDR_YT zLe?O!wx`GK7ygBVvQwz+@3xqO_Y^q=bTeOzF~$w2`O&wn|>l((&$cg3<%Y_doT-3j_Pg? 
zZNl9Yf61Psc9F_v@yNUR5n80{^vD9*s)i@MQ^~B*<#;ItxO`o9`<7#Rw*Qoc*6v6+&H&%Ad$bap+%`$4-!ZbjJod3H~_A^4$vbGbA_*WQmBTkbFyIt^m(`s zL^;J)8FGu7)58a5sX<|XRgmjJwGO>1?mShU;|<(d z8O<4Y(9_elQrnLAqmoOl90Bi<!#pNetI#u@KW zA5ds%ge6wS@LOCU(le~n(q|VBns*ue+U1(^q~b89I^YuY z#BNSjR`9nY*!W1)eWFaC^T0?;capI$_neh#g?u2OJ{1- zHBo|qMXTU=x(G4S`t_%wy75D)4!KQe@aV;&-aZ(S8eyfi&G^2!yG2YM#vSLq=!eqv zdWlG=>r&KO+!bqy0^`+<<(%O@`*{y-23)a4Lr9FKC#>V@vG~HClB6JR`8~9)re2 zX7TH5uSpUYYNzMHU7h_uy)7x5R2i`^08na(*VLmwkrj{JNZuIWx>q#2?+FX#S+))9 z2`7ZXb%8|}GrfPxyns${`;YXMwrxIGr`PX@H~Us1{6;z-yHASi%5fCn?$DQy|E;>6 z1Y=)G(8O1gBIvZTxt3h$*T{Hv9m#Jafhi^ZCaC`r)dK^L8na~darBaRI?h$e_Y|F7 ztkWL{JJFr9EhD$~APB=B#UijH5TZv*)rIJW@i8yADfOv%^qQb)nOs{fK;{g2UT2mI zz{Uo+F$jX4R_RyiM_XHaVdak@9Nhb2Jz-x7^F8gu3pmmA7h@-Cuia`sVPeLdlTUc` zi4hZjyMJ>_^}tlET3!k52zc~T?lq*_5Y4Vw{4LtX3NMUFxXwR|Z7U;w{c8P|!lM|t zPX%*>{%DU7F~IxG;1R0RyRnqce&#l%#*y};au=TNR~=ShDvxOR6JczyB}#e2wwD+E zl;Dm{vX%lu9g8&;^@Kb5h2s!j)5Ike~|6OyO-{R;eiM(zonigz_nwlrrM*V2S#BbppH8Ufj#0g{B?E_N%v{)y{r() zZ&f)nnKwQ%j0?DJ*!DYgPE;c>eCKFmVGhx3pOC+xUOA_KOHu&mmXB!rvo2?QtW5gK zU`nOd47pWX=CJ`DnKC*AbVC!~RfZ;02oBCG^-2^DCZEudqZLk3h(>;Z%QPq{8dOKH ztfWoC{2npw;u-!kg;??JFRu3q?KXTg$^PLTwatM~l7o5WMVYIFT8Efw0%l~#QdNt+ zk3qn~{UjmrZ>JC(@`DlbXPw9$`zy7eZ~jl(IWK@Al|LiT!4JRSKC1CK_bE>}8x&N` zHx?E^U16cAz=$_AxmbH{+L}5%dK)HAtx}b6zsgSox1$e z7q{R07IXRDTZtyMO&B6)BIk{16k%gGzvuDw=f+!GbsZRZ$)Fqz@ z%KfM`iM6SHHDu~tgfX1p;VDjGSCS>=Rq`La2zv`9w<%ft`s5{bk>t^$MeqY_s^06! z?5E=`J@jb403tBAMQ6k*lTWMif`ssUWkhF^MgP>053lVt>SuHluT(7|kw0;(8Cif$ z3&T#CNf8!x_(-5*Wg3|wKU1@=nhDnOi@!wIGOW*E^Hen(X66S^gp82l(Vcg+Q{k`L@z0h&9tFZOa;YZE)Aa)B=gZVd{5q;Aa;Q^3tjEM5Q? z)4rU(*#{4r-H)ruQ`0;iKu@pgX`72puk+^Su$Tc1#J21SER8R!8#1FRs=w10FR~uy zzKEPVqX*pLS))5B+$9uRd5Oob(d@j_ai{lR>{F?zXcn}GN15&?cH5tRC^a6kcaGjg z@0`)sRMdVuL=8^qt3;}&nkIWUDrrH36m-TT*=MasmYiR^3rkUgN6Cg2{rmv7o8Ppc zE9%lh5OQWT{O@YQO-)VLrEL7I?$SZsI^^Fcr`D5SK3!+U^Y(xtG|Z;xJ}G*V^Sggk zYJ^AW#rcNf#<4|No%)pS5|RmKLDWR=+4@{{byNaYD!F8Cb{tPUHkV>H$AU$~f@6MJ zc41LOezMdMaaSOQW&cs5{$cmA7_?gXlOhd$Qo8O=LC}4QG751pA{AAE4_h@5uuIRl7yKKO7 zW4qL2AfqS^sY=FGYeITLY4P`A)6}IA4)U`Hx+5Bi#Zzs$)Bz(ZAAP&TiHWF;+X;*iiPQZ_Iq;JHivi9CV=q=Dmec@sMQ}KRZ-)5fOqWG|Ufhhb^>%zN|JcVIbo;?| zcwW9KQTE+@8asiTQ-=m=Z9z+4^S8l2Ya-C>^rKH22D+s6jB+9xrla%+YUY*Az!;RWCv z5pumCkpA^dT=vGyz>b?&=Pd)=#ug`4sH?-_YG4di^S?hyI14t%?kPiWX(oPI3WK%t zN}_a*)uFGDKWyw1T-?@QI|VrmJT@T}sDBX`e#k#^<;ix>O&hjDL~y76#-8fb$HT2OFvvGY=hh`jqxb+^5%4``4jnT{R^2&I6z6A=WcQLkoNt> z^px!7X|B$<`M2Tc;4_qBIhjRapQp!-#Fq zXP2AzUFE*+kH5`t?}lCwAmEazNcBHA*c;D3{v2Mi&eD+@dVkrp7$g5$J+`*$bN7yX z0vPb>WxKD=aCKO##K-ILP`B4GY-IoZ*q7|3sOIs!jco1tmKl~A8jdG=>$QG)LqV^l zFGG8a^3et}^___HW|!MnMj$wZhyMnJ#||?(ghs;l$vWu$;rgf|jL@znSPHE%f>f-k_2YYIU z8E*~C=cExV5{j6qUF4KRAV=do68NPtq?pYGy=>j90SZDCD);5x`BPKObArY~J#Pcr zP11F*r+8NSyws%W#7q0?#NzAl z3K4K3`U08SIMHIW5Jhwl`9_D(I@|H_yMBo-mN&~l_h#ohH8RSDkxVi$MTkVj`hq&8 zABCFq;JzVqnQmbNqC(T4eq)_Oca=n>DWzyFlqnN0$TA|CK7aJ1^l>%Cma3wH_rAhR znuGyj*)&4Gz($e&YEcWx6JzmIq7HvpN6z@u@KnftZgC99$39~vcoJFPQM&CaMC`vr zf<87{n<3GP>fo~G&ZDWs4gdJ0P(>4)S;BbUek<}th(b_|}T_5J4#^xkiS3+Aqm%~8@uW^AXl!B~=<=*Xi111!x{=(~>oaapcP zlA1#Jf<3srrpG5j10PMQ@Vcu*-bO6_&Wu{7rxSV09x7NsNTM8()fR$B+MzBh(C42d zeY#)O`gM#RmqMKHz}@h`uLIBJ_tD4b><~e_N#w9vW9*n+S#Ki0D z7lSQG9auB4|q`@Bw2=_pC6V&NHMwaBaFADlo!_a4&uhpuL`pJP2BeUS?3(YN#W z($olBqdlm*iS|Y&6^*HasT>}kx7dG%6LX#YL}U>-d%sVlhzV|WCTrwm%U%je~;Fsm{$X)4dNol zRnt<*8bG+Bb+98d`52eTB!ppeePNGv1z_KQrxhQlMt4o*tupIe>`I6(kdeo)Q>@jV zsM5aIyVCWcq_Ke{<=R0+Gm0;4%q7r^P@d~RBYE)1TF6 z;ZK?c8gx4)`WH)N)!7X-a%_JSHmVO0mPOqObIMv(KG~@sVHDmy5Pw>6vERitYk!yO zu-dQO5AByKka-=<;1M^0u>xMy2u>84$!I(A#SH$j(j{_W*MfGGj&l54A#FBj`YLYH 
z(wHRC=&rL9m0~aes>gZJCly(K?U`!Pt!v-U296x`iChrg4HN7aV#M0vvvWeGUD6*; zAka$++rKm#a%QDU$pAaT+|K@roVJ->C)ICZh{QHZ;&_u*&$-L_y|jl!#%USE0{!;u zmF_`&rWjDUF5GzUXPKep-p1(ZyMLUdO|aBY_Fdrl%L{QvKqO1!al7Yt=pTDBj#SbrOytM&(Fx~x;YPL zYlhTQ78SudNMqL$LRQX9B!R&1QQB`h-ENE#rD8joACNKWS@_(fk@6{CG0YxySc+s2 zkuC`6>)uvl&@wgHJ@cd*Y{Q=h3|wT%4o(EVBJHGN2#vCD2%}^Poqk_^vCXUB~Os-K_xa=hf2Cl0+nxT0*%tWF#}qpgtkwVy1E^@CfFGAIhW=k zZ(5Lux#Lqe$exM|MTlECVc+%!P_9het~td3W-P9<_ZTC8HE2jOYFg3mN+kO}%irny zC9zEmQQ6r>;5?a^3H^*^&9k@=^BG``u>kg8{AsyfcT|@*T|7F~H~hw~Y3|JO=s2KS zjijS$ZbbJBym7?9P@N~MucbjA5nYQXmJx@;ulCy(Z71J4;m`N}2q6`6^&)ld*dyVcPh*Kwv7J#1vW-(9A@xR5OJdnWA2z?#F#c4`st-_U4lsXSGV~&qOKmvw z4gbl3D=TgY4e&~|(@3upBbN5&R4%r+ed9wOUmSgD&VY6v-jQCfv2!DeeN1BmIZq(g zUxvLK%hEP|B<$K!ucDQc;{U3SKoW{v;$(1N9%2H&{R+G)>2+pGlyyrv6ql=WrP=5a zZ}kiVE0BLO{Ta21VM&OJ9O2;^ORUA?Fy+Sx$58a;h>$*(%1=rnp%_T5Dm&*XNgH^6 z4AZ_V@vxg!1nWR)t;>$lo84$y7?L2xc)+ z1uuF5+_OVruU(zQ*may;g>~27`zENsn0GqkGM^LnE9vQNADBs=k;;D%`Lj$!d(Lkl zP5Mva5#;OyYg0_*hy#tkbNt`!~~2PKj|CBfJRF>w%1-d!vt z0QfRL&kpe*nw28^#X0C<=3fQwt!xnp*R8ETq1+8Ur!i9$7^Pb#r@E`3bRVK9tP1?1 zyV)7d+KZX-Mo>*UZZ33{8aRHNSTUe%6ftWXWZr%r1(Rl1Xd6L%3|w>6)!WhQ;LjmCXUt@dpO6WFTb=Po++XHa)@(8~w;;d&B(IC=%x zaZ4bY8HDP2NU|%7=TLchnAXAT&Gj_SuIsMyVHI|D^xZPza3f)wPlVcZEXK@=1Oc{F6uXd?E058+M}EGm1~AQRPU3?B^l+T?A3 z+`|+19IkWc=NZ}az_|_0>WC)NW`OF6CD)Hc`Z*CIyfHZ->1>pY0ItG6D*c~db4B@A1p*?A40YBXLyN|qGs zU&N?!?koe7=~`~p8Ep*h-24-|?VYFSGEQQFUlY<8|% ze~zI;jP;+rCnUjHl@O1(uZaO`p(ESlcdycBvZj{t!3Z9Rhu*>!es#Rx;@ zip9QT_CE)dFsnFq6~{6HX^io7P%k6gc_h_(m9F?{YdtVnlxQdA#)^(>Q!?8!?fX)p z-dfux1ni`CT$-Lwo$a(?YkgZR`>tc{Yy!obA9r9S;kZiO8$i;E?jv`#`f+xa4t}q2 zHTgCyaj~FZk8~2sRTEP|VnR!CTn}n!_nA$dWn*K?g0ok4?y(jc__9WuzM~Q|0rn|c6OUsVr1=~yB(0-!B5apIn8%<)KHG8%Ij z1ABpe*On;cb`EmWH@(AD0fh`zyu9pfn+7_4v@d=vOjVacXi@v7z@U2F~6H$ z?7Bm3Z7bdhyN30sJFl>XMih5Sbb??N9+ZTnHdfU%ovMJ z`XeHz*J=l{n8Q~RQdX2iAl|rvENi{Tm6C)gZhf<|(-kh+GcoJ^xzEKUR3O8k76bE% z%mzVVUN$Nu%Spq`(Il|W_kr4SUQWw!fT5NQnKy4K(sW8?ud3d<79Z>=X9f)a#6O%lh zzR6MyknYla>HVq?ED>i9bRWR(6skXEruJISi5%%e&vYS`V#g?MeKCvqDUaz&#&0GZ zMXFyBA1iERn_%yhj5O7(kRpFKEF&_W#$T{O8_6Zd8QhUkvqFU{PA96ZsA|FFtB!ZK(Z zmklk)slYX^+rBiup!fJpg_FD+cIG_@;H(ecwj5FDcY1ipt_xqCJd3*&l&!tWYdL0( z73K96j>nH)HU8q@8YrX#g*)qm|U73B%!;!%!xTWp>OZdm@^w08U zDHXC|F1i7q`zLdE*J*VX-=S~l^~_|E3dBCGild>w^IA3x&T=|&M}b&)VaJIK7`nV` z-*fyJh%umypvC~5c2r$Awn(rW77Ca&pkP8#nm@=bxsXd1;4k|`l{isJ3guPr^y(nI zFwl1pUV5*7k}4E{)vElK`crvxT7}oQN+b5sB|9*RnOY@jyzK1B{6+_;Fn#b-SINr9 z%pcsWcPC9{TPS#s5#}CZuc})6dkqNOJ6K!w8PYCdt~c+m4>Q($;2_@a`-dTcJ-I^T z0FwH}k^4CEXM|Cv)vDh$EAo1c(iS{=2s5rSYRddhR$@|oyGn7YwRWDDE_!Y7(c<-b zXoc8!y|ty*^j+IAx88$W>U|OEgyr;~r6%f#*R3T*W)HLkZMOy9AMiwf>tc-#_uX`h zm%oV@U)o19M;%`%By*z98lbYJ75s#T9>#q_(LIjPyl>ahpXE3=$Y}9E)`1?WfktXH z7e5e=i!IDU8<38Dp^?eU)+CQ+j)J?O!7r8nVA@9zZeL1~s;IB|z@j8s8Sq^U=h5}S z%k&#-%!4mIwzS#*9dYIUZ-}dyot-_@i9(lxgNGA-S(QScf>qq!4r*!#zskw`uS=|o zrY4p~pY7e?WNh%Ao1ce*kBjqf(l163I3>I?|Gz0!O`YtWAz)J{iofApNjIpp8WaxY z{_9MdlY)bT=ihYF+}so#+F&a`Mbgw zh&@=%6bhG!^^1fQ1*^KL8#J74)%6` zYk}`?)!EoX)Etb!@VXMFE|y?ZRcW|9|GKOWF|uW=uO9s)%|D%B)wDE$BjW5_>^%Py9_MA_;^hBt`1s5G%R1n_oRq8- z00992K!ERnmnDEW0Qof%GSX{gWF%x16y!JW-oJbI_U$_y3{155_&5Xv_&9iYM5Hw2 zM8s4icz6^{6jZcy3_u1#a%MJWdNvw*2Kv7nK|q09TEBUR{q7w$JrN!e{r~Ijr5%9v z22mX;2oZr2@Cpk75ewm^8$bdV^*4C#P_5DaBMYOk*o2Ht4vFAW?B=9FE_qvZy&ft9BUw zenX??cTUSKp3Nfxt%5g7^nYatSJf*xOJw*xHY^0dD}-0CUn3#GRfzbvLH!dX=HOI>$01Xjd0}BxgAPR{5=5F_)l|W~2jz-h8=Z>B% z)6t6u{#Ci?1#p#lwix+b0M9t^&E_L z_d;8m$U9GE+@+I|Kl^82gyK`YL5sWc_H~{6umckt6{%HTeobo8DLz-Yn;jzw7gj!H zqsFwJ!SeE=bS>Y~zSPbPXh=jjW8p0(+)6eM8tYlAwWhOEQ?5-JKDr@GN}RlQ0c-Ge z7M<{{o@b|d$U)5beq7>&E^&wqT zr-?{y72bJVx9T_^AS<3b?Mjm4hYZ`UYm$4!T 
zj1tJpJv6wKkiy#j0>GK_-eDchcrVxv06a`~$4qx&=h`Sn?af5pZw=2e%~68}L|=1* z?ucMsA7Xa!V!cXLg;@2DRwD+^(*YcPpieKA= zSZskjXsu=@AluQldKdj?hD^MS*uQNjQ{2~*%fd%IO{^#IX$D3>l0&0O--l4F(dIQC z9;W3EtfVGC)o=)yc-+5fRm~684CQ)qAtgLRzpgpW&ksp2D#CH%aXTQG-!e(c!G&Gh zxx;{_eHCb#+_lXJJ`K7j-0xNSX>yV%wD2Etecbu6N4Ac}9*_91_~dR4?Yy07@a`2> zxW~MW`Jtl4w2ajWtQ|NQ?@@f!PFtBnTIf|QO=Onms3%~1{SEA(M(u21IZ-x3JBrUQ z6vM*B?ex{Hi6UE~924-I4}>xYA_-ay$q1Or=_q5`DHkzx4~(36^7Z>3iJV-Fu%V+f zXr*0hFMGgMS1qfxHPFBq2O^D8;Z%eb^-{JcMq4c~fNP8MO%#)SXCo!Gilg~S^I~Ei zf!~vmJ69TJDN4L2X>zq-^h8O$+mdS}4M1*KXuJnwx|g)T!e6WjywgF=zN-*GJ*afd`QH;pt3>9gFp8j@op$$#tWLJ&h{7$@Og z@22Kj_O*?KRn5r2LW8Ezgp-{%@Kq|n;+hKUu2J9xfIF~0Nu@Vu`Ac4MD4Jh4J(_N# zDC)9p{qfi@sz%ca$q&irwxG50SwlOUC8SLEy%I(lKzL49(K*j@a#P{KxiFY6m^2}_ zXT$KReryRB2D^97t6iFpgcZ5`Webn(02zz?gRqW#`{6_-_-s4fGi~slNnRi`cZ5ET z0^Hg!?EU|FuL#5*m&aee0$w;b>5O$NDfd(BnUGD-98W{yxN@uPni0J?{K+3V7%zZx z%;3`68#TScy)ygoM&@)yTA9`#ko=w}EcK9YJf7Khkv@Gf-F5DiA@i~2+`=*`kps_a~Kda<^ zMn=6jVADoV_pC`!>DbiHaK?AzH2uc(M@a3gxYekRYzDB=J$bj7WX7sD!Wv73L+?^= zj!WSZJ|tO+BR{04zqPcD`jV582PwC9?eQqUkDgvgND_Kf03h01dyAmz)M?hPc$)U6 zU4;x1N1GGl)40TkYDStFE-2-Z?H?5sB`bf(b*$wx%Co^GVhQ2bcU8z&zi7$F6wNnTy-cg8jj)Z#1OP0od(Tc0&o}%o- zHSjlBz=ceXgzPe^Kvh6q-RtP|vy1ubyZu2oYix%^TO&k=F`Np2hVIfDpT_rhy3`iD zly<7*G^T)k8o|UQqjWWeV*V#FK6w(Y!axT5Uz8f5wJwvhtz|@g>LNvB#xmVf zWi}JfaaKT+(DdcenN32YLt$$4tlWaK!svXDB+6V^j94)hZpgLgLT};rg$j=s_^YMv z>YL7-&T88dTL}$raO&=9!z^Rmh$q1ur8_Y`-+aUIhWe15c|LQdKf2=Q5D_x3fi%OsG--WB!NNBR<6Pbm@F?2rs z@VjN)q6DqT6#a)$__eGGv2n4wK8_Xb%`HD{m)4?qXEFB-b#2=AYtK#J(xHW&20my~ zUhiQKd^X4F=@i3^t8QnfJbp2;A*ev0YAqDcKpkyPU0x`+Z3oHbCpso2qn_Ltvy4TQ zW97*lYD8zMoK|{dan*tGjH{2vDM<-12OZ;?Zo#rmdm9{&mohSN!6EX;t-FXeV+F~z z76ofw>;_>_8!5V&%%pqzw(@nOZ>dp%*4PGF&82PbIo_;(lT~R@7%5MBWExjnp6F}1 z>B?$mSK{c5OEGv&SsvJOY)+IPl0P;*`b~7)GvBT&@SF^-UZ-??)=ly=l};5q7Pb{@ z?h8QV|GZZK0C29~dbBp0fh1fIa(rv{485Z#`3XS$&q!5{y3T(bHcS(taQ((Td&nIh zh_147PJFm^c5g6fqH-Uwl1!!$sXGL=< z%K^O=y296Pk;#<|~1!}KegozX~XDHmNYfN7b_$qA9iG%beH zxw@(bodY?G46dlt*Brd{E^aVizZRngtQM%nXDnUfNFs>D-CQk2^^#_>HJxv>p#Zc5u0 z4#XG4v}}SIU>}d0iwdZpoFE7ESF0+-_-6 z@rrw1H~YTO`ukuB3CdL`@%@)*1mBMJObTRR0l3xbq!?a-1VY-_c$D~3+K9s0uM^)W zlei@@<>@p+oR>axX}hr~+zTLurLsHcM;{Fa5fd#=6swncLhPcqmTQm4MxrjVyKadV zb$}sSA7qMwUKlje1%FmTvs=SM(cX2ElFZ)uv(5hnx1va+_~>PTs$Pq>RR(}pEy3v^!c&W z)ZogCAsQFhs>eKrjlxAyrF;2Z1^gQb5lR`*O{=(GM!t4 zm^e2nhloH=)JITMQL^-to!%T+Z8Fp0dv*qj0cCl9o=jFkr?J<#BF>1HYE3u@QnS}! 
zGZeJdx}-WJ;mXS{gdv-zG-pZcVG#sNay$njY@&FO(7B~&^L@;drH&5mMVD$RP8Gd% zJio8n9R6a&*y2RL_8KeKnXNUe+^ZCxyn5XEX&*nLnj#z$1jA_S6mgjDKA!bvkg(crEiajzTAJ zMruBd@|?YFjE>)F;}hq%+r|mKp(R_6_PsJx4zU(t6%LOwnWS^m8lH2ax4c4aQVS$E z<2gniRassfOhq^5siApWJ1No)NBxK`j-8sGPAsgj0x&q!MmcVP3WQt5qTmEEWWYZY zm?fcU{i3fA4JPK$r7FwOzbBfOZon#o>_)ra-F@rvcZIf(RL_9Agvv{$bXN3w@{?&x z>=a36jzJ&|+5{q7*OXu-o4XH>=xA}Q0Z8NDff{(z?2{#ON&9=0y9xbh72NZos3my1 z-h)e+e*|r(uW=)^In}))Jylfj?F{5-yCPKrPjGIn7;Vmb2M23=_vWj3&F-J*x5ozb zo8`E(&+TLc9e!JPui`~bGG@rDF7gxRCY21rf@bqQHO*5-s;@QIi)XO+qt4+r0$&yt z>8%JUr8&xUmuBz>3{Q|PNE>Zfp@l_de)L*ZD(Zdm98BSggk;xXwMhHeRP!M$qlY75 zHB#XrH=7elkcktIq?vs8Q5?jOuW|r6ItR&M|2VQVI-N^qLoS#?Hu+cIhjZbH-P|D0 z<9eb-#_)!O5N9oGEPWEL85QbL#r-j9N`8pc(PHtQ`W1#h0XlaE;_AsXbcX9^dpHcV zO2{lua)9G<$C>RJH7+rIM$z3wZK+t7=dbD-bWgI9 z@WH|etMlw^Gyi2{gz?HnM*9Th30>ngGj&ZCt8iRXr0GOLG+@l8`Fc{~TfO8{Y44ZM$+|3?@f`jUM zza*A+H-xwdIF9x9ZzB<9vXdAl&Eb#kR-76am^58Uzx6BU*Gb(9{ey<7oUZ{$0nT~q zg=tZi3K=f|Vkbj}wfR4_>rE?NzN`BiKa7(ss|u5%EQeDa>twwOe7|yjo1R%eNLHYJ z?2$O@<8qc)deD>^q0OdU{!?;w3HOI)x>)Kq{r10qDkuBT8oi+UpD7zL8UXONY`w(a zg=d=+sa|sry)5X)IIqqrtTuE=tDSRf;F;03iPk@n{Rj<9=Es-y&BImcZ?)erZYORG z%_qyN+(>awmnFtkfJRL*g}XRogx~kWmnjim?kt9P0;(IR%!7l6Om(mGWRKOq+^J=G zRPkqN0(-Y4#%RXEvA!e$7!HGutUnmRv4EI%FVMOOr;J{3>;C?3&XW$Az4Gt)sa;9$ zUrUBxYo}K8;AxcA3LGMm^zrV2*?UZO3UFLpa-mLWij`{|{pm4eiy#N%%L)aUI$K&@ zcQ+4Kn4jb8Ck0<3_Q>e>CUHZn6dMZe2Ea?zpo%5>{lh}5>^z&$9C3;2@$PbUkkoF? z!glhJ@`VWix5@SvT}_t?ivJF%(Kvfx{;3+n`SWNc4A^LktM_YT0CH(!J3jq+WZ5dk12Zyu{Egz2P@4%gIiUJJsnO z`SfdyDsCtzi#BAhj#Wx7V{50jw$H6rJFBFnb%7?|J#Mhype!yTMv?qOwvB6wovzD$ z@E&%IoEo!M`j|YfU&G9=0p_aKyfHtQ;{14q1e0XAdE}O2ah+9$p#ET2K+zf!_inEN zv14A^sR^dB7@Df(z983#Uh7|DK`Gw})`lwE+FW(9W;(P(%A8}zrt*`kWVQapaay&2 z@M)I1?N1YiMM>wd*XZr`wV6kQryVAF(xv!Gc4}bTnv}G%IT@Q7>vECPAW2Ed0kvQW zXkPPTTx8jU7I1{*XDe#nL|Af@GB|x+1#ArSAbpa3im*fIzH-s!UL@KDs>3}kaP1Uv z)#bubq|>_>D*Ky!k2&xef0&LIS=n)Os*e^1RmojLwo+x2(^Y1lrluCuGl&*<8;!JY zZ>7T1YU(sC1gX=gY}~iwSHeLH8P_CXyuP(63kyh!N`Dpk_|>R+!oB0NS9xueZ#MtH z0nfHWcz2yyww)@Bak6wmmySd2N{eE6I6`WWxOTZr*_(MO1}jbr*4mcG)G~;Y$~6CUqIXJ7XY}jvBT=zZ)QrZ zG4C2w*|}w7&1BUrm7O1*ZJvB|PeLNeoF9X%qQg9Are~$CPa{!G!V1<2>9#T{lNbx? zL*S=+2l6MG)Nx3wEYW1%NzST1z0l;&mF4YGxEQj!=QmH6UYPsCuM!GNUZAk8N?%@N zw3*>LiKlJg&|4$ZE}L;B^5Oq*rkz6duB8iGo0xHlbQBo*ET$q>fKHrcHN0yU6eg8T z$>O^-6*b(&7cwJtgcrxH^Y&9*I86U|sKq|y7-?gvH&r{PLhiNn zB(5wNWkm(9(%34>4ObsyZa6n{);T0~A(O|IypC-})Va@rX;DL+v0AWjRSJ`GVpwRD zFRMEN)}E`n#4XPuZP0#JtVSu_O;~~c`5tWB#N$nwx?+<`!*;K?b$R@~CHoXr;mDjo z|8|}tiZ5(BX%w`vP^sdwW&COKhg{Bkt&{l16!{ayJh@OwbOCh{{9-~T7ES(;prre| z2@cWF_>Yx+ijH}?^>^ZUUFfY0yn(;G;v)2>XcSX;ju-A?0Cc>k&$m>>_bzH>1b!n- zHQXijHHMmj@quou8F?iAVG>ql+O`8rZnZx2P?j3JP+F?<5zqu)X?)?PZA(R)?5VQ= zHR&CD#p0Yls-LoYhIujUM5nv$_(@B1*})_G>BW^vj^N@? 
za#ykARBSM5Dx0?&1@Gi}u_Wn28$JiuP)Aco3N;S~Md-DWcaKfzGr8#mSQM5OyLjt+ zbAQftXyuBa&z%~9Msa$an*<;?R%N%A$t5pr6&6N0c}4TB|Z3ZgPPQ_m!KggJ9YKR z|Gc#z-@>44O5NM4u(o+!??&UqGHV)1Q{rsXny)S?@GmJn)@e@y4Ds&Se|YXFx;1J~ zV(c3mi=STsr6lfM>y995jJO;_q6Z7z%79YdatM1{f@$QNi4OPruTB2DMR~~~X&=vA zCuMd9qZ$^3qbV)68jeR?OrIi{F{oB%gOJ6O((SH3Ok1(@t(AaDwSr3_qUSC?Kz7;q;iDYzBGv z&+q(H=pwHgCcbFs6_q(La|`zd=x`TGG2X_xX!EP^z-%sw8s+oxa{@7RtytLRssez$ zt>&uz6;;}}xFI`0%9d2igp4zq%vwgw?i(H5viGiG`v=KEcQu+5zx7a{HaFg=l|i(<-4g>{pro~|qq@uL!Q(F^mRAOee~xvYl1Dv( zvJ`7d-0o8)AHyP*KOgI-xP16ZMZA{9zc$w7FN6*=wE65bHQ< zXZqVu_d0Vqq}kh6Fv_h47WwCQHA_y!lCOE+lIwq&6@U>ikVHtI+IlAUI4uTmn_NRo ze?7>;k-BNnPmmfvp%A7ld|D)piE@%0aA8@KUH0K>YwM~mzL{;QQYu2TD06}gs6f3^ ztH#W9T@v0VR2>HtRs3O_n^xQ;$Zk-S6lTnU(jOoDo=T1lDkPZWd3j=V8vPIU-ZQGH zwQUz=Eqh;}fYMxxs-Z1PK$>MquZASl082Uv0SQGqic5qbum~ndfS{C+0zpcEP*rMx zfB`}Y2-3UKn}8>0@9+J_K6{*VzITtkzcbD_=lz#4$DGfcdFJ!n^DftQ-3XxJ5_o8I zCciS5xFewT)t0tB=q*)A382h2ysSTUks5T~1TycW7wK>3KAw@Spe3~lrF@$oDYZ@M z4MPNAw<{NnP$Zg>O)sEGwes{(aZidaKEBR5k|}NzMHs!~Rc@Ay+}`;tRrDtErGDzo zX~T{gUP;KpfdTNWRh!gP4>cUOEfR!Q{Z$2aSkiov=i{V_nWH+8)5@pJ%eaAq;;w=< zWs}%0Q=&m#>DVP9b59)3__J7=-{^YR*WoH-0&YAw3Jx&|D`t04WIwjjuO`0DR2LM6 z(-1f~;VE&aafLc#5qD#SK2*jECMHwLVxRNCZ@$=NHdBN=ojc9QV~5P)?2&h}^qwpz z3td%8jI{d`lKwH~3unQ+$Pbg3ZlXJ^yr*^L)*Sx>rmc)#)1fI@$UaA!zr6&ND<_Jt zRn;HL)CiEMR)H$mX)hs-5s_|zsnVnj8Mr>pXs$H=b3C~ebX)YyTdI^KuuSXfimW6> zs;pCebJ}|5LuIYsFSSNCyOgynzZih>Bz6;jHlEFMY|A|YZg^FFzeXC4t*ZQ%=BlTg zW@dO*wWlZjRdlVyq%xm_&}bmhZYOK}IDALtRw}F*%F@%Cb-FR|`Q<%Ee+V29PA*X# zbApPicgm>JU3>k)?WMA2IzN%nDi_aImW$taD<938!8@I5snh#Lv^X9DujLQp$6cdi06u+ zr{8*Hi40WZ>0nOaUnv5Pib-|)rM7qXGSzbRQSxdmQh#_Zt>=6F?Pv3uue+-D=5=oy z-a8=$FTk5R2}?4d{PCZ4vlxXEI9P<`xzd4OqYQMrsn4A(+uANi3P~eGHkmA@D9$Wh zAV@~ibi z5AQ&w1Iiw9jTXHol#h}yjwz1_zyk~A!a#%1Lal7fZ)RY zs>JEg6y=Xyt&J~UY<|1Mn%--Wr3fD%9JBRZkY2ucrlgKG+-ou<_hw2P{(pA(?7t5F zALr>`6Y}3JoPSNozb540t;oL?!~dg;;pcF^|DOjH$G$($oMx8>)fGxbzG8)xWa!S^ zzw*Tq3M@BmXU+w}-xjRCUKH?vGf*fq0ial$CkcNuXZMPO-C2w1*+(E4^3{e5zMTrc zZi>;)M7Zm-$hj%bw-qVqOtt&F7G3NeQ|;5=uCEE)lYuN_uy*sVhziiKd?lqsz<2Yh z*2xr+iRVqgS8N$Kz$7U#5|K(8)buk=h6AP>XRItwd=&YF^^0G*V8?J1aQ($e$C-C-2W>MQKPaD;t1ZniJc$xW;p!dTQuK*=y$SmB$_yg z(J>Kh9=dX$>p*GA=wH)F?e$TWf!NCP>bA9Nori4=hx+qZs;gA<@lUb8xQfe}SPl=h zu@9NAE2^7?4NVf6|@{tzAkykZ>T+McUsMGQ!~k=$$!Rvi8b_2kSeQbZ&9F znSXq)Z%{CVzn_>$Ag##h$c4cv8M(6W!D1IYX9w`-ScjW~XRhVHGk*7RcXS%dy*LGd zusin~#me+{!9TOdc}kiuXBt9f(aZ*eFX%&Fu`XmmHbRUuxiRf<^{%z+DB~NkID$rE z{)u^;O7Gv(a9Yx1P*X!@coKm+%33pTMvrigL8yW8Obfdu)Ms(a4kP2FXI(Yn#^}Zu z&A5v9KR*u`?F|>ZBNJJ5%MmKB19!2U-*m*R*yL@N+e*ePbgt#ReI#c*pbB`@Q_I(FH-GRA z?SF|74N#J=93BUJ)j}CSHr&$P$E&@yJR4DW@x*K6EI`Kp!gir_X)ci}Q8_ zA;Te>0>aJB4KAagBhnAiKT7$dWHAyf=6XCAsT@~oHz4g`W+l>i@78M?o8`D0^M%Ey zZtLq4T?>Nr!sHzl1eFjo{?D2@s49hYbvVES6L1}#6_EhHZoH>tY1C(oX`R(?rQv~& znOM=8FaWtq5AE?@#LF$~F1>#XS>%qoJ6QTM`ScJ=cqSj+?`K9ZAw)l0k^PO>Y-u!Q zTKiY>Q&NZjPlZow6rI&w77kRWHZp!cNfR^f($cOyVPmU5jL-Z|^?4J>+vx0}7BpH zNz6yqnwzDp&rCu3-kroT<+?GlG_iz~@DQhugCw7xvX@A}$YV+9<+R>Md|6vk7n3Hp z9@P~qzoZDooJ{yNu^ka)mt|6V`Oeo`t8eF>DZj5Ap;rtCv~#ihzKGp)SK@g8%oD)X zrVz&gxpxpQoHRcX`<7o~expYkb}(J{pS5NFC&oSXZ(ypME4sG+wVE4+AMQ>+;k)ke zU-8V5DaD#&t_KTi$Nsx#|A|}wu%px93KXh(cYZotML-ik0kjUTur7B^|KO7v4Y;?3 zBZRufnY_i^nuRyWX;BiSKu|%4h9H)pMi|@bZ{8zEQ=tq`bOzDqwG9=;5>zwq`~42z z9T0^?iKisAHaxlW(|mGruIWwcwaeKS=Z3{Fa+hH%GH~S`_T%}m_x}_B`9JB>f8Kez zRG=QQVh);TaQzLZIr>mmXqsvhdKF7SL`;;9EO1M0!M%56nS$m#(?#KFR=cxc8Ct4V z-&zfa=2TW9#EJuL)gdtKmBoQcv(JI9NDppnhIHZx7b#Y!TFYK2#wUff=YQy5SHxxE>qaRe4AWMjXF^$6y}IVhNRk_3G(*wU zd^#1Z&`8zYtrXDy*kiZ>^_DLs%JWKb9U|d|7WQYY4TsoWh;(&*o0%$B`dcf6<}JFB 
zt=*o`7RVUW*LGZettuLB+7FG)AUfQjkV`Kom@Jjbto1e+Is2JO4c9X|AqSk=NGxo( zlGf9_FceS_Ra4_o!@3GY`u%1 z7)D1jcRHls$ZMC_qasyc+U<35MrrPq^}{pr1^D7B{kIr;krCaFhjKBJ&4Z#K8`RX8 zW4nz*ioZ-GihpGK*Fd1f+7|h*NV|jtxmP^iczU)*SXozdQ@sBJb{R&r{-d_1B)wF% zUtJ5W8=5$u%FoAgz$ia#adj)`wMZ^Tas6eMMhfeC^7y67QHp z6p{-Fc;Z#h=aLAY^+;nzYka203<1&Fh`u`N;z2Ce9(a{pgo1*~!gOT?Q{-r2d4U$> z{U3aRO}5c>Helun5AKAn07ZTZSMU|VQSDTRGDFigb?I16aP43m)CyFuB9#)A>hh+4 z67JQU^F$(W+~sje_1z%^K**Ro9j<9`8Lm>);~K*qDBL}Otn%ILztWEt>e5);H(s19 zY~pO4xq>Ef0%b{oflN?nUsoXk7ss})M9?h?Qrkd&BwQ#tFLkv|8pTM$E*trH%cbEZ z*N9;OZ*%f1#%1ygBQ;G=>c3M6DBbDs=A_;M2CCaYY;qTrkPr(Bqwv~xLHQ)Wb*htSeS|Hyh%uHN6*n~yqi(Ader8F-SQBh za_L)8+^B_<^tYn=PyM&wCWv(^Nfw~m`(bQaHd}`$EfYH4trB`}EU>gsoKa!H)iWM2KNn8>-W4UKbz5L;K z!Q#P@XK(Wi4Q#EhJmMIsuf1nJ92Zo#t8s(|fx4aIIUAIliJl^Ls(8~7Tdt^;6n>{U zovV(>8r_6~qnJ?j?DRa~CV^{u9%N)M<0P|7$%t5p>02+5UHgy=Q3a}wqGYLIn2wRPHAYOig3>B0r^fn*Hf2Etoj^t?`s z?g~n+!@##E1~aE*JPqm8O;=1TzoR5k&$^t8n}SDLrg{d7N9RNAg3&@9<=ku>%$w4 ze9FzT=q;nMBc5)=#^EzRu56;G-K}f&fQGH4hQEdwaw2PZ zBJ#|Ms*_x)G9G?zGD6Y9p%-XxrOJ_lp-C-v;3WPL+m0!H^SZgV3ttw`9VTq;m_^Jm zY#S&RdzgABQFcd?5dfefUN8&cQJ;>f-D*U@f4h3Z^V!J}+Eo+mX60k8C2#E448Zki zPV0(XkQM80<&k>-VX2$1 z0ixvLFTs_V1zxj1J=rx&DvV2<*AmQ-63|1>SP-nk&$rInkZHTU<5xhXYt4uIQ5wJ% z!A+}T7zSWNIA&9Gkcen*9KQB!9eXm*%5dd2U|`+>Q*TO((+vx~`YPzTcIM0zm~ zYvD?sfbsjrF603-O$(4Jko&&DuBEcsP;31@Xb*F^R9H&0@mpEl5HEa6;DI(4`xWz9eSD1{_^KFZq@bcLTdU4SDGkb} znG6?VK<7?WCjK-`d=q!02;9BkUi6|#h}=Hw51&rr-lzdkND1@9Iaw18>=S)~^1ir( z+Y6MNBM;IbGH&)?zQO6Ro!Nu}f+>kJIk|!H#MQ{We>d=Fg!$ELTU6h`r9X3Hi+h_C zGR){g=2_IdL#;JU=7E*Og2q)|aKfo{{JHIXX&;d8OYwV&w_y8vv(QvR$}7S%u%LSC z>x$ClK-E5Yl{!n!yxhv<0GIidKeqf)zJ=ZfX{w7?i&08?>`o(BWT#3WdEMlmYvmUN)cMqEqY!1#?4Aq^Y^0EE zp}?{0+Ay5MwfLd;wzJWPJ7yZiv zyEOuTla1MZs#_JrKd8JZZA1-^RX`EFX()=44}P|JC|pJMa@DBH;aQFYF0K!JJC>tC zCUNOw6|V=%UFm9m_JtDB9uRT;2SN#t3{o7HAzkG?b)2%8GlfAIm8&m1Z(cU(*yJ+Y zshGWFWZorvM1Yf1S;_hSPrDZ`Pe&p^GWYXCke_&%HKPFK&C-xrDM{%7B*zQwB|LvV2`E1MzScKV}$GXELxLFWg5U&}VDl=Arae3Mmb@6G8*Y_xtq+n%8m zAqZ%4v_evXz@|S6+#)JvJh!R`VT28CO z(q9QO`WccGph$dthNPryv%dcQ=0PpSJOixGL^-ST9%M@pB*G9Cs+|@O=4ER#t`= zVSM?;T4YV-l7bboa{OLB-7(Kdq*Q#D2CudJQ`~4#i+2|&e`sHCG~cPOa8f*lAt!MC z3GbFDx(M&Kljggz6q-GS=6HGa4&QZJAYplI2DJ|jyb$=BLGR!k2uuB!=R;8_vvqLZ z7`eDgB9?~K?Mb4+IQ*ggM{r6ZHO-P}YkW|~>WI9zN$?k2-IGHj`i#1@VAYcwZRcHz z-TAI-{=Z^%{v&~Ifsg;{M_r!&Hv&|=_|*3?cVlbh%J+_1gX_69TKj67 zIUgD-8*q9o#qR3+(wpR{kgR~ld)+_yjJg;~j%NH3JNX&J+Y{c^Bjkm-lD8yc+*?7{ zpi(MmVb|;M(`x1l7x7!pwf+yHaBE{h0Rs$DL&+CTe1=*N3D~#Hz9{yQZ2LkH8?+;A z>wLpC!^?gC<@0U>CP;MzpS!kfk&_^$g1BY&v=-g=@SWX6wd;+=r5P#(clx)&R7t}! 
zO>f3xUD#Z6mLVaI>E6V44G;67ApQ)lr>Ax-fM~%w9EPUib(wZjgKf^I*tLi)d0<0D z^0TdB`E&z8Q9?7cE7#h7wLq}yLh%@WLSZMgj^UOq(jH0o!^=9_!hI*1C+yYCFFozw z6&Uar*p)P?_vgyLEyRUut$E=_Q=URH&l`6EWyO-+ZI$GZI-WeBYr(^#0;`oOsilO+ zdU?ds&$9)Uqux14zpP;Bfl3W+6}I$~i!_1euHZ7l(PV!HCYodbGC zzf%U*xNWH@b@NX^s2N`$WMT;!Juwvu|Hf&wdtHW6_~rB*|GNH4H~7OVJ$&^UF;NO(&}kx3$E@?RBL*o@85%}YhMx`U>zs@ zy=Omi4D(cvvagtPLgsS5n6(sprapOav#1)*F>;GN6c4rt9+T}ZDa2mo+Kn+(K&nnM zn{)Gr(VS|_zUMcoi)*5A1RL9FWM*|dK`0YMT3PF)gI#&Fg8jfD{pl+;u*=?QLf}8r z`c05URWhyU>|PBmB5Lh^ey!Tn$o!#I?eF3>QwiSQlle6xP@fr6Q7)Fh8?FN4mrMm4 za81coelE+$E3PO+JbO!jxEN8dS5r3dgYWdJ&rN`=?{ID|)g6)c-JR4Bvtu{z{e`l+ zN0Nuw)59E>r|vqhxK7`SY23c0DB$(Mfe&A`L`>ZFd0BX+Iw3)e(!Tn6@e!vLyx?Jb4Y zwCh=Jw^A8TU7*=`n(25FAQ4`^h%`2cU-jV5$CB(bq4c6&zvxn01h*9k<0bmyGbkl* zNUXmeEe|F-VY2D(IA&Jj!BEEn1BB!lryJpzE;5~In|S9Ae-Dvvr!2JaH55gtOh!1F zi!KKXNGGWKN27J_>^sdtCXZ&~S(clgUP2!@eO5+b89kWlJzYfFR4Qc<7;^ zHX#Rp1Qe7(Gr{kn|0=aFzIu^>KeU3n2V}#|QPbsqXbvJhs!|DN6 z7=&66S`*@9zvj#P7o2?4&$x(P*<8=}ZdE}Aa!_2xn%pnf%z{rib&x9t_I)UDak^9j zdZx1v6PA(<)$PZs_Hp!g3llopS~r;vPc^#=khijnc{SPqLF5_lLA}PNQp(sLh!&3Qaao8EdMb~-{*VY)uTR(a&O=A z-1cnBf_pO1h6i%yHVzGbeesuT85g-@8ZJWh|I2vZBaehFqlzWqmlJVX-@1_(Y-8+2SifO9Ek*!}Kg?tMBn$781A&iNy$m*1`{w5^)rlbG@cfr?YCm zrfhjHkEG`)NyfgmJE?D}VP*f1_C%&d3`mh~nZcLV661Y9?GOA)`yO4Xgl4;hQpEyGTPw z-$vLCN`^<(VL#q^YZ>9#S7Uw_a_crLvbZT&qlNOeYBWP}NdIqMTd72>#@y}? zzL@uGgWOe4+ev3-4O7#7d(tdgsLUsI1rk`s(~JQca5I*fs+W12 z?*9x4b+8=tNaJ5@tx_msI|Dc7cF(*|efHSJvuQ{EM8k?#Ttl2DEYqJLR~v)MtunJ9 zCE#n7_oH;~H2VGEdqT2*@fCO_WaN1R9GDV{qr6R7B{JGaN1I1PR(ce3cJIE5EjRgV zHLq0v^}{t-KCR%XJFc`GSe!b~lJQG+6v5ILwMX$+AN zV%VWkyAKtqg)smCp8m?4Gr#Jv?d?zAN@m*3L?Wv-Fh-h8Tg^x|)XLBsDbV!HBmI}V zud3_@TIWopcDAV$kwJMV!y^r>m$5ce;p5?n`2^}aZ>I#jg{nl0W9>WBC#;@u90h2T9U^EgfW0P$dT0?5TIp39vqt~p&nUTA zhsVJ37HP{s(0mG<4y87eb23cD2QN*K7XtKvxuSmw4w#QOS^@ORCGQw8a}!E0v*sqD z8`+{s=n*ig2*0P;N;wyW!xxZ-!B(v4T#5&7w6P;eIeZ&fyzp7>j@_W%3POYym@dsj zZb3-g-{Y<~+SHuyB8TU)dG`&`n`#+03Xl9 zG_1n-G?n1 z8Nc1!T5hI3lu>ISsP=;oS(qH5Ay)6;Z~d^HN?t5h7_TTymEgWkMFnD2*PZ5=8Zndk z91I1e> zu@)!}^ixN&gPd1sZLcI_Jh^4_snW@m1U?c5oR~cIBbqn)Ht!0I_IWOKp z^^WkBUj1NgS>|z4^K+^s5oaG}hF=Rm=MpaPH^jF#+i>_>83YV{nZm6_PucB0m9z8n{w?p$3+BQm$prS9HXtDqW$FBKnr_EMZ5lUDirsdciOI4|S!yi<{K38z7icnU(x7qyh{S|Dzw4`nV3; z1GQkdH)A09#t!*8t%d@?8-84*guV4$HgUDTTHNQlrlmGz z)x42;IAD|E_%_8|{JQYwPkx=848|P)7Jl4p8 z>$yx5bFx7i?vl#ia|0UPE3TLn-GVPnbq1rgcm0G&?%CMOrYT5ML&hDAT-oGk-^QLd z6JO2&Wfo^Z;!eO|x7-95aUj6Eh}M(PmV2GqE%bZti>! zF13&T_4yjzRyviDo4b7EYfhAC3fl1n+CM;V56VR8Nh|fPgQQ{U>Ko|s4Z|ZG^XL56 z{%87OCAe@qrQ6qh^6^k|5HnA0C__hMrd$r-kuK4<_qORZ6tKENqv5|SjlnzZG&z@U05a|GJ^1j{pCC4R0Tx*VlLl|XIs;~fUQkWWKFNU<$>YUfVe=qq<8OT|>YKTmk zT-9=q3j;_S`6nBB;3Wej-O0N|!;y2`qN_0i6LNO0&-JAjCW!d4374~4$r|$C%$WFR#_N4ZQgeT_eg(2G|qa=L3pqE)CSLF6b~#iJqwEJ zAs?J%6xg7yp>ZowTg8q0NgWK&gmoZP9rya@%oo2a3i1T-9ulja$li-M96pt18YT@Z zho(AVs-$_zAK1Z|8^hp*@2=lccr5%ZLsSpr(Cpq@WqM;Ll*WQr;;{9i> z6(W>EI!0jZ`;Q7E1v*7N5#qRTrAerj=C%{@SzwKb8FAib$VN^^q?|k3jzs-(~HvhN5q~J>@4`N-z zR4zt)!Ne<%T!?Tg?b7cs@9QajaD;fQSNUMf8)sjD8@zCQ! 
zw3wGOm80SX%|%crjlxU~pzZf*2bOT6dJZEv71J)4n&c}Pjm`TAIorkGsWtUPaHLr|ifCbH{p9M8z^^*17zy|7@eQTBq z$bF0DIKU&lu2&EFloQXEI_a`tBK;zzv=V%k9el%Oxn-UwL{H#D)yi7_$3V zI(e=gn@Q*uAvw`>C21>@B{N=s+5~|?i-z(@>Z}o{%i}4y>y-m$OnrT$f`&S zMEM7&i3*#V*ZCA2#Kr<6d578QX@;|pmNK(@*{i26@G-TBZ6adaAEC}A=W`|@n}w5+ z9a*8#$=|}7`qKe5ULB-li>sy(8*Hs1gNUOE( z&L(xVl`f>=r-w-7S?fZVzZQ_dUDpMx`(R(3Sp}rm+TsDTZ$GttGccyI-{lA2o%8n* zO@kvi#kg1i7g1mDRTQ+|p6tG_{DTivbAw>&ST0kxZ_bBmKL=7!>H#V8V~Cvu;a!J$(T7-r8P4IO%=w6S(;N!*<&P z#}R*Y0T$_oDeh$mri4TYW(3GOrxAYnXUZM_bPBmLoo6Ranmkrx_JTVd8-3RHM6|WF zVzPKuhf}`@$-ZzA8Vi2GZ{wtya%%NC{F_5~xE4^f4dc}^ubq&%-Y_we^5h5KcLR0E z_p!qte46+581s8tb1j&RyT&abEnjj#)CGh3O`gQe)LBCa6KejFWS4WcJ}^){Rf8VW z)}&lvwintL*3E)%(V~bK{e7x|`Q9vq@8vT2ByOcEr{#bTKff>O|E< zyAR^)xvVV#^KvPT*_9Ov!IZLh(6&_CfKxAOmQeoQqWz)Hf4nctxNEZ_gqQy|R6nao zuW|#NcuOz)?uKlo4a9fY_r7&y#WUz7=IJKN@R&%(uYf-7tt2WTQKzAqy@?FO_&5;3$EmIkrsefJX$&=m5#8?rI9 z@y;itz_&}lc$0|%FU~}Ng1RHOS3{GqX7sN8~LgSqbfik?$eO`QUMMh&LP|L zciGY~r3~NRsi`4LzTPBcNwg-(5AdeGx3^5Qx34VYQ~f>?TkMCP)|miB|HabYa!g`W6n)|03=7 z<}*>94*k?=5Zd&WQWzXWXW0zMi?xK$<>58q0lE7z$2vpWF2g39`C0m{GVFIoBC6(@ zmhIV@4hiYloZXSCRh9P_uuY=79&Nm2M&td~%_1UuynL(z#DjUKU=MX2Z}k`eJ78e& zi<^o;=L_pc+>$j0Zj%Lo$qF1n2U3Q6Z73%stm}q?g^c%*UvX6O_m^;wg48l=a4z`c ztTJYrC!4;&>R4%bnS#x{{Hd%-Fz*+KNG}r$0L#26`(Y&f+oGR$Vy5lhZ0C(LSJC!e zBM(8zvSh{>Wi~fS^wFE(h8ua&i9S=qdAJ^Vg~sz68v$bHb%KI%d-qZ9Ci3J0-%Kve zf!7FTL+pzr+&622TI-58O+?Pd#t!X!D+#Lf&n_mdx!KI&V=PBG(9mrOif_m~e4evC zYhff_TuH%ubup|&yk?|Qb+IG6)2>fm`tNGjzDCdZ6vYwFDP}0HHPZs~BfsKI1W`j2 z9%kR8RCSlUH#Mhurpdz{FlYdylOJG^Jv=oEBjpW(*Dn`iV{6~^h9XK7-nEXJ&R5Zk z#s?#eyyw3qNZ5Pfy2`b-&5@JMO<|w>sa_7)AAH$)jsC+w_!f9Y;Df+}^N)_XAzN@e z_VXl_3#m%Ts`~DN2)K}pAtTU6$8|y0Yiy)OF-k(R-s!U?Q>OZ3X+AO}(G}&MTAL8a z)}I{C$!~h1&atb^%{$ijuH%qL13yTzN^9r%6HXh-N0hWrg-3k;_OXnL9)GJn1NTIwQ;vh(F?^p@{x z)JChxS^t_a|A>*zYF=9eVG;3kwZK9dz)s(DBB?Td5ff-Yb(m3NO8nnR&KY z(d~V1JS;#=6B%XG!>~hiv0rj*mGVy4GEAkVbW`(napkFA$Y)gK);Whcik^4@Ua~4t z_fRY*_Uf++&Lrv$GG$Q(IDi=#{;W^>js><^q&41G6f(ehZ*|HTOm=o#-c&)BMR?TY z@Y`W#Kz^QER%&n|#T$KamHBu^ZseWr(Dy)b5mAh_>)D7*pFG1RYwoL9pbcF$1cwI9 zk_i4<1D*gjxp^a0B3c_Ug5yb^$(igsr^vHs)%bjjY7r0HoB?^|y)~n0_YDNi1%B`q z729X1rP|8quk&1jk)umr3$Km1<du5Zqf2kt% zl?!_JPlnZ9x+4x;>5TfpmzIC1Y=oWP5b3&;Kvu#xIlBD8n$*#SbQ6OO(OoYg7RgLO z!I-jXv!B$9mqrJ63OyHI?^OV|4a&G9W=J~YD^hLNw#G|OP=Xfi{G7)f{K4nGH{f;- z5@L_!QBg>5F%<2>_I-M4w~bO5(qA}3F`HKq2c^k zl0FlbFeBUZ$Mur7{xX*Xl>vQ^b1TvEQBnZgBbNdid$?S>^RVl*=)nb~kQ!#h#4m2p z@NdNvLD*ES^@Uve0&bg(@hW$vMZ_Fi3252gtQ;j532BV*P!?G0nztV7fh@abh{YA? 
zQGmxwKHu!5&C&wTU)N&BZVp_SiayGkH1sOS+q*F$BS_z>ev=nNOU36SMuXM2 z&i%|ok)@Id*88@4#H&BCi8Y-wnnetIahPLb&jjV zFo7B`7TEf?&xGvq$WANH3U#el{&y*RF=)J7-F^SOw{MXr%AO|s{vw?yRgRjS)9OgT zQ*y=e=Y4#T4Xg`2mqKCNIFI9Gn<66}GqVo3W}h;j~np~S+r3pEfdmnrlucSqShEuI=bVd zvp*S-HXU;vCrjSea30Q(>cMwk9q~_~2N?X6&x+!LOcZo;Evri>u^2a`_?7xj6~2Fp zIzcHUIXu1tWC@t3buy!FBMDh+e>&JCHPl+e$V6iEcF5pIx;6AeUH7{h!SLDHS)OlT zd+>vAYR({WUO*@NLKIqV7$M`koFD6DSn&<+jvhp^pqKp%S0`1T$Q!&|6SG4*#ylJ8 zjXewS>}?Y;FAKXt#c}kVW!krTC%3^5zZ*#15N@@}_zaYBFeml*hU>M@dQ|C_O{w1H zsZ<8KBq6`I444@_uh5jw_d^rk!i5a;V2t{!91Fb5GV1p@$+-5;Bw+BR7Y|^P#>Awu zBFiC0ZeH5NoZ6cUQ)`>Qz#eTG5IeZH6nNe2f9|LD8k)4CYi^M0B0^TcbKsXqwu5x# z#1ddcvxBr)Cuv%M1uet+)NR(}b`Ar7|Kh){gbfz@9(DWI`T>xm&!W7ydxHE;n&Spw zIs)ttn!$UL@su@>Id1;2O0?~$PQdE*AAH_KbWX&0%Ikh9Ne;<$P3*ao#G~Od+LsTi$Vn>y|qS0 zkz948O!De!vR+xovPe%nlfVrNAJ{!LcV3*H#0Lb z>0o%3-&XVRYpYPv?z2a|EAW^c5q*d*0A!13B^_nUPBR$%w*H0wU;gBK>3XKe=Buv1 zKRGv8Z}B6xE4j>}BgnMSFv(twW>e3vo6ic}K2yq(;khrJE}lUZ>A0rCV{j}jiJj|C zn)d5wsZIX=?qklNRU^-IFvVB}I&A-z^sg`2k_uR8`OE)phQ zWGXE4ru57gvYm%tCYF0CTvIa_u(mur`}-GX;;T_apL}6$TQuFs-mkhFtJ91svW(N$ zhilRFfAG=yh5PpBR;ye)gHLP|bMZ}LHq80#yZeTg(}Ay-%Te`UM7okLFEu55>!

zk0K+oJa=Svpfy@Qd%-$87=zwL2ht_(1DiFWjO(|!Oe>ZjwfL|qEqAkw3I5_Sk$-lj zdkbFb5{va4y|I{d2=o>DKCJLs(2g+EIl6w$&N1!m@V@@MAMCNTh-3XI1F7_WzVEQ?^9b(%o;dinveXhs zrAW`ZJFGXDcs6Vf(H8Y(+)=O(Sy7|wt;l3{VQjXgcEP_Y0}B&+BJ~j$7S>`lN@6=4 z0M?C8Hx`w~9Vqd_4Lw&FIpsN8A$6zWVZp#hI|wtaU>o$dm;xd*pIml^ljD)NXyYX3GHuO27w zY9L+Q?)WB66Qk22w$N>$$W22$N;2k+5yj^@tL@YIiqm5L5*NLz{a5f$9784sG&QNm zvz%CB-Eno97ZA6+Fj~5?9rNrNg~ayzY}~+U!Jd&Q%Qp1&#d~+%wHC|SZ(`b+bI33w z2SU#!&V*$21yZP@(0+>m+IFT zjs#*SFk$F#fR1C1;E&c=FL8QBkBM!g#0?c2hwKp!e1Fu5bN27`oFr?y-qbg8hwv#a zN(y}7>yx^xfe%O~PK#z-0+oO34d&EJkGsCnuVr`0oJRp)@Kriwf{q(-QFqmt{F&cp zGC23OkZ?nksQf~II@I1am8gvhPzx}Hqtva<(8NdfRe_-AlJ`_+K? z<1!|tyFBHq)Rn0*`4ed6?x2w0cR^(>wsXhXkyRO9dRT4P^$#bYc2Q^0<(VFG@#BGX zfzs*6f^fbIRO7pK%PuCjSy$2x;_RVe;fBt2wh`szHg4O-K2ki*>eb5;j1e+kZdVz@ zk#4-SwZUCZFW#CKmgo^XUxtCZIFw#i$pN6rI=0(Qzbnk0eXxOhT?$Nxpydqy?c zzH6WOprX=5q=(*An)D7TNEHwjq}NbG3!xK0LFt5Q=tYXCKeF>r9IP4&z&-bb$4PsB|K}#G%Wj+NP|-Q{I!ot zjLLO)A_b=01xJvaARueav%RtWG1UjIL#xwqCxM>6FIhd${_*t$EtQ>|`{)z5*R2sF zTSNrlZY(B!zvce5K{fS3t?v`R?pU4^IsCmVl}71WLeP3ChX ze$DDzF3xb475K86b*OcQ_Yh-`X*yQa^DcJoI@wkVe5>})N$IB{=jG@*@2%-523`FUou+mhlYW?=nT{f?t;*3`ei4+-WDXX#lPMva&(DZZpIbRjaJa@4{bu zmgYUDz4NhUL`?dK>ITr~sPeE}`a#m^D*cE+EgFa=+@HeO%Ig(`92 z?zo^5s$t!pPAU0om9h!{Ox8HYeF(@7DNbQ$^Qe(w1gAR25XRe(iIivO{84C*`->!O zfvgW7ea~-xy>!j~wXGtA95bqa`7mP_pFHSKuiJX>+3@pC z9iw6*BixkAgKA^=12aJS{MxB9y%mm6d?6ewYYbGK#m_N6 z4`W7ZUgN9(8L!{5Sw{jY?}gZ2$$5F1}dE z94UOddgfeGI%wO1Bwvwj4ppmAM3b4zv1QIs*+io|Mr?SNqZ7?se>lpG8Iw3#3o9&& zcinWXzcMi_U7+0m1LMS*#SXQ;rH%~|ip1HHF39IRb1?yY&2B#4%zEP27$9ItXC*pO zk#@C4Jyk0#hMglXsA0i%3qYgXRVu6M2#mUbh_FrkS*XhougP396}@D{2l>+o9q+@f ziX5(_n4kT>t^}wbbaoE5r~15Du{Sxqt;Q8gpDI~SRsxGK2Q}=hS|B}k>Eo%?({Py# z=YmsEy1YC_w#Ck3h157ukmMOO;Fb`>v&vsb`F)lyzWlkY+YYNgw1%12ZXgU>XwLRb z4tA#fJaX?04_JeJ1<_7PCjHUWG>v(8u9RHk!uH$`&IM>pL2|U9tta1mnVA))V8pTb$Mji#8rX4 z4)GpE8^Zjer{8|Yp**}_!G#Vr6QsKZXk=h&hkqidGD*L)YHq&URSEM8WKwdT!%mY~ z6Gp4j?!%diD@@2g@OXZZ-ak;Ls&g&#D>O$Zdilw)(E9Ak4Jxgoo;Kct^GCLSsaEGv zDPbK`FaHG%p(Xpg)R)?zG-zV%;2OEt{Z6vb^Pt&OZt@g^OKFwNzFYI&;z|BdIFmyN z4^5l;>8h=!@^)Ep;?pTMc9b$xR3@+FI{T%s(aL_O_VXJ}B+29MDBZX9fe{VV5w=Se zoPBG8+PJ}A^S%e|iLp0NSk*7Dv|i#0*VH8;W=Q%L`UckfYLNoCjVqmS92<9^#~k2V zC`AmC`s?y)+qYCA5G&|lGr2c}xJrS}igy~zh~M{^n9Zy*>dlp<52Ns59E=xSWnmY( zQXhJm{~@L@L{-Karp_KZ02I?1#KvWfKAU9TCyJ*kF#pKLgQw16^Ckv<+6>X%E1m#T zn=RXN|EaUD`7A@+*zsm}=2gsr>YL5+YS&SzBL8A97cm9Z-?&NGuG}_Rx1TUF{MT}Y zzqW4YkbMBA!8!2xq$GNe2JwN?Yc!(6gOUL!DkV0&R;rhLV_RlsX~fW)L!wthnf2&k z0l$n0qGDd1K-0TNt5LSX9M2Mq__MoEp*T|+lm$nu@2*7@kO=SRJhFyjK+yHtnlcmp0o=36DZT&pK z>E8QFM!C>uV?AWxeV2Kf=?j9JRuq{crzqj0*l}bNh!ztFLg7&rW@z_$VmfWtpOzt zWQz@uh*nP9%FuZ=mn%x)z;olxZakMZ2}XKaex^bv3m7{3{3gwCXkEFj$S_c(U2DSr zl{D?Qr~ztLxq91jFcw$j=US87Gr^MVGFZB0MMEtj>MPyc=9V#Ee=ySy4wlNL1)t~& z$Wmmz69sG9Y@~PW#F8xd+=gM9_x2BsW4a*5|I(Kwy=1|ENqm9SO z$I~+(El-`44Hv8Kf;5iE+{p@*L|SZ|(*<=dJ|%LS9Qkcc4_yiVt@{0oCuzJ*C(IDH>P7 z4{w9akc652l3$YBCQCh0+hQ#CK0v^l^a=H+T-ubaymjx^yfw#iCKzRTWI;E*u#WPd zf5yFkICZ{By_`SQZCfJy2Hz`So?>BC;_)$T{oxRuxxk$^)eAJ2Lk>vs%wL>r6Y&Ol zns2%czE<=4r(4*ldNzI%z1ZsuWQC?iZ&ry%Sgvn4+wb|(wS)s{(@BItYLQc9KQI^mGBD@5Ahm^$H|I-~!(@e` zu9;R7U`xeAmb`Y$gY*%kRAT=Y@TLwp+u3;tvW5QuNJxil^PBc##f-Wx ze|)7BcAwc9xO47J)%+e^9bV9KoO zY@rPh{k#1WoLtx*RW^($^6u*oTY4MY4-RH|csfH5=9nZ%08}o?d{`DT)LMV))DFUA zqLDpmSr+$Zjb^02)hB$QpG&p<{UGFHnCS*yQ-4;H=%rS8|CgjW<}XPLkHs5TI(6A1 z1m~|%`Or4u>aH{j$pIvzJ*0X8_|8pjP%A6b&x6!rf3Ma1%0M3W^r6QZ9f(FUO7xsL zzsqK@#Sp5}sMf}s5>*4reAn`GYi3pZVewP5a!LOWH<1G5fEz5V)CR+QlS@4pJVo|$ zr!R|51YKGx51ASj7uP_+>_Jq2z9h#~C@6NB1uBVwWMvMMCqV^)q>ARBD_uz2{f=1V z&Nhte8V$HE@HmYoeyV-N$1(vhJ<;35YKZ%jKCa{}hnCsPl2b@=$V#D2$-qLiqd7t* 
z2`@D2m(M_DEQzphAR{e@MZY<^|GlZ@iv0Iql2M2KS!oE{1eDLH&?EMDCu)GS$>oIq zTl0qk8Q+f^nw{gG{*mH!aT@@xcsk3j{j{Mnjv>e%qqC9r0Q%_jc^mC6m zNR#z6lXl>_B`>PtEbuiG!s7w5_&xF!)4O+#)$VB+5y*&)DYje;X7{C6z7^k^WV=y2 zDKb^TXw<8sQiCSZZu(d~b}n?s&plkM;dI5L?jW{AYe0WKdVya#jJp%6MjuD%z3w}8 z?J1J9lE<|nbmTT~AwZpD*v@>=CxrEtr4(~K+VqzV1pP3=HBhsHI3s>YX67hNRVURx zgcj}2H*&|)jC4VLr^#eqLYq6zu<1}m3Z8T3x?K(1jGQ-Y#(AU;xp^e@M79fd!SsCo zpRa5zLbi1D!FLH6knBVR6CIqin6a1SX!7D5bU@`8y|KYS`eCV=lszt&UrMJaJ^_td zl7zV(5C3C6btMrjpdfv_BduqDA_MZuJYad#{8pzv{Uz32@0yVxF)FN+@;N0I>b9DQ zu#HF#k8WG_J@RDEnt~~dj@ONktS~&=b4L>mjUNPz$&rMIApg& zk7Ilnv7nM*CUf1YA4rirp6JE22Ebja945;p20rKTAmTp%mikK)HM6Z`XR)%FhC(ob@^V0k zg;m{lMIEi6&h>IsIUgXY&K=)jFUG{T=Grsp!~vr8;|ZApeFPvIwxLLO5&8gS7ufGk zz-anvFFL+AuoEu*G`Y^}FqXl4Aqntg?R)vT+ufqrkca(}F;qpcX#UYeg!@ixyRCx> z_j1`2!zX#|DaEh)rd~#+GeqtV;5}HB!$(CV_|Wgn8Dh>o06p!0TKs$#b&7NU z2*Ghd)Ypiq_|O|~MWzwzVp!8G0W}@g<=rblszsKd0g7Ugn~c|=^*Nw2=hYT9h#vTY zxATvp>BHYgCN8KGMXi3q1)v)a{J&f1!l+yf**#Z8z&FD^d;MW8M>j0|%hiuszoev6 z-wtLm!rhf+UP?4^J9-5q2@ZnRp3@B^dV#VY|4#9A|25Y?7bel`bNR+ossOl(UzG>U zU~-7Abz`ud8z6oJaXa9?xoed9)6n;I9BT~r2#fckV3)gf^3W_&MxshGx#<=f7 z2-@T|ub9|a#o%eOGQ#?Sz=!7@3%jO?L*X<60Wo zm(H>fD+?l0P3}vz(=w?Mg!Em>=vJ(u?br7t{X!WWr2R$#5AGnO<{F-u z2(tw{Kgy+;zwhT1*D}N-artY{L~fW>D$A*KU5%(6f&kxR0W`N*vJ8d$Z6^rN%YGWE zON?rS0I79b@65M7@d|iau*HT_nmsB1IJEV<{tsE#Dt-k&dAv%T(jV#4$&8@%H5Q%T zH)odUaatdPQ2c^PQv4cA2b3g9n(K1DPY4-{W{{_Dy~8o`8Y<2*!1%|sMs~Kd%mDNa z#I=)FvV)YR5k|42^wuE%vl~@fo6<7u;V=i0(611oodE9z>#A4W`&1u1oQCm=ceX~< zac3bB1c`lG0%d7xji+7p@0mCU9zkJ1(bb&p<c;J#l{r{Cu=G5m=ilT&{&a+G5JxFSjA}HiH!W8m`VSIMgH;N-TvpH zxU66;EH9L9-t4671{hH2HBCB%%NyZxOs1=L;Fos=mQBUVh#6aQt2BSmR5T#&_(u9| zHfG&QHLP3zVH2+8AR(cMz`RFd&bJ!P<{oe*zDzHj7!misGE@ntrd|wc4cM+XwM)YO zqu#sY__3h!VAxbCt(q2XdN+os%GA|L;CG&qRCqU zd(^-)2vZy`^59h}qXj2Cm6N~?o(6?AvvUM%2Qu_Dr1e+=HZcUF7>HEBU{C)KP-7Y8 zR`s|HiN;m1Dx~m*;Cdnf#`oZjJX%5X8ow5slQ9V@XKHX-ys)_VJsOQaLumP97wV^;J!KmxvA=v0dEZ1pcl0wwz;X`IIWGs)nT2aUIk(5mIW8c=?K43^oPf*uRLr4*+0=N<6y)&1k6ygHc>`7)bXNw!Bh zO1r4YF6~CFOt>0`RNO{+e$&hR{4WWj>2eIW%hajgrQg*Jhd=Qb4KU`nmF8FxQpl5U z*B>i(+e+uElxVB|A&tpj){cJsY*3Usx}4FX_`%={N3Z7N=toqvH2Z}{jO7c8NZ2dkpFs8K{ZB1S^V{r)BTG~hT`B>7}cO+4ogxdPLQ!*noeYFkRlA~;Y+fC@Jc z=&u%!K}~(XN^C$8K31U~mJ@AZ$!+HT>#!1dS%&nk?TXmzTAP|rj!jJDciAaJBe$~5 zGm##XZuiQ=haH#3o>%OYC-Ey>=Ib_1ks{g@rSiZnb}{v#J?{pvL$51g@f}n>I(_{# z@l}Ak%pN=KR}pe>b+o|BmtAN)?;r%$A9cuUlX1COT^7J2c3RM!S?S3s6Rqh#!Dn)h*!afHK;g zYFIlTE&S^R$jr#n2h)7{y}$#EyW~bPZhbD~vzu+YJYVN{IFrINlEAcpE!E3HudO0o z)fmulCVoAgT0=i%XYyd~ee$ds z`e#(!XZ~bSkO4hI{fAO56Bl%AfN$VmdrIPO5a^X@R7B%ZO@I8VC}YXr-FeVsq=~WQ zZe_o`&})Y6d#uTrppB}^OCo`UUIz-VDzf@#dzq?x?XLd97BL2oYHS*!TZI8QIfdjH zlYT1FW9=jWc}qQM%o-K*hEJuW|6%M2D;H7pz6$VL@$1PtBZBu@eA1ibG7VC#f-(~u}To7%j zX9O_|{4hQ$`7zFeWBW31FJQOUk6Jdqt69kODORZ{@!RHLkQMoq=I#x-v%e%&Kl8!q z1Q}I7A*TN>On$visGO7ui=-$+bwu|?YTF?QtdZ<2BzR_@8jUSX{e`ealsTx^*l$Qk zg~LfDwlcLAK&7g}Y(IBJ64x9I^-ktkbNGq~ z&q^;Kjv6;PYJd%)Wr(|B)Bs3~=n7Ar*`lm0;0|fv{KXX=e5mk=#rS}EVgEh~MBnG3 zaJzQkOTo?q)?p~pG6}DdarQL}V?5#4x7GlO+CNQH)?wYLWA^WdLo}``f3S2(sd#{BhM79jnZ42BN8#aruDp`bp>=SIKizu-@H3 z6H*hjd#3JsfWNB>*utmIlD2aKq%ICbjvxEK`arVg-t48Pht#Tzeru5;yVdPlp4E+B zKc3RwY3Q%rkwZJYn1n09xAb_neHGiZ%m{4~7kcrLSL8YQQ!QeHYG;}Ijq(ss`%u-I zW9imRgpgko8$+z<7_mETVKeXcDaCB>9u$~Iq=YXBbZ~z^P&81E66s7rPuIUQrkh_{ ziVNA+3%NS!MXol*+CP%U`qY?|Ms`v zE2y$khd1ozut$5IW?O7eUFzR>_*QdSgY7qk8zc)_10Ad%3or>83l34AtbkO#U$M{b zG8)EwT8jeQohOP7GbW=8xyVHdW{)iZIH?tpZi$LJz}Tw$vQZE(`GjvgIAPYVMf;xp z730T>J_B!wk{>G}pc|gT(P>2fkf^r*%2kW$(}()97&sKZ3ip(&){brIyg97-t=9&^ zjPc=dcU=fMvXG^~ld^QmQs|K*6BEz*E2eVx1hkDh>ngt%w_WC5-O~~r9e1uTDVje@ 
zLhX69MlPm*3(pO4b>Epc;s&z@Tz>f2>Z@N3Hsm5EunQ>Q53I~a!9IwQ6{YxWn3N68 z>H?}e#taR{MiYK|6np~)aZHp5lTNF1Ci;l^4qVFeQ`4(8IQHCG(4if_BI|Glv(s7& zE|*^_?a}JkET}zqp8WcOwn|RskeY)!b?E0>ZzvLZ-Z%TQYiz|yUBW|2W`^bI4J_!H zVicPt9d`#5ILN*ui%m+6h4bIL)bbb=Q-3HM+EH{GZN*(|5(}^*)L+ zs&#VS;lX8L)zJ}Ge9z{`rhL!dKbxynaQ24uSZ{Z4W~qJw5}mq9}Tbtgc? zy8QD%c5JhJ*3`_IEuQiQ_P|LI=^^n0q8VS%b)a-7$d`dK-gkw&qZAd5a@+!IWGRB1+$ zfI@;UK8}oZfZTVj{8Uj-_~osGasW~-mm;HLh=G5?l`_)g4Pjbp%{RKx+ zD2w4?iStH$%x=){RTI|AL7-=esBS?*8{zLiN}!y&IFkT`mkYG zL2>VsWMQn`*N(cqOg)!DGQ}oGY%Ds@1dB5`Os)nc z-K=iOW(&(Bp)K!U!q_7p>=!Ax(;y?`pI#cn*|(+{fvOET7O(}+fa3$Rx%)AK(2%UM zE;Ah^wG@at>2loHY7*AE8q%qMWoR<7_@+W0x7A(4dfhTI`0!oF9v zlklisujUr-$mB@ZxiXW(C(3uWfi4geGxim|)|23%Yprs6&uT^Xp)gVOmU}O`^#vg7 zV^-7rY3tP&Fs-E|*Uak6wWx-*nOT7d)v~QFs|81ujirUDa?lZX{`Oa&F`!58ps=6 zEKzS8Y#V5Zoi+auLmJ4g=5mZGoeuBFA=qAbcE$u_biHJ=tidwDdJx*zt3-QI~5KppE8|+IQeaKrOiWC z%-!#$*P=<1(^`Lhxf)sY_@T_i=p%5BFrI=DFBUCL*-u=%t z%^@M%XUV1GjeO|RfHXI0aY!_CPtEPxA^xSgtg8UX|IUn1&r>TG3Ik09rtXP_a zM`8;?ekd*FBA3122)Y`Ckqb@P|e)&AHU4e1*_1(tG5py_$Y-$6K}CK z(@2lQeV7BSPwP)a#&Y>P*Vh1BLya}gid!}(b|gsW_pF?U-33S zdrH4eKL}DZG=&C_|*5AV*?J}iY}^} zi6^!v*kw#E1%WMOS$cVZS=T3#)6DKKfGB z$tuvu7dy|brSho+X$cecJ{UQ3g>5m3^RNRWK)Pi8K@%8p5vEJa7_0it-)!!Gf zz>^8edeQaX?B{BdO_gZo3oZI7=dDT7uO9op*xcgg_6$Ohyy!R#yEQOV)li#EPwZ|J zP0c*QSjo|Oj-@Mmk3^7Y__4s!A2z_wnO*0V8J3-`NMG@hg6%@&(s#4bftQAL78X6e zmqW_^f3$Slyf+iQRIk|K5ub5`Z7n$^JXtL{wC6Ku7mWbNr&qrFrnBxHaF517Fm7YT zWtk%`-aA#*Y;x!0AU0!I&Q*K5HmP#mK0P`xsWzQVU0PsvaVktsa^W^Cn_9OIe^qoE z;1U7YO3v~x=>tJ>DdI_*O9I(ieGrdYYn{{VC$i1E)~$&|9ZMon$8-FZT;SrzA&_2h zAyv#vU#;p6z3_$?TB2Z{ggTD>Sy6{i=Z&o=ee%f_>J~)i6(siP3*S+;ApB=rw)zUA zL%7FW1%xWt^?`C1bTs$N-5LW@VV1{qHv-IDC*N?;SP#pnJ>T}PMf~{BO}FEaQ9`C4 zCeqYGmZ#_JL1;hpcV2n_a9qON*3w$*d1=*u@4Zb7vD{Okt6f<++YXvd!2`Ut>G7m# zRd@Pl$&e~2BbnUr+uXFBRXzQ|?TIiw=DVg7<)9CgAg?=nQ#+Q2N<4mpQVN_n^0x7K zq@;2wjc44!!7)Zz*Eye#KPPfq?KT$}$JA6$Fv&LhHVpiQKI9!_wZ0oCAyixooGF@-cTwIKHtE zIoJk2+{xg@29vJ1z30ZoDg8pewIB9OrkVFfpe3LCu-{$Z^;qPT_%`N$+_l1l_)!XX zlOO6Xvx(aS=VWde_H@JCZ%SWgHE&>p;7Ic4ZbdMfXs7B^z(r*r#(*+9Rx}|9^Fa|u z=BYcR2TP_rUK`Fd=mJkQpGzw?%t#?*!`$eWygnDK<#~O33S~+kc%GDWFIjz45R!%B z3=yA~?-?e;ZcST)#dJ(rx}M6xjT=znVw;on&-kMjk3eb4jb$OrS_ZoVm;r+K+oVdj zj?Sp9GaXUQfoCLe>6#BD(hK!&DAV_;o8Q_TmmgM4Ywv!k0oS`UarbkClPOeOQANDW z$2Z0c`3ZT*am3w!@6@Bo8cedZ!3EH=T-Mqc1k`5@4b50R=ed$TKNk+#5Cc2F#KZ$q z#&ZsSpYc&rff`w={54qvPY(@KzcNJER=3FWx(tl(kF1DPJ(@aYcms8Jf66+g_G@|_ zdeY*w-)7%qgOEJi`m#O6{lacW&iX3tun;=|< z_x_TAy6K?m?!Ol-i1svx)uQ*yD&TpTj5o1u$S^Zs`6zN~A=(OaT90WLu3s&ZdEdWg z>8k7X9z4ql@Aa5#nFu(LYoT%k-P99Kq0PrTI)23aj=gjzxZj%Y8G5(0vEueF=us(J zWG^Nnffyr|EG*1} zEqPUr%7A7#jIJZLNT8$}r|wktKz6p>@gUn9(0(e25%O?L--@G~@|Y%0n~r3#Rc{=5 zrhrtUAN~ac%tx|Dp@5x(((Im74%nwJHe^FsNsBPzRp3`p*C@av)Jpfg#DD@a>7Q~+ zx-A!+?T44jyj!{&pnWAOt?eP|3)R8Q3nk-nmM%s1;UtC3)iNkN5nVZ}d<#a7wp)X4 z*Q@#E1A|C|)*KK2k!~Lll=(|SW*XrKckzDAmqf455>~{~?B>W{!A>{q*93Nm@Y{5M zlaajOmJX%89U(d6hFazE-djK$D2j(%oGe^1hFrNCX9o&`=HADUr@%k9(o1b^keInh zk){1&9g?bP)Bn9H(lh;N*=;(mE4+!3ZC_(ApA7zcPeIvaLW!qiqFO3oYg)5qw1gUTKcIDmwia1&fB~yTa0;X7xG1|*uSq5`>Cin#15FwFp z$9%A3?1x(&nf~?9^G^4^>Jj>9i|7v9nS!hd{MyPsw!Io-I@+N)0ybGJav9yKjO@Gi6=mYR&Roq%uf@ z7T0oX$4c~2F9{`r#*3yz++qeS)2*goPm)oLELSEcVBNib`5aXP>WXQMyC-!Ze#UrF zJ(FoR51>hHwq&I$8Jm`6H_a&iZhHLPbVyK%ym4wi+icE-s-V5wD?}~4S%y^ZgyL)M zmS_}=r^cAZ(KA8M!u-(PQdy`($Sh_djh^cV{D(6YYb8oaRk|ld61@V(c-~R`wM(6$ zQ`=W)X|}!`_FrNy0>SJQzzXBen4G(OSqWTT`VCQaWK&bv=B?t4#&`WI{6~=3M*Bah zGw}-Ct`P&clM3WG#64gWdNt7`!gyhbQ-NTzxg8ajJC)`ejnRvc%a*xIuiesa{)J6m zfpQa>FR{XJY8#x5dgh|X7baAAs6Y$XhKbeZA_7xe4TtRIW7Y4w00V@4h-^fci#K_V ze#3Nj=WAv&RZ}+WZ}R@I7yH`O;x{f$4tHkRS|u9GnUzK0gZ5=p(n8}!ZW^&LyqCv6 zLO@L=I` 
z!@C}cm_91$I@==J5W~O+5{3{|EH!1s_Q|30<~w`Y$z`RuBIE>YE^W&gVg(IV0A0m4ldbl-d!9_laFAl^-K`;~QTqZ?oKQQgZLKa`j_|-x77{c=Xe9DXNj0 zGx$G{DdT<yW)&kCeooSJw-2REaB?t51nCHBv_V>tpR^$x$^eQ zj|)qnFuT7bhU>x3-J3coG||18ZQmZ59=BP9kaqYg3)i=qyPIsaht_+Xk}X#mXBlX^ zTCCK^dp9auSqu5y+$j1LQID5{UNgAlSk!K_X~P8Qk;*^NF(6_~NNiI&SOIn`=y%9P zW?1`m)?bo8_p&6Hlu?h%jjkKRP037hZ(CInP@>hrnpkc#b#4{buLxot_$FU;sx(}$ z>%^7Z=Sq>`qD444HK;z%t1L^C*b$$+UA^8 z#t8?)K9K%UzBTJS9oUzQBauerT{|x$p+c#s3)Sp#=OIbxr`RNN z{fZ`G^&5K-st?r4o%^OoKP`JaLg)-SOq1|76k*)eHi!YlC#uExK(pCj*JLRBy3#G) zqK$jGw>{cx{R(jqQS3K-Cl66A62ZaQ%tFO}@Y-)%NskBF&I)UEjg^Ds{R<~e-=u4P9|TKQNnrlpK^k7(8^=1PylX zG9(I+3@0XagB+-nOLX8nwunxHi`5Jbz@&Uo4Nju1o;)Rp)ISZ2C}cKsxx;2Vyz5_E zCXDWS|LP3^D6Wrdeql0p_f{^&cw~r=#-$_GCy;~bx`rv^|A2YM*C84*F2EGQ?EFbl z2ey?M*PhN|xjU>JPG=9uUY41Z#TaIq)I302BfnIt{a#BZf~(CTpX}=9>3+S-0ZbUh zMX?&5u;SEYyVq=ds5zkV9@Oa9eke?33nU4$8(|sU!H2e9a!H&e2^zzcTU-)v24?^p zROh$xtJ~IT8;eQ)c9>6Rw*D@KA2i&MZ=`OF#`G?WEVK0hzt4O>jjOHW`nN@OQq8dE zm^tM1+A|lq#m>elL=gNeD;6*6UM%glp!m~nEIA}Ua4qVO}S zTXT)Hjcifl4tcbG8#xs0{C>ndbG{GvbG*13M*&$0e`K)}B}Zjn6B1ajKj8xOoK|os zfYBan*Zu70E#Z5Egf|X8$gRZ(Bo7mpAKNIjjq;x!tyS3WPBl4Ue%P2=!PA&%v-Z`& zeYYA!C5e;-SrV{d57_;k84hRgZ2gQR=zmTeC1{! zUL10?wtBEvb~M?hHIW`VbW{>kRyU}b&}~s*Umk0#lOE-+`~JT6v#nNN_Rz^k`POZX zjlyJMhfL5z(HB`n|GEUMl6;1XnTO$Mip+#v^6R!jN!8oe@vNTOiQZ7bkE@GNZSPiZ z_RZ!`kRfV14|3mx@?}f~%C?>lAE@Fp@_1uF?L9rv?E=R-&d_||0Qq}iuK{vFz1x}2(K;bj3a^jWT&aNgWBJ6FC{C^jJiW^}{vUMCK zBi7ZshX==JTOlGzYO0N^W)?57t9B+*#ZcY9)1vkE!cLdDu1r>(D86X$OdCh8vQ ziRM33e_|FxE(FBONFQ4nozQ@Qpw3>$5Te0YxLqXa>xE&JFc?%U;X3+d0!QGknT!JL zTGsgMHo+_};UIW}q)xy{@o#_-L^1_Q{;<5B&c>Q$=OxW5X@H#nt=0=6(<|8oH?=Fj z?Vw9ZfgwSiFA1`X;WD9`q~^El&k*(8)nTLR)YA9er~t=~P|n+b%sNqf5YXJhGG==V z^&D)WhFAWua}oEWgsJ+ZAffAE6n}|SjTcF3C_6m8Kj5%)ESB$3aXY!dN3nkA*-WBQ zB~Qx2{m6DAi+)HKwhgVz&wR0*7w$7&VV!>c@Xehe!T7%<Jtf;6XOLDnRNn2q zsX8f%>OPQZ34hun_Re)o%%#HU^T;8nt?u?@#^%*q`7#fkXqx4QpIxw4_RmU2lDoj- zV(yUEaTiC@@hgmQy-9!%u-uJ_Ge^AdY{dzb5o~Q=rl3#r?tfn-^nWm1`2T**fuicU zIPd3On4yepy!D6^>vOW+(tja6XiY25cXW8hdjB7!hpX|x%Fc^3mlXt>wo@gYh*dFT zIeC8b7?axJ$VIsT0tV${@qeoQn^r6A!3kcN86v90QY>Bj-1yV9JY1-E+ku~Zf-T^;x|ZOmMkN4w=I6T@u5$iChJG?)2jY06fRstH*0AsN$zy( z{L#uQgo8utrRPGc&0Hv}#DU!VMEn|SF1~Kpq3YC0DtjQC$Y(GK*UZ8n{k!u$PPLdc zLl+Su@|JB=($m-an-7IxI@-bF&yB;ww~RSLsk9xQIKlz8EWvxpQIGtbjGx2FiDR!} zXWD?~R%&>)1Q(7p6&Kkc}p)1LW@(9VIQV5u)}mWa zHqL~2p7X{I*;RU={tf5zhP5$a03RAGN7jy3R~!tSJ#AsFLJv}gVKwYHd@PJLy2OT}O15f8+*V?);b2KjF zJf>q*#>}-bjO;yIuxjfI7l(FE7^uzY_lQ$l7?RoQ0N`# zddxbdMjrp{P`@|KX__1n-lf}U)op#vGa%Odra2JrvGBHCtfJWsy)kg=qGRJpB1YtJ z-Gdyc`wm0Of(_7#bke~pW4yB8Kj3~g%!KSyu9y*JE$j!%bD3>>3BTGvMD}ibuN0(0S5+k(xrjM33b@rOT(Lk z57@i0^G(Eq-3k#&YW7YQTBwz5I7W%Df*}$<624;x#;fA~ud2E2^|;-z+?$hVft(v> z>%=PLU;HLQBTGXwn-Y=HWBsW6d(OvsoO{I6eF^uef~rw&INLMg8iudOmfC zS@q_N+)|y*kJlm}2mNJ_5pEV(j80jJ>_AzjG!!NRrH*57t9^Bzr z|6FOmQ(}lNF?7fn;Ycq`Xk3BK6Sl^lgYqdy;Jq?-OCZ7n1llbS? 
zid*747Pv;qpDaJbpLp|2BJ!yH*3U>D&-9jW#);MZiS@3D#~}MJLnK#TdLbDTKRnU= ztw-BB=6lw0gHfxW(TX$PE5YO)_XA^mUP`~C=c5AYDQ|Qq){9o+7byOUGm+lK80TMh|pm9xtyF&;L2_zx71((Jf*MtOz#@&L4AdR~P zYY485TjMm+jcZQ--kEo1?wmVkX3aVG+_`t1zgR4q>Z)2*-(B_W{d}HhbNAR;r%wHD z`p>^Et0h~O64=_mxgLu*3m;{TV(&nso>1BqcB{+HE5RWr;CPjRZ0Fc=gUs)uY|p}e@fVUtlz4TUZOPhbJIyq zQ~x+kB7ID<6XO(56A;%DS+oCaA2gj~*m~Sr8F#$9iPHb^)&D9pA4S-Ti0*dpP8@|x z^uQd@THO`r4>Z{5x>`FozDd7jWQj%-jd3}Cq{Q@C^yDh6Y|u)w{CqaGc7UPs-c~GN zlyAkz+o>8huu-wedEKYFcwUUY$VRyt{KT)gO?HH#iv82=&DpNNl?Jx-TgCQtNGk z#5k*>HhJb7*6w1cKir#i`zHr6B-Hg#MY=r#=&99cQr|@-hz;*9-uT00 z5sBp9uFJouX=dL448NK3i*bt=Ly3)|6Tvg#)hg(@+%Oia5lbs~CAIj*i;vV5aDfKO zEA?v++ICFSq`?i>QFByE*#ZbV4X$i5h@83M`UhTB@7!6GL(|^qZ-Y_1!N)j*<^$$t zcZ0-G@<;$^JNs8D?IufA?&;B#YFILVs5Too3LB~R(Caa6d?*CQHNgh64))iSS5(sxQ8JU zB8h!(IcIN!nzq3#UxfG zla{J!z+-K6?T$d#?(923ANRdF_I|@Elg{j=@J~qmWxysVW5l;ben0irxBR1Qjnqvt zdxZDFY_g6143v3;8-x|m|1kTrH?aDOb>V~2*#PIrzTe`#L;>sV3Z3K4Rk@oidtjIV z6;9_(lUhwX4o;6nT2J5F-5MOx-_G3q*$jE>Wo?Dx)L0ds?5dg5E-{->UK|Mf7A zz7!RD&kiJ^+KNwrgOI^2oQC(vlnKdcg4OTvDNs`i; z@f0V%-QULmv?}a*aqbyn^W5PwSmrL?#>@d`2N3gh{7=*ePSJd^5Bj1{XJ_Z`)$jwc zO;6@B%|!!WNc_+ZQ9<pGJfJscmci&bkK4oGr^ja)8}aN2rr9;=1?`h$U<>R~}{)V6{^ zS9^L8d{PZ#o4ia)`HRZBYRtu2bSaES;d`5BQ?K{!h;_OF4(evgKd#-ha&=DEOju;F zAlsikl759F(8=xXEG1I0A_U+e8jM`4;DI)y-U@G zfZ_rMI(lM1e+x@1x%R=NmekXQbyU`~AOAA7TM0g~DqSqc_0vXlO@5d>YezeFgM10H zbA$+X(jVKq_)kE{)8D{&&G+B-&!q;Tgi z?LNETsDHa1-9G+kVtqN+Vf{WU*&E?SJcLP*;J68_Tf`bGr4{MJFPB6)IUgevtR)o2E)#-& z$aZ_8Qd2?GMz0m?6)k~bJwq;=gfVYvW1RikpTvcL3+C)xEOJ=faySeG;0$7A?D9df zv=Zmatz#Jq`ZmoBr-5X8bZkLWlVLm3qAw_7CG{*BK!nhW+n7M!tsiB2hGb-oXqz_XiBF&GNwt+ISo_k6fTm4k z8|JG$&`6J|#?i~!uRrkQnU#+38-)<1%jh{Ahnb7-62M_}d{VD3ti)=N&EM|iO~2l{ z>dveov(sf(MfcXnY)T@!zz8%kX@%1rF=9ZvjnP&Woz+PAEhL6J!-xpSB&5_#bb^j9 zhn-~%z6qAI5r}^!a>QF}?@k*NM=!W$A`ltJv_~V;>1E$awi-NlWk2^dbA!2bP*qG! zWm9sPkO63bBfk%y>=XBsqR#_rsyv!>m1rj|B8W-VJ-)^`aOygkY)cuo*Q&Fvg$o%a~W&kC@CckKZ&9up@$|H7MbIz)f z%?0rX(o%kPlaM#nlBC}pZl$S%%fg2(%p;t4vG7Uwx*geHJ@pa&EW`hOtH0UU_wx^> zkiofLxtt(B>-KF6V2Fa{=iNSj3@M@ZcKzE@NzzcLB|@KyeZzu}SNvt_4t~NLmHqrY zYMi$TNaZGB?KrpHODwVx#_lP3_p&QNQp^sccb4KKP%d`1N3$CcxzQC|soYCqI z>wo7U)wse~Ar$Tj2HE41T!8f5ud+6h%R8G`->(q2D(a*aq%!&4hUTT=5YrF8iX0+N zsB`bBGGb9UK)=?d1_(5~Dw=diJ<#Utjm?8IE}WbgQ? 
zQ%Y$MzoENbQw`M_!?HKs4EE{P4i4ywWj3-YOF!A6S#b&?co-d`6qahJKfVMt`g{p_ z1goTx(RrNQKN?xZc{vnHq+!)kgrPj{NdAEveefl(K;iykip#Vei zmqRSrX^(N>?b!ym^EW%r6^-s71+@3xi4*O;hrWgp5Er@XdONoo-+qiwVD)_)r8=Ic z*q4cUFct*4Rq6IBac$K}T9*vbwt^#Msr+8^Jbkw%B4?QU9RY5`4z)l>itM@M$LH0$ z^gRI6te2ayRk6u1f7FfAgGG#nc|M1AS68l-cSoP9s1CUwMyl89&Uu?}3Apw1pVy483bEzT30oV{I1XF|^+5B6yD zH+H%w%eiBHwBx-Sz#kI#a}}%` zmU%$*u4P(Azw$`w!eJ@hb1n-dE`m1lfbw0$CuL7yA3i#&eR;73T2x6NQj0y)HaO1v zN>3Ti(N8c3<7jrj^nO}Q4Gh%`X*zXbF>=X{c=-q8jlTj6;mbEoQ1AAj8||yM3Y+Qs zwcOe{QHs1EFyl>np%GaXYccthr7$is-pcc$83T8>BI8i}=L?%=DZ52cf8tTg>BEfI z+aZ>lk*OMM_89IRcn#|N)}fa~1Svt8#4R#h8N8s(Z|>fy@2!Nk67t5b$GH%ET&?Yc$dBIt@qj0PHB z#D^ykX#*c>&EH3DZvkYa{Qh9b$o=y26Xtl~6kfK0_hUm!Svc1EK=uBU$RCpE*TBPnYTzA6&u$0o%uKxT~?GA{}w*9eKLfiPJ(<>TyF+Puenm8<<(_a*$ ze|TJQ%CTe_Zr>=&yg&@fWOb5>cr{0R<&K)KBG1uhX-fLPwV}^>b%!3)$oBckYI9Jr zS?7PA+hWWvudix9a&(;E|AX-rFKCsTr^n3RG8gSFXTzFxsq4-w>w0Ay8yg_8yV)_y z=cQMZ&Xm<}2K>}m?`LIu&QhXpHbAe7-M_W}saE3q@~^#SO3LGQB338`JZu&^m8&s> zA_NO#<8}jnGZE0+AdUj#tGvQr_=r8=XltkLGU-xES!39@dG%GM*SO zcb~|p`9;IWLt?zv>c3b6datc{lJ7nI$bo{kJ%pA4a}k zkz}HpVWRa(Y1M=c>fd49nz1;^`f@n~^#a6woa|ga`%0n6y$^i;@SVHH{zI{1VG!xF| z(bN>~JVyNo!#ThpeNFn=ulI0!&deDnILcYv&nqd4ffxp=|D;Iv*LHQ*t9R$FSA*pOh%5MR;U?O!8GPvv2heMlYks{o5WqHqlTH%m7mT!cPowb5a zYYTS>Kas98ufVYKMin1Uqgyi6-jHEEBjA&-p-|r1N<+?;<=;qNJ8)dc(Hg!sh+O>w zchn1K$&zZVo!YSLz;?9~XygO#K(sA&Ns0sbaM6&sp-rbx^b|-qtPqE|9a+awg8o~Kr(Ll z80hK?yZ<%%W|z?B396{U?DZ4SR(#9Xq7!d^Slj1z2)Yz$++UEhn>79bw<+@aS(|Qvz>(#w@xOslzV<^2z?|cccHTjp4uVH2kYAs~nG**<*WTf2g6p z;^@eTTgt>`h)Mp%Y{&PM?c37$Pb4LdDJp*3d8!)xoA^RjJ3#g!kuyWq*J|vMP}Gh* zp?TP9zjHkvoq<@Bs_kaB66SAoUgvFh;*s@sCAra4POS-uOFMugb^W=k&`}hd;cK~k z0@Bx)^B5lWvH*IzPA)?yiq%G6A%$5Q_k7t{730N;R{q}4sK?BSJFYvxdn1^PJf-?e z^iHlVX;KA-RSa_G9(pK_Hugo|w0@3-x4Fw;7D9e`w;JIagQzknAK53mH1zzOX*%F^ zk;-(3*%gJab6qs1C!Tq|e!gBwNRva;RDRK61M_Y3Ii>UY;H@s)rym;r?4@3!1w_{B z`6h;7)i+9^4GD7}*xL%9a7cO~t+cT~=f@sP)85R51)rKyXmss)bDh~*c|(bmWZfSO ztDo61Cv}aUVG1`@!z@fx%u@`M4#Q)Iugmi|@{e=KkMCdBi<^OT_v*fL$hfq3KbXE2U%dOBXT??YR$7Sl-&s#PIh-CbFaEKnTcBX&M z=x&1W9H)oy`^zURx*5$!zr2!Ph7W0-9W|>rQ^WkTSID2%L@0H0{MFV{6cvNmqT5|1*MWt7FH9tPM_GNhV$n9X#z!3S=$uhifqLnK(0ken9 zHI7$#4+Wwit+0wKCb+VHT46H5P0}~MP7)f*Yrk3A&cJ5x7kgrlI`ufKS%JKg24*t7 zT@TxzmSS{19x&&hQFKqX>ztKRq!gH5C`wMjtZM&*!NdBYIJ-yYYxN~?{ltV7YV`9m zb%N&5Si9~<3eA7|gCXw9vKVRc?`>q%%af0gIr8Ub7G5!g;43@hn2YU-Nl4$tWiWPl zUy0iLNf3=`_fePi-iHh+tC5dFix+q_v;CgDHZ)5(0d0PX#%j@dG^XE%;hz$t{B^(k zfouymbANT(WPcOgLC>7!GbvQ-wS50~oTOu`(oE3x=T>}IA|?F@4M+6Vs4!PsH<)aZn(&hu80@bq@zy6(LNl983?;Aa!Q|2bp>k^F& z?XPqUfZSA_VRS`xh`q)U@lK&sJRPvBglHr4*QtG}>CP$lbo&zZ-mh%7WVSOLKe`|0}m@x(4+)jK{^M~ozva& z3cv8K^tCa#Rzk!{UG-%)U8%lU2Agzo>2C`R90~u0Jf8q@1EG`DrZ&2k$Nmje9&N>X zWjp#4(D)_WjV$nX!@$QlN%x`Sjh)bFWbuTbpVF$~8Ab0}va3kXO7Or!hS9x$$`H>p z^P;t8(bnG9rN_^kmdP#$ULkei?K(k9?>BtVCR0|gugutTGE%?0#>%#1@byv$*`H&$ zyWC26d!RJ46TPxi$hS+Ep$cZt^UG$Hv&so)K{0VsQmZBXnM{O^xf&CMFR2ab@k*Du zyIVYP%>|Fw7Vo8eahY*X6R2E z?OsarqTFb*qxczCFINqhVb3YmM_C%w0;sI#z9sHk%hIekbAZ7WmyGhQt(;77-#W7E z?u*uzl0nC}pwDypq6NM-#bI?F!bk>Z&W6LT~&G2~S9Sm^_ zxa3IEP&Sn0*Bz>h%d$+b6efBfJap!@6<6k;7TSNP?)n;>c_drX4|AKlu}SX zP|BBuxr6!e_VF!Y%MWSsQJvAv-1u)qkk^GCAIk;^SJrBLxUbG$HittdrpMsx##Fx> zh&;neH~KUHWX{Pm8+>y>!iBb2rBS55p=_Uv)urbPeJ@ta{WMuO*PXup9@8%^%jbfU z9f*&KD$NSjx{ae^2Ew;uKL%1?z;jF3BQO@`+T!wj-2Y%SDc6MY2{>CZ)7v%}#P!a) z#d$f{MkqGFf)A-Icdl$hey2vvoYDRd-J}phCn~dp)v6=`;gcy&^ZaAEssz{S9&<$K zgTGv!e=rzFNR;bB&K==An+FTu)R~fBZ#~}RZx!jMq$|$b5%Jo`;UQLwB}YAl4NM54 zw3hmpWq*(1%hMI-9wGl=sDBEldI#4!yISOWW8_DK5A{aQw~?LOFA@M_I6^t}0h=c> zC%d-mt=mM-D|DNvIB0x^bf1@f_^%+03q>61i#FHn724anzVvk%j$rJn2kzmM(;72$ 
z00Qu@00arQzi98bDu)UF-o)vvZ>IflDBk}zvZ$8D^!7EwXgmNry;7zf?bSnOT1M9d zl|~QFkG%62qchC`Ja-BQWVP!=gYzq;bj0K-_5HM7<3pd$9Ya$21zCy>)p(+rRR{=p zK;W~*V9{VXzAta*Dexrj8uM+dCdc_x?Yy{actALs-LbhjJT9Pch>zysZt;nRc@@zlaIM3SsrUQ}>zQyIfK6ds%ncej z#%&^7rzwmvQfB#_TK!8k59=ZYb^GCCn=Z@OU#5UcmY|QSk>Eh~uDOg0ezpqL@(l9G zDBch+hlYEuBg@dy?^5?tFZ#p&ySS;(|Fd~j|JDZD>ngk_sSN`#94uGS34&b-XfXNr zUlaikS8l0fu2sEA1CK5KVC+8URmk}w@&^N3<37I1&z08!*_OX9i;J$s)>|1ef>i(K z=l?km|F2#LjIwoMGN(|Y>(HxU{#+OOg{(gq=$ny z#21LnZLH$)QGPb#$ZPhH8lbjRVfEoFrG8JX%)-YmO?c$&fs-JtxHQ z9}LlZ1--_GM>2$42y?_vvxtqThqII{eVA5e!#SGWkQvAn#7g2uNV66|lMtkrz0T3} zjC!9N^7B`LlL0NB%H{X%on{KTug-dOv~9_&i1G}@uil86+DuErT;C8Osx}ZsqCjfAj1pw+Xa56QaSI)x}t;D z9J=B`yPSaFA-^upU^Q|}=2U*KK^k>xXgo*{Tcrx8D;aOkJWdSC(gz z;~#Jh1@Pa?cg&qS=x^K@KU5Fof#;3`RRHTr@#a+ky(wb~h19N2l@Tnyy?m-U83L0!oPnY%hpwLEao`bT0)6!6c z%HoGPoNqe#0N>QbqrNYD8{(=xOo!XQu!bkr$N$MJsm#&@vlms1x;eP=T_IwsA=NDr z8~>=#5YuPU-vp;uoz5GrWeERk>X??IbzJ2kB*JEercAQJyf0FtGw8=J3Ks>IiF#4p zT^8?2{qbsz&PP?TeI`%}{9q0(yz5#G()_JkVe#)%3T;Xx@g1avxiL*vY_wv>UE7}n zEFMAh3nM?Img*$izGA5srX^#F;G(}m3H}k;=c*R;>uv(@3(8dS!z8n}A zM&c)(ttM-~UiPNxqr@f(hkc5jt`g)Q#(z41&p<`j_jJaoel%qZeOhjK=ahOZt%UdU zNiGZ5afqqzx%L}It5LnCC?sKvg3qQKV$0jgPVTV_)RL|W%lj2FJcD~u;^$g-ii=vM6LF1@4lJM!P_`4*b2)c4DH|5 znp5m8`<7&bo(TCeLV#9?8$SnN{e1gF;c!yn$RjIVAe9@I-r8LPwxSRXQ2JSWEw)zX zN%A&P%=LT(_8T@d<7W0u%BV|9 zG^Sq7$Bo^9d9WFxUusrxPw;~^$T74#F1TWUAy;-i()Mz{YSBSyvU&k+0cB&yK|5^R zYdgR#;JB)d7jr)^2utdA#b@QY~cNunkDEW1P`3SGA0(yzM-uD9Jg7rmA*Su z0$gw?FkoV(y6`ZskfOnb_YX!`BmI^8EMR6zgx0yxkA{q2*Ei&x;zlJ%hd`R z7NHo@T-0OqUZIuuNM2Z>(Zn~9g@d6h0aL<_}9zuU9LS9{UXKlT}2zWD^ee!Ba*O{CUYYI+@7pe6s` zr_ugP$(A^e3?LeFF!Bc@Brw(FdmG6xu1e=Fo}^zSy^WDNK``e^GQr^0Qyk z@T7qdeU?x?r(i2Sn8W4%sIrXg^=qj9khA->?oKO^|I#mNi(t9HR!Rk{@@30TXefWH z%z+bd`Ye$C7rOgk13hs)IYJ~m~1V=p?e z$>r&*5o=1D9iURN`v7_L;@jcQvbE8VH^Ob!Gm;u{G_B277xS&`hM(Znt2M(pYmU@w zWfd~PCLU*tnE_<=oDE4;HS)yRp>fhiqneYd)&k@uIXk_HFMm10em>|g3)EINCtQLs zuAmHEjB=nS^-#P~?MUVn9HJn+jcw8gW0R9|Dc@!2cBy(!_7akWeobEnEGCP#FX88q zPY(=8y7Jq9)Y1s1@6EkycE|D8$kBRRBD-k4vsEws)F_ZB*1;oTdKqu&H_<94eIn2} zr(jW9<^eF*VzFuNc4L_yoT4|09$tlw^Ti*L@9N7OaK1A*tf1&`e}cvXK&O6GZGEr z*s6H|BJe8au8l0>+=@MBPOvJ1~ zr1gVizd7jja5#wI=an+%NL0!LVQI$vy9ar*>pwIeZ}1B7wLkKujBZ?lr+B^HEwTYO zW3|uVL|QuGm{&UM@V!;tuHc_dfrzx_4pyXZ9d)q&>;-OK-zOwB-7jUlNN|*iObsj& z-4neojB6n?#lXf8p72~*^=sy)?K}9s%sFtwh?M?X#1qzgF8GRtD3&E=z`*kKYjB4d zO|o?`$NaGTn54&u{s4^xrGUOn_RXVL1L$OKW~+_3UefK@emy)f-KFeK??>rIUgDk< zTSvz+kB_tH=rX4AtKR9SE6T1Wi&gmVjauey@>a^dsdH_sWIyh0@MQYu*UmN_vm}j%TMf|})))`b9s-(n> z&8RQ%3~^31(0Z}#)+;_VPxPx*-Ig>+yN^YZFG9ZT-B+(3;pOC{yv1q{x$z@|J$7-I z)FCqIvf^WAjj3VCNql3*2si3ST_|09A6PLT=Z#)z*{lyuACj!mk>_`#t`i)zD7KS% z#R_6W5~>5xFS?ZoiqL{9Y=dAggD}pXew6mQAE=Q~lbyTvfN1Cs#z|m707vwXfiIru@7)4Ms>oI&PfaWTgTcjgmXjGd1f02jkW4XgT61=V2X+n3W!67~+e?acrwoY);4 z1s9Vg-h~AtZ4(9VXNyGS+n=Jr{qM_WV7?*f^lXQ$0J%G9OC=F7zn&vtzzh`I@q8{5 zreL{&rL}Nl1egjiAM$v~9@JRm_^ipat5)ZQeoR-5wsCY)M~o3&asP!BS-pAma}i7+ zQTH=pGvFe_T-+p;2?PDo970B5@mw2Jf+teY&i30m*@-@6Z8w;jtW@ge23IF#x4O2Jrv*<~O9 zu>?-E`|Y`Sc4S{pL8Lug&nsk8SfhEXRx6F2+05!3LtNrSsyW`*L-XcFoo=Nt<+Er$ zSFf;FJlM6r=EEDmYTQT+d#^0`V-}N#xGqZ=z5$fN{RN@w4LTZb&fgB*xitGNU^3xL*)zi$&Q`_jkmiv zkFDUYFj3XorkUD+1*kM>HjWYn&}6@6%>6{$RabE^-?NAncquYaC(*xB3z2rxTwz0+kC?eHHOUyrJR(lR#DIB zG}UA|0D96huE!{GWN%2C@*B#x65PEZ1Yr_C>W=hkmTc$FW+so0p$z>2XJ!BnfkD?d zBQCcZ*I5ahOe=UfzlV3X+1=;%X18^{^Ei}bzTT~nI?=3vlOcIxX$hrSYe4P?l1bLm zgpR{W`HffSGPe=TVA19ra?N6v)HoA9fc}7ku$lgrAibMF<($MNc;UqkgfzLWx0Y)? 
zPnuR`c6UpPx{nrtE;wA6)O zJoG6mk1n6k4sy0*${^Nnb zqMxkC=vGnSX-v5L`Ig0+{^2_Dl23nDm$U;gCjspWUP1G#ieAj9@u{u_2W36EY;7pm zOH7^JuMY?5E)Q=-k1+UQ5A2jm$8?-~Lrsp+0MxrF4FZ?pGAm!!KxU(~XVnfM?pG(j z>4>^>XX+JOY<5-j;fo(7#p1hGFCA)uhSo0O}l@W!lkM?R?m#yES2DgeVpKLFB1=P|Q62*Xq$NI|0QI?9C8FL8Cfi8byFLAeFL z@X6HI73ZNA_;Ca85s#vrIKnW1gZeTg zZ3u|%^5NrC-?*C3OdKAr6O+R3L+6T6NsPzz*n#&-!}4fBk&;r~SC57rUsglJvOs*W zmBoSVIAY+b^(MC#C4#=l_U^0km!gaw%FPm#bt!D0b3R%f~g z%NY@OBy(*UOa`+HzTz*tdX-(54*CSXyt> z8vw7*dAOou*h3l^lcpC@P(}C_EI{rA@X&qP4CxpMRJ9)5wK&c2r1kvDSC$~5#+P1d zo;Vwx3vrW|$i-SKI=k|l3d|!y+@ir1$0agx?<944ybxJW8gID?+qt7X(?o{XRWC^L z8?u|P+!4}P>JQg{TpIg*Er#M9FMfvkSve6cu|tgLxg(%-?$?rH?)KM=VC!qSo>Z(D zj_mEYjb$aLKvvJS%*4k2JGKqJ(ND>MM@WfRpS?H6VP-!MPeV=UU-!^(bWb}^jw~z9 zJxwdXx-Ve>-l6%skGgN)BEWPb>vx)cVv0i9JmG_DsH2Kn&z1G3R+A*Fik?VA-Z_K0 z7{yzlFRSU~yzh7H*BDZ`SoNXtnO2-GMM^9w@|XeFi7EdRF%n65vKpXhNFUWw@+EECe$*Hdh^5 zwi44EiYk%3%VG|$Cf+palYGaclW#itq^@FQ#0nN>Be-+xu`;aS;(+jvQA#PX@X7;& zSjo!M3!-i8qc76&e4PBnI$#?=GD&-@k0%ehxs)MrM}hcZJ@v0pBVsooAej0ru#L+FVyC-YUw4qo31Q#|JKbaOhn+=$MH7msp(A0H`;4!P-At$_idR0)k; z1Dt7*w^A1yj`AW{kFUzgpfquHp5N`jcKU9 z{Wj_7e!sb3c`IQyerz6Z)U&>9_R8woNgZ{yli!GKoH1GykJf zqAWmfKq*rA@^6KphGs^j#yj6Pj2^Sv2R$HqABW^_UmVKhFj{;KfasAnDh-!|P{59x z+bL|3XnR?c@ZkCTI%_d{=1nxPRN%-ROUZosplVK4KeNyaHsG?B=K9f}-h4^gF$N#L z5qAXkc)iQ1FA241SX*d_r8C;DK8SFXIt}zssGq9;UzfjF0`n7y^DOk4h;>UK}~TS7@|)#mBfROLGnE{)|E7 zxrAjRRPqPo<%v~Jv>2!Eh~V(-X@fAg_pQBs^5zda6yd8*L(GGfqJfjWra+3Rh)^=i zpV|b(a(3Fhf~N$8;`Aw&p4J6XA4L#W9M0ulH7FcRYbf_8_F@t4mi;Dbc8GCl#(Wi5 zi>7WR>Z;H??oeuR-9zKLtj7WtIq+XXiO^J~B)Mx^IUYTEVv9WtxOlFz{=QBMec42? zC+D0S3#hT6+V!%x`%fCgpcY9%3|yRlSKS-5lmAfxkbU~M&RTsz|5ALETS1fM6kNiB zFsV;CC(Q9wBIiv1$G_D%3;TyZ)c>?_oD8ukS*$UB(N}2kp(#4nj|Ovp@#3=l<16`p zk$wHBO@k-C5bJw!Y?CpilS{8e>f{wxVc!47Uf)0N@qeI|7)<qV(Fio91m5Q82%x3i8BMR3~eg|rH@oF*oz=#BbWxN+J=17 zp%gU-#NtG~*w6pA7u6)KeY;pR=C&9uMwn8~W$*sonZbXlNB!6N@DoPany~xDqf6_n zV8MUUb}EWR@1w0{M~%oW{EEE!pUxR>qWml4q;Wl%B!7*Zo=x#5JB1TMl`s$ za?S7|)iacIwf6!|83@ZK>&(d{#d73hvW7&`Z|$IVB}S_uw1TNUU~Gspx)oq2g!7sT zD9QYbMn)K?tK5^#I(ayO)c68hCaH|3bGI4w2cyH+9W`B}OUT!zonWiNO5I-B2TVeR zOKpFaz?v*xFKw(~%SSxtlavb=qnqv2~>6!2Sn=w(cknp0X6m_ZSK# zqvViNZ?#V-v|0C4`1JoGSK4BRXmz zEOKA?7ZC2}Kmbg?^J@X#qm{ls#E+pwFMC6;K3xbtF*OR+&O@-cc<51oL3<`8BP4X} z<-;hNmn{r7J*}M7tD>Q=j<@|x$C95PsgI+0R4MUt*^2|)b!4q*rQ?p3_~bIP{7jRF zT_&^~*fi$(7~TbLhhyJwU2xisfFi6XLXMT0pP5-z=GpWg3Ub{+rx4LWY18tB8UAC zP}T8tj!ilOvq;0OHVZ?n)+Y{yn%LGOoq?NY3Ig`pvw#(hR7`-|pZM zdZ81OhcwG!4HHIW0+GoD6j4^)kEvZQ)H5-d{XILl&|w-gA8+g$V9_7twQXH*1+9aT z&aKhWt;;Nq15%4Oj-kfy%jr6V^t2wz|6dmU|MI^+55^v0z5#WtIF!HevoBs22a3rCZ)OX?D(D0kF&rT%9u>Pu^9E*4i6$I<<*^Xrt`j*o+DEufZ94?mLb)*sI!jefz6xM{5e z9I61{OZL}0moj$JF~1d2>-W|_I!>6SkKbvOEYL(_%BT)XTLnB?3j97KOVu5MBusmU zVU7zD^r)4gwZo0Ie!29G(zu$ea((Toq^<|_&n~(jkF(e=F#G$pu=MUy_lL45Ogg~5 zjHd%QO63T+e4p7Et?9WJH~!x0NNgHL5JFSb+mdJ09(5D^-f(`m&Kfc0HR+P#Z@W>q z&T#*(d(tFkbfeJ0$YrOBvZ&GBb?*$#Me{*3;?^qBqA@}6VifJ7`z%oeQoBRxEX{K9 zHQiO(+8RPx@`Vvy|K`r&2Q+JxF4&-CRn+WFJKiHUI!$Z#XvM3TX{(#95IPG_xcPv;+1zRs6+d1pF)vj~ zF6B(NHK$K#!LtP67amT%r$4rFLi2y$Kq{mN1wjvuM2Na)@$a%}`=_A;>RBxxH0zW{ zwj0vQxCw!yZFv&t*ff_#K=Ymrjj+6ihObvdaT9Wr5CAnJs|fRsg_6n z!wZA5yARqq@KPZWB{4ym9{{A%qE?T}FO+R0dwZcBv4;i591rLE-@5@*hXUBztwnko zXCd<*?@u#6)Q*N=cNiWyGcUtp=`R7omj+nT5$6H=Go0%RFK710+zN}+U0(sS#dlka zx2OEX8$WJr;ms6!m(NGk*(`CQ8u_AZ=0M?Nm~QEY96`VJdN$XBd>S*z5DVF1iH)JJ zUV-#aU`ajTJKysa4MQ$N)*|N_AW67~xaYRn&vT!_Et>B*Zd6D`*21nvQRh>T*Nwn%Kn9nFe?n6Q zWF71GvtQ%$u$;T!m&G_z3S)1%$vY>zx>Iw_Zcy)hh8he6@_7}viI)`#zkL>#o$ohq zw0;!bEyEm>!|4j~?YWo8EDPZ{ZA&E{xAGDM)~K$|J>j$O62~hqJiG!fBVJi>x4>SQj~pl+d-7=mk}00 zKb&9Qc?`AYqJ2J%%~L$)d}+!rD#KAX)w+!qra@z! 
z&CZM6km#h3K^VHWq!p*XoL}LBL%Pn~Y5m5disEpSnWSpUP6fZ(eecRUoZuD*1Ut^x z=)Kq)8BKB%QV9Q<4z>X{Vd48_U}gBiMw=^`hAEn1@ftX3RUsnL+GsJ-u#{Cr@|Ak( zoqaS3toZJ?Dji?HLRRl)27#tEtzI3IEwG8EuWtBJcZS6-wCYK1M4o5R9M)xg**WSR z+^p<;PC)tb3dfgC8;k>J{|k;=2YY0ZhLBd=071Krk#VBJH50oxmWzp$x@ga6otP_=3 zhe2{7XPb2z@{#)ZGZBWiJ|Y&6u4}8cjx_f9f7wY@s@)6kfHgNSd1Rs9B9>h9H3S^w z2e0Q*6Y)H=UJf^0K-t;NJtY$%O?k>F4JzH5UeN^#`;R^EOG#HFL`$g{qQBAB4dFNv ziq9?I-X@|IrMLJ20t5zxE*}<)17U+Uug`VRZvo!mMU_LvW&*c~t>@`?ky_kBHRPK5 z=K@smUuxQWI2#^&23=$|ipcvGoG~2Paf4lbGL?tMg36Nh_%aoD&L~AvnkR(vr+}+@ z@0sRE-A35Mj^Lu93w&bn3!v=na9PfT9KTuaNZOJXf*u*=30i^+Mh4!k9L*G`7Oh_mqQ#BiFRiJ7)ld&X6aqNxAW} z!wKldim|nZu|XWAG-cz7ggnWlzZ=>qgz-~3Xi_ek=k?-OesWt&^A^z>W^P5phxs?% z^1$M^9xw&4b+oIPjce6xPzE5L__3d`sOzPPb;|PY$q{OzjEP*o1le|dVK=f@TVx@+ z7RlQN=1iu)UcLxSg{|f~Z3H)Kl|B(H4WnmnRc7Y$y&5@%*HkWduafz&S-t-M?IQ7) zkWy4h!ROO9pq*Upy6dK(tlaj`?udG$@cKo7>t^{GyRML2Q^-}fcO4`o)&~7m zTyscIhTfU}K}-h@9uQMXs_%%COF1F{sDerHz6#Yp3kPOj5YN=*TO`b4G-QPIr_Vhe zR-vMktgCOjnWz0oj=0{SvnQbEaARz1QT^Rl%kG;$H0k4^#p4 zxgdM})%u@Qx7X`34HEZM_oi?El7IgGn!w@ycjbeO1TFWpafB_IF-k<5abq6Z%n_`7 zpiF1jr%SB7rB@2xef!zp)P2+@=2u2$|1fQey9{^7d4+md&_OS@_O;0Bd6(hK#yPhy z9TtP@ZY!e(Gxv6O*d{Sl@uMz^;%$iNC*%g|050IXS=`726>5oCoQvjLza$>4UhgP! zTMqLBL2cirp$|QCy0rHUG=wojJav~1V%S!F?;Do)lK}$)1wg|&!2Qz+F?SS9gVh>b zH%?1R8q;g^41t)n;~Q`yceS;}J;U^4!EP(RKfE`&fZ$7}`X$j2TE7VHn>ousnQdDT zW6Ph5W!PsHe+L;sywJx-O^O@w6C+k_yTJQCG zqo*4igGWCIf6*U(p$@EuaPEndN@Cefy~go>Zk2iM9nbNW|3$wVu@uq%bXlEuK)^m7 z)-1D!#OjVax6Xgd8G^J)&2FbO+pPT6MKNYu58JeQEwXU4nY$L>&(lki@yiPnIk zw*qvh5h5-pX|5U*sNAzH)KQf2Xo*{5YzH zvcQRHJ3=47OCC$JC#o1=Co=>4dur*|SDJX;-~&9(z#7=55KY9wpaE1XIa8c48!Fgo zNRysz%I@H$=T4A)d`Y@-6=0YjQ^iUlo}?!k=f{ZW2K{bjt&#HxU3X2T*QCn7n+uUx*bQZsOr4mp4Xq4 zufZdW%AK;wd*TB^;s1kWB{w8q$roV#i4R;KFn#xR!12H zkOEgXEpMtlk5O((%K^YfOtF`Qv>`pijzoZe5NQfyGvklX?yXLX0n#!T z#6qL+G+2`35=bwDEkBEs>fJcT5EaQi&P1}+dI&cCGToA9+qymE)bvZBanYOW_8Ul( zDLe()BZpEEtr(8WehQa-N5lKE3DCe>g>ckaRrRDpoE%jxYqdRxm^AJ$dEbs55_Y^1EucG_ta zOsA@Vhi(!Kq(|^m#2WtZ%>(p&xo*+(X$QSv?xIl9k;Mo+nS3;Nxl z`8W^H{#hdcb6ij*Lyjbn_$GBbb?pG5lTPb8ML!=4k*2RwUM3al8JQ(Is?ov-5VCGW zFFM9P(Avhl%3(QpDL4T&d|>ERREk`ffrmnpn{ci@_vGb$g)A1k>BZqkool&9Y|#)S ziVS%NV+-)S$^8_j2Y$YCT6XR)Ka|cGhY2xqA{7d<7U=b%rD(0ugB<3Crh? 
z3?)Fbch(6#eSs0ozlI!Fr^Z9!IOSxLYt*1ZA7=k+&!#20^Bj*IC!Ne=VAR4vV#H7R zs}tR8Iib-Fn=9<-$q@4t;nl82>JCNtnGGzF4+L7UOl)adVI2C$1(}uhnZfg z?32M3e!e{WN*j>&VwUt(VC+dy?WvLY)oEnqMBb0=5qB!u)sdfU#3_LY7pDOXQP`i} z1W!6{)xo?aq~<_IN8y8p3zVgqV}qES7Ha~qUonSKF7%`(pgTzE&3*zl;H%#B4*(f6 zk~>DuCfN1Q*fiZOH5kBChq(+J1V@RsRyS1b;NIqy{s#Y|)hYg8=xh8}U!wDjC`aXD zI}k2E%>9GlKN<&^1c*SL?>@6*o2ef<{qJ?R_ln>D_dO3{w)_87KKh?e8zS3yGUhGV zq}9I@KRL9Y;9GuA>hm3wH`LNI`GxH&e6zOnsI-soib&`kVTf>N;?KX}*9xABTWQgE znSSMJE%+BSCqPD!*2*QFx=>ok#I*GsS2w-(XyoGz)E3U{4J$^od1>BOoT8c)<;pMw zl{~8Rc0j)55peUkBuSp&qTT=7QJ{SAANrmLhBNcGF?2_S zi9?zCXybTs9;fcPJ>=3Z>Ppfz|l+EyMuau;t$Xn)T`%eVzW+Q|#RC&o z{l3}<1|uR!IALner&vGw8xQb78bE18X3RZ%S?o;P(avAb@(zS4&`E7hcaa&lQ6tk# zIji{;0Ghy>vC*3)={ely-jSVwzaT#*)@BX)vr*=i`C1h)f(vryly>f^AoERlftYA_ z%j4wk(+apKR4-gzj5QedRGkM-v$$`Si&dgcxDXG>hFMR*IXMkh^T>U2j4!Fq*h=5Z zz1;oN`#=yPb5;u66YBV7)orS+i>~&9!{7&@HGWPbwX5*+fPr(sY{iCP5s?j}6?t76_n9 zn!dg=+eGhcU5V8rXdpE$0O zZ6=d@whqo2;v*fR#mx>rsa<-vqVy|xbfLvLs&=@3 zTTL@ZQY3Y#=uwM1IGrljV=oovk*ddfh5%2)6FmoAt;%dAY>A>u2D~k%U*wwSVVgYf z>b$D-J74fZ$+XM*TMQfg>FRp!QY)0=xR6In5{{&`IL~|kl=*BzHoC4&aw2Q4;#Kte zlV|*AReNHraVfIDcVbO42icj$&4}Uerfm?;d#+?Jt*4ezhv8~X`EB-bTqg2QrYc8L`iFWYP^>>^ zs@y??H`x_5b4Q?WF>uca@WXElfZ>-rOE)rLM+BVI(s-{cfrKRe&(86^7eB`N`j?S@ z(&X9pMCXK;zv495I}h*+NFRE}H)*+5rq4LAR)2{-HhSVUOp=~}Jpk5-lVtYEg_SG4 zGh}4;FX!7Ll4AQB#qbDeQ9fsv=v}7yztE5PFTUizoALN>l@*wV57ob`C5+1ceVZzX z`oFae?3Mi@de-5FTbr?%EqGrS#q56U>W z9LElPQWD8-Ici;0D;sWws_zMRRp)}B>;X{wK0h|I=aspGh(5V6|J43pGNe1S!Xfyw zy$u!Zft2)-vgJnA)>ZYxCxbJFtKIBAE|!6R7B85X+kROcO@w!gR2WoJhM#Ek7xR&m zgnT<%_y_A3hRYA0)d(q{soF=x#SjXOaAz(|)_o%|_No!VQ#dw-oi+hyw#O@znkcz~ z8L&BB%<&o#=g6_OqC~tl0ET00oh8OmX`XMRng?9H4r>6(r#Fb`Vob2p4D#2Mw`Y8W zTGp1@NLGq;aMILe(*~-9*^*oAgSu9eMz=*Ih#o(Z!rN*$9kq6@K4?j5F;5bO`dR0W z5)-l4k!qWet6nD+CE~8X%EUErX4 z*MSDJ!>DbL*2SZx&OKlMzpl)4(8Lx!&OYE~hNVOB=b25n*9o2hLAMI)i;hNXywiiI z;4Mow3p`Mkq@(Pilr#WmzwwFE_oqfVD%maA1r6!_ngrSXI~!G(qVN^vn+MRS1`4Rk z7IrON5iOC6ZD8veYHMWEaG@2UwTRb+0b0{t4@3Rke!ltF!+K0enAsJ|xNd|r6Ri+f zAo~5Jh3>bPv3JA;m*2e9ErxV-3hWP&+)DDv7deqrvjV#Y0HjY?yr!-YYy=QqA5;-t!;fT>(lrAbXs^pCP=w%mYT%LQ@)ft0`aT3$_u*VS`= z_4fQ1cs%jo-~<8>@HNH%f06Y3S7(X;rjYsnlq#Mj(OOtC@M>FH0nFW0iJ8dIrgr#H zi2!OSKNWAipAZ#iY7_&5W9M$Zkn3I3Q44Z?* z`Y8}8Im%p>d?6y@>C3Ro*==H^WVNP6ozKt>>0X<)Wnm7Nd<|jU&a=T|@@P4P5f58m z3uFmR)a@7SARV+`VLzBS;%4!9;k)?NT=Te}#8*SIIzkDUsB(|`5$D8+X6Y~GEbeLk zbgPq1V;i4AY6AO(g=l#kM%5DZowWUR^UPhVIJjK`OpGCZ+~X!!)6kC|R^+x+HZ&S> zt4HkF*jAvmx;*3AE;{iajn0e{&rB?TB(-bcv#5E#*JHCt;G&m)(_|esVtM0S$fjie zHmo)F0avd4q6A~ShGUIF_PxQa;hU+Jq%9vYeMz-!!L-lnz|U`M;s-Zk2;dpSc<)}{ z5uDqWi8ZW_(OSaL6MG>hlojzfR(USMrqQb0)(-eS-A-Gf)ZxJ^ud)5+eb1i>L<)+# zT$DfkZRE(Qb!Ab@%Maq1U?%k_suG`gC9KH0kWZaZ`A#}{0!FDOIv~C3W*Km6-)&c! 
zB9wk2vo!YO3zGZM5{_0t)L|QmEYsPWpV1a5vg59pb6bOI*Ofp~b3NrI2&l;isCs$J zR67&Lx5T(A-OvaL7w6VP$w|MWxl_a7Dp`Ka(RX?rKK#^+p5!*7mro1+`(7U4O}k)( zC+ZbXZMM3LEH#blo&hXKrY41}eS0@TdJ!_f%+{iYZ8?N~^0M~15^Me{F;k4ZEZXL( z@QG9fjJEPeBmdPzx&&>%BcR?QjX@od?UKDrz7Q97z3$SC;vAU2FZ%Pt@9ZIR3V)-fSJ zIml?4zhVA25xdbR^rb^@AL5sVjhyzA3J8~k1kUi+Z-ce zgw~u-?lakZ;a-R#<(?dN`4DAw6EREK2YK^2-tRk%Td+|8h>3Xz$(SqRcYWXlrJ~>x z-51$^53?AtrBbISlY0Ay7;^pQ6yHsSb*>E3;l%g=8Pa__8wjFumjj>mu3Pt z+$&A<60|O~S6F&Qbwuzt)Y0{qx61zUat1RHI_W_d{5`FNGC6`qn8VZU+#VqtZhroB zxh7B^`Z>|?QbsULe zC>(#_4(=Jt9GdQ%1`Q|oBOJ^5H*j_ugEpj7Vv=owdE}qFAb+BtcCUgqoiqU?5=p;6 zd2g7T;s6}EQk3i8R{NG~Gb#i7O6BB2>xZ4=wYEOvh=hC>uY)?d>L(gXNQXJuo}?5d zwe?pc*m*sAU#ru-mR~wt05!)%kR;SOWQjP=MdMMmtD3vPJO14Fy~cc7 zq*8cK@h7A=5$70{_V@EY7S{dCg@EUGcjZD`eR!Pz=fsh+I5GGl))dGn|nPud7+eHwEN?E=dVYmm!z291D>jY=ibLS_QESBJA%$W)E~ zSlc5!V!VgO^+8PV#Y*OUX_+*ytA#;(C0LTG^M_pwyIxN8GBTg=VW^_*==gkh+IOLQ z!(JcKGe@&#sbf+9 zz`|AZSD(|U+=3`Ol6~^+R`vX0DcScBHOiuBZ{;l(bbB#1pYFLqX;S~U6Tzky=0e3fftl-1KT&=IDf%)S>$rxW%k|=V$K7zc+X~u!{lyZcP4Sf2|Gi{a zF;Rjl;~U&)zCMc@qP`duCd-j6r@`0}BIgh-m2@5sCRH=sU3^i?7nT^u0r_tX@4de- zBvS`Suf?V~itS2KE)s-B{DYNlWa_mu-#4~mRwio*8(AIsk3GAR!n{KNDY8^0tep%6R!p;2Z-PO{!bMN|ECxK=c|Wp zJZ;qm(JED{2nmVm#hgb6`dQ3-dCA_lMBO=}dJw7dKVUX1PCeYn*LVf{f6pg3?INX~ zH^$_A=**E?AhcMFwa+4WFWM>h>he@{j_V+C8#CXv5JPdGfk*S+<6<#SiqhZ7biAW` zpHy0U^9qBn+;Z^C@a&T;j&oq()pcY2>qX4$I3&p^^6OjT$f&A)8#G!=%?v-Tb)nP` z<-5OD^~W%u(M@0-Y<8_1oEkwPwv3bu#Z=pbIIQytl@6@My1IudS=S7-$r?KC;Qz|@hp|NCXD9WQ>C9q z8Vlp%qg;nSKZ`Pr@KEP8q^4+;50Js)=l&a2w{hzfFjv}$DTN8Ghvpkg*+ z8QS_vM!fgJNcV@}9;`3qQ&?i0TE(~IyXi;d&L%E}R3(km%O(C!186?qs+oMwo3k92 zvd?d167l=Q+1gba8E&2w%p1~5Wc>xNjX@j8=qaWpS@vs$v;9Z>RL;NICHN-(=m!29 zb8mGfIA?TwdrM}1=@J?EJbQpDT7WT)IjvovW)=m^eJK9J+SG4bA?DuKRJ0^JI%fJ? zej+7}bzQ?hB~0JIS)LuU+tzDVLEf(r=e*eokox4R9>t!dt^4jO(ZcD-oyQ;|Qo``U zW>tYwc|tl`$v~X8z@LvRKiI`T*Og@JvQtvS(fsf4=$)=UE?rUdy7^jK}?1Q{{lV^?nfP+ZwZR6oRfH~V4=D-uaX ziir}iPU=PmvcK*hT`#F{)5#}hoC^$otv@^P=M$;yTM4DW-7Gh+e3cUKf-`C(H6g#m z_Vr`l<93U38ER_M%#n$toIM}OCBUlm>)iZ&k6yt{CTlatWZfUVCuvduozv>hIHt}! z{Jn;&YKb}-H}OKYpUVR#-oA$fLdU6l zpY|rYRGgIO%B3PJT*&|n+YSkyBHB**d|-B9nTV$r2Q2YNbTGhsAyvGEi(;+XjdRwD zWRGDrsA%xb1X8PvxBcC@e|m9LVUu}M7d>y-nnL58*ME8aF3$!ta#;dl`(TLe6t?$X(CSaBh=uhnaPuWB)12^AghsaZ3! 
zpoW;l%k4+Dq5NX<*0z~mdFpp=!l*>%NevnAfXYCiU#(Y>^}S33D-= z7b%R(=dg_U`s)7Qjk1*5tQdAP*Ov{g0G1yiaJSsQO<9QM{jmWsWk%ZTljBRg{3GmX zAO@Nwb-mZ<`=->7BY)I-{tave%C0jAGzu3tuTN1%mFrUjb-v;LO=-{KKlQiIq&b*&9AHm1^7!W%wVNRzX< zEVx{cMk&tJ+EZ8#VLdXcm^iWAPc^C%Lu?4TqbA1*#+PEN#$PjDGgjmJ#lgy_1l>Eh zV+!kEInH=_{bdv?lB3uMgD~frJ%N8E>82c($Y=R-M81%99#!5hO#;%0=ijvRnNq$b zN!Te|+LrAZ1G~j%O7pD2vby&p6jwFDdk08CqEU2RCO9l!JY?`nz`A74{cX$ZN1F@9 z3~`JRtLPaZU)h%cn*R4I+)Qg-St)~q$$JVUcDW;MO}WiS+o#6GvRyw4yG1SmJNmt{ z!_t;9+KwSVUe(brcJNbMQ7 zna(;tmxjttKE4<>Ce-Br2Q#oFZgPEhgQjYPb&aq~crrup_|<299@$U7Ty*9J6~dUL z>G3D7Lrxh!$hsCL*M0_V?sav6!B2A*X}L#BlXF+wDe9iiURP#3weliaVG8--y9`I9ii zqNqfnYnw))coLA8z~|NR_@HOgxOCe@U=ngPE;#O&(qY};Vd5cw&F)ho5RkZkzvGU< z)e0l7k$KQ-D67-!H-5--;bQ`3=w3Mb5I))vY=H;rN>hw`0F7BZf5ft#P%li$6By)q z&m7&avlDMPudf9?3?;GF`rh0XyL<7|HORKptLub_ zLI?3h8{_5&`LgCPe1~qt`K+NhS~1H|UzOZO@(nY?ro_RL8k<(Ht2~e!5_TxIyG86W z`y|fi)WtcVJ#fG0Kk|Zhay5)wZms$ULmiOszEsQTGy0y27pq2%_SQ0jgJjizxW7de z3r2{JEYEErT`=Mg;ymxCrVmV=@_6sgr+_~Go|CQ2AWpRStxe+5W0^6{0G_Z@>E!f` zlOx|{%}S_W20b07qHnjeZ~n#R+=lh9dpqyIO#HWjBDP()v#_1oa|*`_`-dZ8clxD{ zAmCCA5j=LZN2?av_aaiAQeBxK4l7qq7%D+@4!Gj?Y_A@*49QsgS9(XuIwbB1;mAVq zko24=q{29=sbpyB4QAJH8flhx&IrDtR9y6nKa5x$nmrD@4|EQ*pycp7XWL{?E@e7O z++N;lOx*J=kdkJYlG+c$b6JHurL=NuM{Oe4Z_+yER*Z3 z3x`dLWQ;j@$uaUo{b=U1d))@aUDK`H-e&Q#8 z1;rtnkv!5`{FP+dF)Q3INm_$~wOL5^)^alR2ek6Xzi+7 zKU9}v;rBU(l$!YP)3i5@dNxH#(E;S%DSc-ZO;;MVF$sxsm<)z-qc1Ffqh7X|F4WkO z;W%t=Z7pjhrAt?8g{h(A)DKqW_^q>=h+psI42kpCn;C4TI3C1evC&=G*#%YPN&s)R zCFi!lm>bImgG{@n^~pul0^<#KGYO2rp9hZ#45#zZc>P|w#wvR2F2<(wsg(nGzk`R; zp!Lc#%)#{iS=a`6U*prWR@C=%wz25fUSn>Qo!QU{V^}5)WP$|Y1N%xcs)A zM?7mkUYD4K1!BXCDD$IrDoz~W`Tlhtn&%3K21|z@Rm(r&Q{nP0&Wq#u)FdOi ziKRxts9@IQ?h}!(m8;L;w{S@<+cPR>bx{6+aB8V|d3FIEN-vrrScjUcHWT_h6T#r3 zpIDl6B6%;nwe#qB_hgr%wT z3KECMJlVG3iaK{#l9TTAI(Nt+t|2;;!x+o##qH{~PVaqIyr|5x+jO(HqVF3sa3el) z))%>U^F(e)K8#+rcRY8?xuljzm$+mfk}W+G}QaYV_{r`+q-C_4Ey;iM0j z0)rfQ+y1w-`{R{`4O-zq%GTz0#+{GP*m;PRhZN3(daOW&Tb}1OHRC+j7mR+<2J$Pp z!)ck^_cdfXvjp<&Lk*|wM!ncob)1MNLxlBy>Hy*)Os#wEwW|*>(ZU-0&`l(fC;mDE z5m>YFvlz%K+J}N_AV3ui3>XU=d-TnDvqX#Magt>Wt$3Zmpeac>Zm5-g1zt(Z5!zUs zB&I2&`K6^fx}2(MHK(hMPRut6?=wttabRHBN4y=A)AVrrmOjHMy#N1b;Qe>~w?}V1 zyBtTo*{R5;(9hK8Fhkhk+Gk7`1&c~Q4rv16TsI`QAO4$n|DVBl*&>oF3|8#TKUiOo zZBCMFM4w$)=J!bL{?Af&jdUO$lzaumFz)5olsn&I0%YWrH9)RpQ@MY0Uxd}-9{$+A zEwf$_%61$Q)p~_2*Nf>*lhZ1SBTCZ-X*x1ezS4(su{WEi3 za?k|DIhW=Nc;jGm6-cdsbZxrvv{&NjGR1fzdoQjAj8w9>2IEhKe|sv%it>%Ni~*Ca z3hREX6fUVS82IitKZsJPlfWa1#8c96p`_;5di1=ernK?Khrgr7a$*BTRpp}VF{_Jo&Vu!;NpER-7s;L1&UkD|Q-iYtV zkco9ZR`%=pQGryLF8w))|6I>mg;J=Yu{q44g(HcZk*1>F-CqSd;*P9wFzusQw)5b>=kg zoH5#VK&Kbax6M5gO-QtQq|o_EKvLOwt&Qrh;lYttY2vx%f>q~4k6I=W=Y`8Pil1J7 z|K5nMB%0`6vp4AzL-N+jaW!2O)-t^&@G+xdP~|GT!XQtG>bcOkqG_xUJ2M-xc1iKU z72`)sk*ci|HWMFR`4ho)JpW*6<_vR+!U3*gNaDB~kiQv8`k^OF%bnWY^L+YMw85RJ z>HLc|QN3ZEPmV&|i4bB=v|F{RX|gpIyeJiKYR4a?D5nj%$k5qU1+ZN&R(84v(X znPEeV;KBQvL6_GIG#Qvcw03D%C*pw!9~*cgM6JOXZ?_0#_l}|3Y!VNW_0rm^k=P&n zv$FfzUYb50Po>HJ)Btm26Vz-54T0GQ-D`J%4Oe%Z3Z5j_gRz^qw;0~{hcXC^&vuMA0rz%^jh?t4 z&k;Mb{p)n@kUg0&D9~3J@G$5nXcuP#)}9#&x<8;&JDJ2xSV zm!Ci2H$F|skqu$0hc%BM-N`R|-Pi?yQXDwzdSrMiOYP_TY#J@jODoz*JDqE|bM)`A zsX?r$qz(^^k~AaUXE|Z_8hT(x@)!1&&BC6gNyI5J?v(vH57~ zE^51O>DXakq9&>o^))qA@7dLXG1+@sNyn@DId)tuNrOpu@$n>IA9jNSW73ZSZ;c`l zIVc5uI!gsuZFNl`4l3i$a+$aG(r{VOTf5$8^YG;j_d29^x}0|*X(3TldOcKO5Feyf ze~3Af@yAR)(B)zs(Oy^`L&d1m>>MGsg3M&&7pTN$M@}9mdVklw+*4bi**si6o9AbF zMfW<-?Z-KfKXV^MG35%+6;w#$(Rk&%#$iXl%;*b^6K~R{5t%qC=Vni-`-+a>>ZbLa zEm6~?;iGvejc)CRbc0WcS{C3s0d1EkTT(?t#@wZtxJ+_Ig)|2Cn1vX%%vOKF^T}Zp zl!X+O*K(_uY4T<4crr$H6ijRwcyuQkgZmdp+A?I&Bec0jbl< 
z(u1`>gg0aPF}8nsZw{`hp)E|>|17!z~>kIgr+SR2%XS0l(bGUrr7tEdaHODsZ7s(gX~L z_(~D&;a~rh*wfhVWB_E$E&u>|_@L(fLW4O6p#!w1bz2%b1QY>_FgC?6J(}p&rv}v+ zm28?97*F(HK7O==+j;ly(vyNwKrNPh?)++f>lNuTexrCsoENq!sMNhrST*DFv;m&~ zb#}6rVm+O}Z1A|e0B}%2wgM(k6HPKB_M4^Q7-pvK8zpg?)1j%n!R1FNEpi0x>;Ho# z#CY1lGIgrYxE*gR(kIsO$9&nvGU`APfjAu?{6_9Bm1+?}77yC_1mo*4Y64h+)`>Q3 z#b>sSbIkvjIC^jfWO+ z^}c!?xy-WN@1mz43=J&$!Pq?OidG$I;!@;u(~c*9|$A?5L*ecO#)>NW-_`Qix%#S5#?C5+7mdNv<`stiyVFZxe0 zfl@HQU#%B#wN~at;43oy)?p7F22}yW)Jn!mY97ldcmenUefSXb?%qLx$B2@>neX+= zI0ts>y}S0Csj+5+ECRzFcn(R;I1TM;>wT%Xnl%R0AHa`xljSM5sHW(rUF#o^%t7ED zX^-zSQb6YD3}Cc6r>;x(@+r>NFp zaDyzP>>TG?ULAwVnwn_1Ydd$2JS1JuNG2{_3j{clAiHS3z-t)LPbaDv^!%WfwJCR` z+~)`|$c+5KX*p-dn%#S)65(11Nv#d9YGu^z6VCw+eLm;0eX5nKx9(_^Ya%R`g#On|nw&#-^gI;$GnK?p+ zVM>&Jl;M$RqHBwwTKzJ4Hr5$7tNn_*EU`f5A~pwDWAF&g-bepr?fwaox7vgb`=+lk z;kafP_3k1qc-bjO+gmcGApe4B{z ztN1mvyfjQds!Wa2cTz!`&1EZo!P^C^C4Fe@dtys>k0mu-pwW@AK@>bk{|d@ls~e05 z4%?EG2o-9!CAJCfKW~$07$ymx$xLmy+T3Ufy<+FDD{y`OFu#TrNr} z1vEj^V^p_fLS*4K(^r|LeP|se_q14&;m@&XM4sB*y7)J&xD{tD;odD+wF5tig5} z-6|>tl862iAA~~#*YOf8mf8JKX)BdOZCs=D3QL8N=DofeB zNhir;aK*U0)?ty&h!lz=b56-C>M&s(Qsy<~H{cPfF?MX)Z?`$9tNu|qzpS~FmRfF< zCXrd(-P>*m?^wyGc#-k<4*02CpU{M+mn!?^0QM_t09#)SUZg3Y!<}NHNMUu+ZXHc? z$|%{e30q&1uRT`q0g4y4{e!i_jxsnT>$_Cy2yjHG# z7fu6@wh&-yO?|GXhE8D?b66I$h`LxN)Ok(kBjSH~7ku6j*=Mnp*F8Bes32_~D=0K7 zuu05*05ao%Y(r8rqrZwT*Spz8m7gG^%cGI0eSeerQfnP2>!IfOd_e`0JtK6oz9d<< z3V@iHD0}2)Ee{iEirD)}g2=5$@YPC8kJGhp{$RM*{W|0R9p(=QndW+Vr#Q7mDWYe< z79pE!9#u1dBbdq9Clu>;7SO*rzNbns&lXqyz*)_3wQNd_M9=V?#JSemmi~jqlM**Z z#VcDb{AI!qMK94p^F8_9;Lm6{6%}kaT(Ztg5NaK+@>F%(4ixF-lxISy(0xm}(_YpJJc>7YdlwYs63le-!HRt1g z7nG*pqs<>R!_c+dw^nQ$T%lFLs&eH-VIcVKW&%Hk;MY%}63eeCf$TJF4{`EXtPjbMZF{w2Er9HT~*7YwDZNZ7r|+^Lwqe;71;04RgIxBtDj!h!a|B=NsbO ze!0gsVfBX6qLvAoHWw`CWIMvdvw%CKAU-UoCJjOG4Bd{${exvGGJ4nA{HHv6)hX6x z)yOn9;c1W17f&f-+qTjs5?R>BrI4}7pH`wAq9Bs?1_LzZpQc*554*k2P?b78TAi=7CQ~{{4|jENCw1#u zO&g?hXz|=-w{>;aMj0Xv}7-*%ctcOjg~LVfY!fttP7n8ZMFazV!R_6=*zc=r@6)w23QG`jjV zubkv-Vz+wt(M~T+i~puX3;#D5Qm(l3-EHOqUs5?jji`c|@pB(j&xGX2zS2!PqB0!Eg=SJNjO)e!$Z5k^5rCmI%?osXKOqN3WPc zwdH|1x(H(?V2R1TYy_Sa=>XX~$7wa(HS+WkjYuqmi^d4-$~xT0yEIY3tCouJcaw|9 z{$NubW;@RX;+&~@BNWM}C2@xG5!TcpdH$-?%llMm{nGTDuYYQyyIrnsAN9swWU*BB zbE30|;kgwy`^U1+Y^$cZE$=M~GdDC>ldP@cHYfH`kca|Qt#rwx;HVtwU*{Id9vT5d zeH{lSE!z>Gc=bdK@ndn>V(eOIeB@rK!myo_e`-7pb4&EyoNG~mvywXZCRgF z90tXdO!;)xW<;$c^1n>-u+My9lx-Fq0wK#3tpvQC!`CKiuu$?0<@uon{6n!0^=*ap z{!1*_fB1>E5H0(rps!>0L(B0I@brLLH}p_Md{RyH2rhtTlBJddY3S$-^obP_gqS)$ z4$Qay^}N@Ks%{p?wlhnJme7uw(eWJH_Ec>VY+m`rS?y}X(hnYUXBq1{IvvW&qiM=i zvz$=PbGkPP~_d!I%59n9j9l?~=3>vXp7fNxZ{y{oo4r2)&Jmq3z zRFxW+9!E)~pfa`+pO+8!)p#xDZdurqQKUx`QRZgIxM#i-tQm$t`n}y~c^I!2Cfa|r zi3C2`WFR!Jjml%Qo37<4a$_grpJ6kp6Y&(Qbe#idjP(tbGhEtAD&G2>rFuLsCGAt@ z4LvAY#}m_s9r^+378yq-4wswJg>hMgbM&m&lNbmABTl-%o>+jX>z~D*WnTseV#8ko zZrT{0%56m^-it0*MT)%@dqX63pmI}D{V!^0W~t%WAje?f#)S}>u!+wgB#g8Ydq-ye z-g!g+`>FGJN1|z@`1-JwWp4u$!@#^+O;k3%XBMM;YrRSLb%SxO-NvDBPev=P)%M;+ zVCYYG42+P1CV;Jf?Tflzd7@l#1-TRfX^q`=pDk`9DakP3&&DB%A*!vfKexs5X05G` zRoDpTM3A^tJET|uEYD}nkzzGoht>j`o_9xifBP~|q?_bV7=b6I84j>v(OO%k7Z4Sp ziB=3O!W6z(0Sdxl$^gE|q%CKQt<7uWD4n-l~xiAVu z*{swi<_x{~9M^0IGMJc!+F@bIVe#R-IeNPp`*ntIDy7GUI-(c7SnTz^e%poge$fV1 z_W~$4>l_ph!-ML1`|H*h^SQ9JwyA!eL25EaH%4;Q+$kgu=|S3>SSV*%(&54QtM&jYoTFLU+os z-*44v4jnvfP4=7i|K4fs?}TtQwxPh|{L)fbB>lue20}8nIo(s0z_7-3>j3Acy{*pv zpL5e9B#XakCslkWG5XLxooSn$-ksQk+Wh=^_NV|0`}mQj`BjT>MBhyuW^t6tM_PV0 z$RlWHZz@ib0)4G3AR|Q3-C5@bA?fGAZAhjyt=%#u?u0Xx@zl7TohRK6Rl9JMtM~T& zxetpFFfdAlTt5yiIX@#4>Key5_eg>DX>E^TksFIWvRF)@XJz!B#3xtk5mdZB9QI>x 
zNpOTPTzS2`hrJ04F-MMsD60ZysG5rH0-m);avAkxrfou$>a(~6Z>pU`$4LGlTIETM zTRZpyn=hQ{YyrM)jR$`QK!SZ(js_vD_RM*DCp(NGKLOXj*O(JpGFrUYD0lNI9F>Op zuRdG7rpgT|!4mI~{lD0I>!`N6t#34i7ARU=f@>*K+~vWcSZRwp!Civ{E3|mf;?^Ri z6bTz2y9ZU>}~@ysNSdn0R{3F>>Vm^yqR^N9crPuiHd;vWn?_4v=aN552ALmS#93N+EJl zD9J82VtE%>H^d5~KF!+Tuwi)9?KdJ-mOu(lsTAr|@~F~mTzcqkG>oLt!CHM^XD|cj zP)XsF3k33lwbu<%}1S9Sk-vO<<$oj ze!9cofap@2@3*GXzfRPdo$5^aLl;d2T;{?-Q~Y_KwU{7t3hdF0-drE18ay=nx@hUs zq(tXgUHvotO^&HXrK_%=#MOsah^@r-Wh0K}{q@c^@34+)-VvcvEY)x2oei-Duz!lg zTqU+5Do;7&Xlare+%tM#hM8A~hIL>0I(p`sS$Hge8Wc<@o4-0H_pO zZa!AnNbhojtgN6sA!A5JJ3CB}ygb(H=jB|dPEpnn{pxVH8S{vU&P2aWf5Z1jgEVpY zvN)Yq(86`!M5bRYjvHU9by|J%>$Xdvic5NC8+TpyG-PfrhDO?@n^QF7ps?&LfufAS z>1Q{SHZuH9WD)cFp_YuqI{L{*Cx@NLSYW2Hp+hWt zlzoG@WbD&&YJp45sZ~0i@?Hz8&r@hqB7>A3j}FjdAUHZkP!)E$cav3&(4inzytyp@ z;l_#U=LIT@bq#q*(gw?lJ5`aT3a3MFYdlKxv7%aZ{CN(*;RC>H<6~f4UVr_wj6t8s zy1L7BoNNJS26*FR_~;*uu-5&&Pu+Gg&@vT#PZgGas)R`Yzr6q zsh+UdsGngVO_D^}dVD%)JRITUcY^(txXr2T+R~dV{K1#4V3mNh4#>pWpn-0Qp?vdx zCP;hSlqGFd2}iRMXsLFAHGkmKRO9RLbF6~Vd$28_%>He|9^c;X>zs={=ek`!8S~S$ z7=}fDOeW=NbwvR`6tMo)U9vqfr9BEG!$Xy(ab|fL5`t^bLIxR{YW==6+O|^_ktI-{ zRm$fEEXaA9Gsmhdypq5PN(IS#M%VZG&JER#hW5k`@6imCbHeK5?`nxc6pELG#_hTg zsAf^nL!Ck?0v%H!iM7hH&0Q6KBM$)A4p!jEpPP{J^Xiq}oOGJ~yiBJaWd=D*K?t2> z^O9QkVLA=fOzt!0g*@wNjVVzZ5k-{)u^IGK@Mv5+TTD=m!|Z{-QE;aOSj`!=OeIC= zG+}dJY4VRe@3OMr0M66R)?ttfoU^(LRjqKe@1Cw^kAgS0A{@$Z?71UB5djv}pHoB; z@`U{;P@|&>V-hqD@y%HWBmDwYfl-&N3{sAJzv4bS%@$_53Li2&+B~{MfpIf(`Mco4 z5WRO~IX+GNMa}oR&QWcVOQ^~A8;!L}RKyyb;`1h0PCq5%hiYach^63il=nL0;_nw0Rt10M=AXMYxDJPXLrc~M8bnL>L4`+MHK=azcVHM?+fd)9~7JK}Xit{1Fa zEBY#v?K-tzkzqKp;6%A>ahmD1DZWhRwoNaMyIFZU;C}y)J%N)y--Pnx6WyMH=QyrJSn`D-j#pEDYa+x7$Jxaqq=IX zAbBoK6qa!b`OCnS-9cGK@79Vq>30VZiU9&{QC3TY63d0L2lvf@VE>AF=qCbt2}y_b zs`~V>`AR9jQ^Kcn6wEO^j(fLlw6PP6Y<{z27LD@D=UkY?1YJ@hTR4nRnr6sJA~TjB4rvK=Z!7q!+H;(?afKMG^o*#Nksue*tL#53rN(7l#x$94fir}n zkY^*sX_%Pqwyb^C@NDA!kif_}KTQQ@YSPVxhx_FQs-jgShAL!<`};T`Gju-*$9W^y zitITw++ppJ>;6)NWTC#kj~o*OYTE(whB!k~^hYr)2}8KNY3YaE2Q|B(n~K!at8b5u z*#hPeQ*NR?Yt7yH*x)*^+V4#*yt9rrnWlBt$ibygR{_E&CRfh%QPFvE-ugOxMieyE z;Sd^(SuD)*Gyh2^VvOs$4qjeSih7#Z=V@7@MGH9~=KD}5ETni*oF=r%jjfnEJOIBb zv8}UfDprcv|>w_rhWDXRY|QPAt^sSepbF7s;a3NuTY|{qGXn0 zcBgfTz&9IR%9*aYQ|__k8rH6B5yyA9HrfPW0bCTycgLGb^I&cJd5nzi*4{K9R`)e% z?8%<{yf5#)nHX*-o?ldI6YfzBk7tTUzxtFKp}()f-AVJF!3Hgkb3G%Bxb)hGg?yAh6gQ<&R9ygTbsokB-su>nKI@i9Rts!q3qIwrOxSbiY;vep zp4JS{__4K;v!TMJV5wxH_1uAW&;_#e90b-}E%{b2elTlYI}vm|B#akE5fL$P0oxD> z5qD2HSWmw0dS5sSKO~4v^9^_2?K$wR*PGId9SBwC;!0#qD4%7de@Da~4VkkjbF*&n z<_e>TGVhaih>)~?%9tW9Q3YV;N)bxkU6qG$>}APYz8{rLiGWB5i+9Im4_=Y`Kwh*( z{@EK|Tf-Xm7#BXR{AC?IsrVF9t5PEONedF%qI3lM0wYFwnufr`AL*XtaVVdZjkp5+k%ZVjU4@BKqA%+ z5?TnIN3BOpt9ysWN^|HG_NhBTJf)M=AzXT_gPf3JiGOyE<&@TrdWX@?+a<=h2|qg( zYJ||X$A84L z&!@eUwbYP70>W;V*{XS~BpPpzN_5I;bz3{=&h?j7tb$(MfbfNs8lp(UdqWj^-x>il z@2$4}s^|ijITu>N>1vuzbAv9|JUApmeu_%&ppV4;DQGS~CY&GwSA6%_guL2q;X}6t z@X5#d1KboL5AjtmirG+w7{kSBQVScQ$^%1cfv?nw7blf-VJ$jLbyvIfDGY0s?yB$$s}!XcP$d+pH5*%mH3ajy9dd)-i}^a)KpuMN0NF}M-GQBB#nbDC4}o?m~zVLr^X8z!8uqr28OTgCd^ zrJ^6(aw#S(KT`mq?g<@l0Av$z5A@*EKhtsY$xD+3P#2EmEYWmN{=ry{@~YH~o}>Io zzm+oU;+qz^Un5E+oK-*yhL2-CTRi@cc=uj@s(vV>V*$@P_(sD8MjJvP@oF}Pub&sV zWZD+=aHjlyyr5^XD*d!Th&^J9FW8iDd^qhsRkpG$hjfnK5^Tim`zewI`f&YlG;gDT z@=ykA+c{%5D%;mGI_U_T_<0>1llU78xD(mP>4sj$Z?_b0u6^5yA=F2+@sJGD8&J90 zhJU$!E`Ty3rJ4>*jgIWR$U#Y3AHInTQKu%AA4K6paKUBp4DSw2?q;HQV@h<9&4>E| zO6uGdx~grl>*pk>Ulxihxz7Y`h}`WOTepVNBnY|<05dtAHfUIV%<19lEK@7uqwFz_P_P$AA#7%a4FJK1krM&0 zdKRrx_GBdX8>~J&0)7lbTL>il_U;YJ&<BTa{!D@?=^~UZd)Jr)lE0C88{S>wYxSgNg-R5fFNR_c_eN~S7GD|jHjaBJ*64*G(Wd8c5N|FQf4 
z4`G>eQ$6$kSnz#Pg9_l~26J_)#-C_1iI*z}n9W)FCWDZxUI{m*bn24^>Ev8HxH|o# zW0oiCq?ut?Vu#QExMnx{JT3GVD^{sFeQ2uV(%=uwN^W|h)=CLl_cYv(m+Q9LDJ`Xu zuwknhbj#4;_UgGJctr>5_xT@qc-HxKc4t#Q{j6r0kgC$NaRr@$JY*iDEnK6K#S(|x z^uNj3EgQ>MB#(ebCz1&cOUeO1tBCE+eU`zFnj=SB23;8!wHrjNox zRO?kfDCgPk)YEmn|NQhVkt^4+bqPajOis&@)N~nK^avCguHlhoAecXH@aMhf-w-Dx z4k}K-6em?V{h`eyOO+t~i-GxN9AB_7Y-`;9Eqe#Uq)#BHM7(~z4}v8}TvizegvW9> zU$%knJR_;tGV*#D>I@N@S_d8mN5cKS!SuTo&R=|wmGBo=N=7F7 zu1q*9>%dnoo74pfOKX*{<^Bh(21jM!{~xQo|6F7rGX8$`6~#Q|7a_6r#7wk4*b@|8 zS4T&=?NplAD17+)S##IbfVR8#eb_crqcvrxc?|`VqtnI)dgoN{OXO99=gu?^F$kt- z^c0Lq2X^qHv(PV9H4|x`VvPY%au{H8I-M8yY+9m3;~XnA9kx81y4be{d34#L?zMOWb*TF! zU5_HH@^Wo2@z^c;m-vkZ+4D}rWO7%eq)x)#I#a$;xOE?tv3z23(U9od8rcs z!g{9d4i-PSQG&|Lh|Mz;o^IF;iuK@MYW{=9&jar*;*YsRS9{0dVl~jTLHpM|+ahQf z52K@!58%=Z>{{4&hSbtH280idxa^O4IGE)U z6zSC0$1@Gv3(uig=9(HzCmYYl|0RyGrn0^`T`eu79(rXs0Xuz@;Y-K4a^Y{bC30BQ zu;6?54$L?GZQXv3u`OI*DSRHFkuj9Zbi6~nKOeMU^zzsI&CWliUxQigWmvY!HR#X`^-XW#eJbFgL*-JcYaZ{sOkBl&#uTsnU`%go+|kE!eI^fTBY_8v)?P_W zufHnkwyv>#PG@35X&_0OMrW>SJTMJ_1Q%U{2i=qz?un% z%V%56{BWMzvo!g$w50%9^Jj)1XVVs+1ODRQ!k8vGmhjEsnT$;5V@mDfTkLJ?lN9_s zhOQKNvovVyrlFH!amK#})3R8#d?o2|Cy!zWjb7=SCxkxtd8^kVN5JSR= z?a8Z=fE}9kf4P_cV{ZK4hVI|p)!)W%|L(4a|L1g9|8BMaZnghzwf_iXg*?G#B=|?L z|Nk4TivJ&Jk^YUea<=sFbhk2h!bV9h7WUYD!t^}!e+m*3^e<)YJUw1pxy!jYxw<%8 zIeXHJ(7%*(addHi>uL_NqJJrG1-1iOy-||=$L@FT=FT40F78e!9WNE_96hbvQ76g0 zleYr7Sfb8Xwz9La^`sXN6sCXq*2}`v*A=x>#mU?T_3`d+U-UFB?5#ka^e=VoEIn=M z`T0?2s#!VPpkzK?ULjt3DJkrK6!`bd?`6QVSBff%fJcu2fJdko;P)Ir4uJ9a3C5Gh z7#L45FflQ(u%BULKYfZ#f=__^jGTmmf}DhmjEaVZjta;`O-4q~NzcT}#{Pnxl8%dq z>p2h0bN1(dDtUy7iHVJcO^l6A{G5u6>iK_r`~3qzi1n!Nkrw(RMgSV&BXq(?zq;iakk1Bpa&mU_Lw24Y~RbK5ENJpb|6g}lDUd+Y!(`V znWhfLB<6102KSEp?_Iwp^Z2jDTT_)k=@^dJp5rIwXeBwH#{FN__5yRx`I$B84N!r z2s1Z>eu1?FY~q12FEASng1R{LfjV|sxzE--oE@ksZlsuiG})KHt#g@T&Qv^|s6#@F zUq~AXVuL7AV3lZw$s-`m9Vt|wo2r_Wo3VSg>Oq;ImLySJ{-j2N$W#0ZduNGMWxi4* zpE43|T>pj^&`3=<#9%FR$%&OE`UC#}YxH>i+DIh)qpeUXX+|0h?^o)J4xQ&k{#h+| z&FFrcNU8j~GcugWhPp+$D6SK$1NDyq9ZXD~aXV+79dxcYc<5zZRa?Lni`FioT_k^c z)w_M&7{!ZpeL4nkB)aK`>#+SH`>^BwD6@b&Wloc7stuHD^++&DSk$#q}B#|HGux}fQ) zj1Yz*d_m|Zk88&`56JtanOt$HR%(SV2~qDb=c-KuPDE@erM$XQ;yRy0y+H7z&w<(S z;`5|NkYBF6!s~yK);yL+J*6^Dc4IPRzKBczxu&%zX|8)4Q zO-}r-?^gKwc+6&wj%9Fopknr}?cTSYtob*fyHLsXkjS#ZEA^o-Azsy;Hgt0xLs1wa z5qmP`4@SX4JeKWRFSl@iZ3@@(C#UxoXSO>axQ92K%RD=@fh?6zcZ-WAW%8%iC)D)n zOQz$-IdbBfvCR~7|67@rpciqgHQ*rHx zH*6kZ9epn?^U!4w;I7B}IVSCbGcfb;mu4_#?t`5GM-OE=0xtEqzf-!g>zsx<}HEwLlb-{j#YsEem&aU*LZ;(0$ zjl9nYQ-wI=qO}|lPqWa(1cIH^m6P|c*&tTkK6U3-e|yuLs`=c3?+k$ygd|= z>fMMrY@T(ytd~zC%zg~yG*=<0KgUFUmFE#W7a$ir^BVK%20s6mFAC&F>(un+w2f*0 zR`K|fFn9BV@twDRnn)@!v=Bood{|TGh4+kg1}pDDMwc<~rCcvAb=kwHlt$`!H${2< z?5cRXQNn{$^vcp+ssVA^~1h_tx1eW^MditT+`tLqA?%qOmS??lM{#lzSfuJ zYrFHrNNe5}AP4U?Q1mUjDVg9jQ~4a%E>F%IxMc7i8Y(qetJRTx#La|?nlxiX$^9u? zkAHd@son=!XPr=vg3YZUUf_X{@a|`F<2KfxGvK$3it<)`A}booVvgHY8ae_E(wSC8 zN${6WH=jpF2{F8%cJ@3-89{f?VQEt;W5hG~Zi(t>dOZTy2LI__D zo-rW{X)BO?dJ#+!^s*4)8_rh56C587eQF8O7l#%0FY<6S99!KVoIK~J>Yb@u=PFD& z2!Dz`cb(UY>N9YR3n@(7gn$4IwdId@E%6>o#0jVd1??*ks_yr9&a&bYEa{sX^FP|y zQfW?dbP~$8LnL1-hK9J@7s<937gw?G*JV{HStVjIOJ39EdG4cs`K8!8?Lz3tD{Aj1Ke~05IFLZ+vb(?>7Gw_4B$(R3Kh=jET}? 
[GIT binary patch: base85-encoded binary data, line structure lost in extraction; payload omitted]