From 31b27926b955002751377909fe9cd82dd9415112 Mon Sep 17 00:00:00 2001
From: mariosasko
Date: Wed, 31 Mar 2021 02:59:15 +0200
Subject: [PATCH 01/26] Fix references in colabs

---
 docs/source/scripts/convert_doc_to_notebooks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py
index 4ec1608d..f518d3e2 100644
--- a/docs/source/scripts/convert_doc_to_notebooks.py
+++ b/docs/source/scripts/convert_doc_to_notebooks.py
@@ -272,7 +272,7 @@ def convert_math(text):
 def convert_anchor(text):
     """ Convert text to an anchor that can be used in the notebook."""
     anchor_name = _re_anchor_section.search(text).groups()[0]
-    return f""
+    return f""


 ###################################

From c7f23d6b66cd45082e8d04870621119e66e3ba80 Mon Sep 17 00:00:00 2001
From: Martin Tutek
Date: Wed, 31 Mar 2021 16:56:14 +0200
Subject: [PATCH 02/26] Rework examples

---
 docs/source/examples/tfidf_example.rst | 105 ++++++++++
 examples/dataset_example.py            |  25 ---
 examples/experiment_example.py         | 150 -----------------
 examples/ner_example.py                | 213 -------------------------
 examples/ner_pipeline.py               | 191 ----------------------
 examples/notebooks/img/hook.png        | Bin 8521 -> 0 bytes
 examples/vectors_example.py            |  26 ---
 7 files changed, 105 insertions(+), 605 deletions(-)
 create mode 100644 docs/source/examples/tfidf_example.rst
 delete mode 100644 examples/dataset_example.py
 delete mode 100644 examples/experiment_example.py
 delete mode 100644 examples/ner_example.py
 delete mode 100644 examples/ner_pipeline.py
 delete mode 100644 examples/notebooks/img/hook.png
 delete mode 100644 examples/vectors_example.py

diff --git a/docs/source/examples/tfidf_example.rst b/docs/source/examples/tfidf_example.rst
new file mode 100644
index 00000000..0d904c68
--- /dev/null
+++ b/docs/source/examples/tfidf_example.rst
@@ -0,0 +1,105 @@
+
TFIDF + scikit-learn SVM
=========================

In this example, we will cover a once popular family of models -- support vector machines (SVMs) with TF-IDF representations. As a simple demonstration, we will analyse binary classification on the Stanford Sentiment Treebank (SST) dataset.

First, we will implement a minimalistic example without much additional preprocessing. Since we're using a TF-IDF representation of our dataset, it is wise to limit the vocabulary size, as every word in the vocabulary adds a dimension to each instance's TF-IDF vector. Let's load the SST dataset and convert it into a single batch:

.. code-block:: python

    >>> from podium import Vocab, Field, LabelField
    >>> from podium.datasets import SST
    >>> vocab = Vocab(max_size=5000, specials=())
    >>> text = Field(name='text', numericalizer=vocab, disable_batch_matrix=True)
    >>> label = LabelField(name='label')
    >>> fields = {'text': text, 'label': label}
    >>> train, dev, test = SST.get_dataset_splits(fields=fields)
    >>> train.finalize_fields()
    >>> x, y = train.batch(add_padding=True)

We have now loaded our dataset, finalized its Fields and obtained it as a batch of input and target data. What we need to do next is define the TF-IDF vectorization for each instance in the dataset. This is done by using our :class:`podium.vectorizers.TfIdfVectorizer`, which adapts the ``scikit-learn`` vectorizer to the Podium input data.

.. 
code-block:: python

    >>> from podium.vectorizers import TfIdfVectorizer
    >>> tfidf_vectorizer = TfIdfVectorizer()
    >>> tfidf_vectorizer.fit(train, field=train.field('text'))
    >>> tfidf_batch = tfidf_vectorizer.transform(x.text)
    >>> print(type(tfidf_batch), tfidf_batch.shape)
    <class 'scipy.sparse.csr.csr_matrix'> (6920, 5000)

We have transformed the train dataset to a sparse matrix containing TF-IDF values for each word in the vocabulary in each instance. What is left to do now is to train our classification model:

.. code-block:: python

    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.metrics import accuracy_score
    >>> # Train the SVM on the training set
    >>> svm = LinearSVC()
    >>> svm.fit(tfidf_batch, y.label.ravel())
    >>> # Obtain accuracy on the train set
    >>> y_hat = svm.predict(tfidf_batch)
    >>> acc = accuracy_score(y_hat, y.label.ravel())
    >>> print(f"Accuracy on train set: {acc:.4f}")
    0.9597

And for a more accurate performance evaluation of our model we turn to the test set:

.. code-block:: python

    >>> test_x, test_y = test.batch()
    >>> tfidf_test_batch = tfidf_vectorizer.transform(test_x.text)
    >>> y_test_hat = svm.predict(tfidf_test_batch)
    >>> acc = accuracy_score(y_test_hat, test_y.label.ravel())
    >>> print(f"Accuracy on test set: {acc:.4f}")
    Accuracy on test set: 0.7946

Our basic unigram TF-IDF linear SVM performs pretty well on the SST dataset, reaching an accuracy of almost ``0.8``. While this example encapsulates the basics of using Podium with ``scikit-learn``\s ``SVM``, we will delve a bit deeper and consider some additional preprocessing.

Using ngram features
---------------------

So far we have only considered basic unigram features for our model, which is somewhat limiting. Apart from that, we have not applied any preprocessing to our dataset, and our dataset is cased -- which might be detrimental to the performance of our model since we aggressively trim the vocabulary size.

We will do two things: (1) implement a pre-tokenization hook to lowercase our data, which is safe in our case since our tokenizer (``str.split``) is unaffected by casing, and (2) implement ngram extraction as a post-tokenization hook. For a more detailed overview of hooks and how to use them, check :ref:`fields`. We will first implement our lowercase hook:

.. code-block:: python

    >>> def lowercase(raw):
    ...     """Lowercases the input string"""
    ...     return raw.lower()

And then implement flexible ngram extraction, where ``n`` spans an interval, using ``nltk``\s ``ngrams`` function:

.. code-block:: python

    >>> from nltk import ngrams
    >>> class NGramHook:
    ...     # Transforms a sequence of unigrams into a sequence of
    ...     # [min_n, max_n]-grams
    ...     def __init__(self, min_n, max_n):
    ...         self.min_n = min_n
    ...         self.max_n = max_n
    ...     def __call__(self, raw, tokenized):
    ...         tokenized_ngrams = []
    ...         for n in range(self.min_n, self.max_n+1):
    ...             tokenized_ngrams.extend(ngrams(tokenized, n))
    ...         return raw, tokenized_ngrams
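To see exactly what the ngram hook produces, we can run it on a toy token sequence -- ``nltk``\s ``ngrams`` yields tuples, so our tokens become tuples as well:

.. code-block:: python

    >>> ngram_hook = NGramHook(1, 2)
    >>> _, tokens = ngram_hook("a simple example", ["a", "simple", "example"])
    >>> print(tokens)
    [('a',), ('simple',), ('example',), ('a', 'simple'), ('simple', 'example')]

We will now incorporate these two hooks into our text input Field:

.. 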
code-block:: python

    >>> ngram_hook = NGramHook(1,3)
    >>> vocab = Vocab(max_size=5000, specials=())
    >>> text = Field(name='text', numericalizer=vocab,
    ...              disable_batch_matrix=True,
    ...              pretokenize_hooks=[lowercase],
    ...              posttokenize_hooks=[ngram_hook])
    >>> label = LabelField(name='label')
    >>> fields = {'text': text, 'label': label}
    >>> train, dev, test = SST.get_dataset_splits(fields=fields)
    >>> train.finalize_fields()

diff --git a/examples/dataset_example.py b/examples/dataset_example.py
deleted file mode 100644
index 5a51b2f6..00000000
--- a/examples/dataset_example.py
+++ /dev/null
@@ -1,25 +0,0 @@
"""
Example how to use simple PauzaHR dataset.
"""
import dill

from podium.datasets import SST
from podium.storage import LargeResource


if __name__ == "__main__":
    # for large resource settings see
    # https://github.com/mttk/podium/wiki/Large-resources
    LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets"

    train_set, _, test_set = SST.get_dataset_splits()
    print(f"train set {len(train_set)}")
    print(f"test set {len(test_set)}")

    # save dataset
    with open("dataset.pkl", "wb") as f:
        dill.dump(train_set, f)

    # load dataset
    with open("dataset.pkl", "rb") as f:
        loaded_train_set = dill.load(f)
diff --git a/examples/experiment_example.py b/examples/experiment_example.py
deleted file mode 100644
index 1a64b8e3..00000000
--- a/examples/experiment_example.py
+++ /dev/null
@@ -1,150 +0,0 @@
"""
Example how to use model on simple PauzaHR dataset using the Experiment class.
"""
import os

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler

from podium import Field, LabelField, Vocab
from podium.datasets import ExampleFormat, Iterator, PauzaHRDataset
from podium.experimental.model_selection import grid_search
from podium.experimental.models import (
    Experiment,
    FeatureTransformer,
    SklearnTensorTransformerWrapper,
)
from podium.experimental.models.impl.fc_model import ScikitMLPClassifier
from podium.experimental.models.impl.simple_trainers import SimpleTrainer
from podium.experimental.pipeline import Pipeline
from podium.experimental.validation import k_fold_classification_metrics
from podium.storage import LargeResource
from podium.vectorizers.impl import NlplVectorizer


def numericalize_pauza_rating(rating):
    """
    Function numericalizes pauza_hr dataset rating field.
    """
    label = round(float(rating) * 2) - 1
    return label


def basic_pauza_hr_fields():
    """
    Function returns pauza-hr fields used for classification.
    """
    rating = LabelField(name="Rating", pretokenize_hooks=[numericalize_pauza_rating])

    text = Field(
        name="Text",
        numericalizer=Vocab(),
        tokenizer="split",
        keep_raw=False,
        fixed_length=100,
    )

    return {"Text": text, "Rating": rating}


def label_transform_fun(y_batch):
    return y_batch.Rating.ravel()


def experiment_example():
    """
    Example of setting up and using the Experiment class.
- """ - - LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets" - - fields = basic_pauza_hr_fields() - train_dataset, test_dataset = PauzaHRDataset.get_train_test_dataset(fields) - - num_of_classes = len(train_dataset.field_dict["Rating"].vocab.itos) - - vector_cache_path = os.path.join( - LargeResource.BASE_RESOURCE_DIR, "experimet_example_nlpl_cache.txt" - ) - - vectorizer = NlplVectorizer(cache_path=vector_cache_path) - vectorizer.load_vocab(vocab=fields["Text"].vocab) - embedding_matrix = vectorizer.get_embedding_matrix(fields["Text"].vocab) - - def feature_transform_fn(x_batch): - """ - Batch transform function that returns a mean of embedding vectors for - every token in an Example. - """ - x_tensor = np.take(embedding_matrix, x_batch.Text.astype(int), axis=0) - x = np.mean(x_tensor, axis=1) - return x - - trainer = SimpleTrainer() - - tensor_transformer = SklearnTensorTransformerWrapper(StandardScaler()) - feature_transformer = FeatureTransformer(feature_transform_fn, tensor_transformer) - - experiment = Experiment( - ScikitMLPClassifier, - trainer=trainer, - feature_transformer=feature_transformer, - label_transform_fn=label_transform_fun, - ) - - _, model_params, train_params = grid_search( - experiment, - test_dataset, - accuracy_score, - model_param_grid={ - "classes": ([i for i in range(num_of_classes)],), - "hidden_layer_sizes": [(10,), (10, 10), (100,)], - }, - trainer_param_grid={ - "max_epoch": [2, 3, 4], - "iterator": [Iterator(batch_size=32), Iterator(batch_size=64)], - }, - ) - - experiment.set_default_model_args(**model_params) - experiment.set_default_trainer_args(**train_params) - - accuracy, precision, recall, f1 = k_fold_classification_metrics( - experiment, test_dataset, 5, average="macro" - ) - - print( - f"Accuracy = {accuracy}\n" - f"Precision = {precision}\n" - f"Recall = {recall}\n" - f"F1 score = {f1}" - ) - - experiment.fit(train_dataset) - - dataset_fields = {"Text": train_dataset.field_dict["Text"]} - - pipeline = Pipeline( - fields=dataset_fields, - example_format=ExampleFormat.XML, - model=experiment.model, - feature_transformer=feature_transformer, - ) - - example_good = ( - "Izvrstan, ogroman Zagrebački, " - "dostava na vrijeme, ljubazno osoblje ..." - ) - prediction = pipeline.predict_raw(example_good) - print(f"Good example score: {prediction}") - - example_bad = ( - "Hrana kasnila, dostavljac neljubazan, " "uzas..." - ) - prediction = pipeline.predict_raw(example_bad) - print(f"Bad example score: {prediction}") - - -if __name__ == "__main__": - experiment_example() diff --git a/examples/ner_example.py b/examples/ner_example.py deleted file mode 100644 index c73e48a1..00000000 --- a/examples/ner_example.py +++ /dev/null @@ -1,213 +0,0 @@ -""" -Example how to use BLCC model on Croatian NER dataset for NER task. 
-""" - -import pickle -import sys -from collections import namedtuple -from functools import partial - -import numpy as np -from sklearn.metrics import f1_score - -from podium import BucketIterator, Field, Vocab -from podium.datasets.impl.croatian_ner_dataset import CroatianNERDataset -from podium.experimental.models import FeatureTransformer -from podium.experimental.models.impl.blcc_model import BLCCModel -from podium.experimental.models.impl.simple_trainers import SimpleTrainer -from podium.storage.resources.large_resource import LargeResource -from podium.vectorizers.vectorizer import BasicVectorStorage -from podium.vocab import PAD - - -Inputs = namedtuple("Inputs", ["tokens", "casing"]) -# using the same label set as original CroNER -label_mapping = { - "Organization": "Organization", - "Person": "Person", - "Location": "Location", - "Date": "Date", - "Time": "Time", - "Money": "Money", - "Percent": "Percent", - "Etnic": "Etnic", - # remapped or unused types - "PersonPossessive": "Person", - "Product": None, - "OrganizationAsLocation": "Organization", - "LocationAsOrganization": "Location", -} - - -def label_mapper_hook(data, tokens): - """ - Function maps the labels to a reduced set. - """ - new_tokens = [] - for token in tokens: - if token == "O": - new_tokens.append("O") - continue - - prefix, value = token.split("-") - - mapped_token = label_mapping[value] - if not mapped_token: - new_tokens.append("O") - else: - new_tokens.append(prefix + "-" + mapped_token) - - return data, new_tokens - - -def casing_mapper_hook(data, tokens): - """ - Hook for generating the casing feature from the tokenized text. - """ - tokens_casing = [] - - for token in tokens: - token_casing = "other" - if token.isdigit(): - token_casing = "numeric" - elif token.islower(): - token_casing = "lowercase" - elif token.isupper(): - token_casing = "uppercase" - elif token[0].isupper(): - token_casing = "initial_uppercase" - - tokens_casing.append(token_casing) - - return data, tokens_casing - - -def feature_extraction_fn(x_batch, embedding_matrix): - """ - Function transforms iterator batches to a form acceptable by the model. - """ - tokens_numericalized = x_batch.tokens.astype(int) - casing_numericalized = x_batch.casing.astype(int) - X = [np.take(embedding_matrix, tokens_numericalized, axis=0), casing_numericalized] - return X - - -def label_transform_fun(y_batch): - return y_batch.labels.astype(int) - - -def example_word_count(example): - """ - Function returns the number of tokens in an Example. - """ - return len(example.tokens[1]) - - -def ner_croatian_blcc_example(fields, dataset, feature_transform): - """ - Example of training the BLCCModel with Croatian NER dataset. 
- """ - output_size = len(fields["labels"].vocab.itos) - casing_feature_size = len(fields["inputs"].casing.vocab.itos) - - train_set, test_set = dataset.split(split_ratio=0.8) - - train_iter = BucketIterator(batch_size=32, sort_key=example_word_count) - - model = BLCCModel( - **{ - BLCCModel.OUTPUT_SIZE: output_size, - BLCCModel.CLASSIFIER: "CRF", - BLCCModel.EMBEDDING_SIZE: 300, - BLCCModel.LSTM_SIZE: (100, 100), - BLCCModel.DROPOUT: (0.25, 0.25), - BLCCModel.FEATURE_NAMES: ("casing",), - BLCCModel.FEATURE_INPUT_SIZES: (casing_feature_size,), - # set to a high value because of a tensorflow-cpu bug - BLCCModel.FEATURE_OUTPUT_SIZES: (30,), - } - ) - trainer = SimpleTrainer() - feature_transformer = FeatureTransformer(feature_transform) - - print("Training started") - trainer.train( - model=model, - dataset=train_set, - feature_transformer=feature_transformer, - iterator=train_iter, - label_transform_fun=label_transform_fun, - max_epoch=1, - ) - print("Training finished") - - X_test_batch, y_test_batch = test_set[:32].batch() - X_test = feature_transformer.transform(X_test_batch) - y_test = label_transform_fun(y_test_batch) - - prediction = model.predict(X=X_test)[BLCCModel.PREDICTION_KEY] - # pickle for later use - pickle.dump(model, open("ner_model.pkl", "wb")) - - pad_symbol = fields["labels"].vocab.padding_index() - prediction_filtered, y_test_filtered = filter_out_padding( - pad_symbol, prediction, y_test - ) - - print("Expected:") - print(y_test_filtered) - - print("Actual:") - print(prediction_filtered) - - f1 = f1_score(y_test_filtered, prediction_filtered, average="weighted") - info_msg = f"F1: {f1}" - print(info_msg) - - -def filter_out_padding(pad_symbol, prediction, y_test): - """ - Filters out padding from the predictiopytn and test arrays. - - The resulting arrays are flattened. - """ - indices_to_leave = np.where(np.ravel(y_test) != pad_symbol) - y_test_filtered = np.ravel(y_test)[indices_to_leave] - prediction_filtered = np.ravel(prediction)[indices_to_leave] - return prediction_filtered, y_test_filtered - - -def ner_dataset_classification_fields(): - """ - Function creates fields to use with the Croatian NER dataset on NER task. - """ - tokens = Field(name="tokens", numericalizer=Vocab(), tokenizer=None) - casing = Field(name="casing", numericalizer=Vocab(specials=(PAD(),)), tokenizer=None) - labels = Field( - name="labels", - is_target=True, - numericalizer=Vocab(specials=(PAD(),)), - tokenizer=None, - ) - - casing.add_posttokenize_hook(casing_mapper_hook) - labels.add_posttokenize_hook(label_mapper_hook) - - return {"inputs": Inputs(tokens, casing), "labels": labels} - - -if __name__ == "__main__": - vectors_path = sys.argv[1] - LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets" - - fields = ner_dataset_classification_fields() - dataset = CroatianNERDataset.get_dataset(fields=fields) - - vocab = fields["inputs"].tokens.vocab - embedding_matrix = BasicVectorStorage(path=vectors_path).load_vocab(vocab) - - feature_transform = partial(feature_extraction_fn, embedding_matrix=embedding_matrix) - - ner_croatian_blcc_example( - fields=fields, dataset=dataset, feature_transform=feature_transform - ) diff --git a/examples/ner_pipeline.py b/examples/ner_pipeline.py deleted file mode 100644 index 9d58e12e..00000000 --- a/examples/ner_pipeline.py +++ /dev/null @@ -1,191 +0,0 @@ -""" -Example how to use BLCC model on Croatian NER dataset for NER task. 
-""" - -import os -import pickle -import sys -import time -from functools import partial - -from ner_example import ( - example_word_count, - feature_extraction_fn, - label_transform_fun, - ner_dataset_classification_fields, -) - -from podium.dataload.ner_croatian import convert_sequence_to_entities -from podium.datasets.impl.croatian_ner_dataset import CroatianNERDataset -from podium.datasets.iterator import BucketIterator -from podium.models.impl.blcc_model import BLCCModel -from podium.models.impl.simple_trainers import SimpleTrainer -from podium.pipeline import Pipeline -from podium.storage import ExampleFormat -from podium.storage.resources.large_resource import LargeResource -from podium.storage.vectorizers.vectorizer import BasicVectorStorage - - -class CroatianNER(Pipeline): - """ - Pipeline used to train named entity recognition for Croatian. - - It is designed to work on the croopinion dataset, but makes no assumptions - on the underlying data, except that the data is tokenized before hand and - labeled with BIO tags. - """ - - def __init__(self, vector_path): - """ - Creates a new CroatianNER pipeline. Initializes fields and croopinion - dataset. Defines feature transform (word vector lookup) and output - transform (mapping labels back to BIO labels). Uses the ```BLCCModel``` - (BiLSTM + linear chain CRF) Expects examples in DICT field format. - - Parameters - ---------- - vector_path : str - Path to Croatian word vector file - - Raises - ------ - ValueError - If vector_path is None or is not a valid file path. - """ - if vector_path is None or not os.path.exists(vector_path): - raise ValueError( - f"Provided path {vector_path} is None or does not exist. " - "Path to word Croatian vectors must be defined. " - "You can use fastText vectors available at " - "https://fasttext.cc/docs/en/crawl-vectors.html" - ) - - self.fields = ner_dataset_classification_fields() - self.dataset = CroatianNERDataset.get_dataset(fields=self.fields) - self.feature_transform = self._define_feature_transform(vector_path) - self.output_transform_fn = partial( - CroatianNER.map_iterable, mapping=self.fields["labels"].vocab.itos - ) - - super().__init__( - fields={ - # tokens is a feature field - "tokens": self.fields["inputs"].tokens, - # casing is a feature field - "casing": self.fields["inputs"].casing, - # labels is a target field - "labels": self.fields["labels"], - }, - example_format=ExampleFormat.DICT, - model=BLCCModel, - feature_transformer=self.feature_transform, - output_transform_fn=self.output_transform_fn, - label_transform_fn=label_transform_fun, - ) - - def fit( - self, - dataset, - model_kwargs=None, - trainer_kwargs=None, - feature_transformer=None, - trainer=None, - ): - """ - Fits the CroatianNER pipeline on a dataset using provided parameters. 
- - Parameters - ---------- - dataset : Dataset - Dataset to train on - - model_kwargs : dict - model keyword argument dict forwarded to the model instance - - trainer_kwargs : dict - trainer keyword argument dict forwarded to the trainer instance - - trainer : AbstractTrainer - trainer instance (must inherit from `AbstractTrainer`) - """ - - if model_kwargs is None: - model_kwargs = self._define_model_params() - print(f"Using default model parameters {model_kwargs}") - - if trainer_kwargs is None: - # use bucket iterator to minimize padding in batch - iterator = BucketIterator(batch_size=32, sort_key=example_word_count) - trainer_kwargs = {"max_epoch": 10, "iterator": iterator} - print(f"Using default trainer parameters {trainer_kwargs}") - - trainer = SimpleTrainer() if trainer is None else trainer - - start = time.time() - print("Starting training") - super().fit( - dataset=dataset, - model_kwargs=model_kwargs, - trainer_kwargs=trainer_kwargs, - trainer=trainer, - ) - print(f"Training took {time.time() - start} seconds") - - def predict_raw(self, raw_example, tokenizer=str.split): - """ - Predicts target Fields for raw_example. - - Parameters - ---------- - raw_example : str - Sentence in Croatian. - - Returns - ------ - list (dict) - List of dicts of recognized named entities, - where each dict has keys: - name, type, start, end. - """ - tokenized_text = tokenizer(raw_example) - example_to_predict = {"tokens": tokenized_text, "casing": tokenized_text} - tags = super().predict_raw(example_to_predict) - return convert_sequence_to_entities(tags, tokenized_text) - - def _define_feature_transform(self, vector_path): - vectorizer = BasicVectorStorage(path=vector_path) - embedding_matrix = vectorizer.load_vocab(vocab=self.fields["inputs"].tokens.vocab) - return partial(feature_extraction_fn, embedding_matrix=embedding_matrix) - - def _define_model_params(self): - output_size = len(self.fields["labels"].vocab.itos) - casing_feature_size = len(self.fields["inputs"].casing.vocab.itos) - return { - BLCCModel.OUTPUT_SIZE: output_size, - BLCCModel.CLASSIFIER: "CRF", - BLCCModel.EMBEDDING_SIZE: 300, - BLCCModel.LSTM_SIZE: (20, 20), - BLCCModel.DROPOUT: (0.25, 0.25), - BLCCModel.FEATURE_NAMES: ("casing",), - BLCCModel.FEATURE_INPUT_SIZES: (casing_feature_size,), - # set to a high value because of a tensorflow-cpu bug - BLCCModel.FEATURE_OUTPUT_SIZES: (30,), - } - - @staticmethod - def map_iterable(iterable, mapping): - return [mapping[i] for i in iterable] - - -if __name__ == "__main__": - model_path = "ner_pipeline_entire_model.pkl" - LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets" - - ner_pipeline = CroatianNER(sys.argv[1]) - ner_pipeline.fit(ner_pipeline.dataset) - - pickle.dump(ner_pipeline, open(model_path, "wb")) - - loaded_ner = pickle.load(open(model_path, "rb")) - text = "U Hrvatskoj državi žive mala bića . Velika bića žive u Graškogradu ." 
    print(loaded_ner.predict_raw(text))

diff --git a/examples/notebooks/img/hook.png b/examples/notebooks/img/hook.png
deleted file mode 100644
index 1d79b306c746e72cfa690fe3be9538eb73b57934..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data for the deleted image examples/notebooks/img/hook.png omitted]
From 357b46b9278cf0461c902c686b37c113ae025d4f Mon Sep 17 00:00:00 2001
From: Martin Tutek
Date: Wed, 31 Mar 2021 21:03:29 +0200
Subject: [PATCH 03/26] Finalize tfidf example

---
 docs/source/examples/tfidf_example.rst | 46 +++++++++++++++++++++++---
 docs/source/index.rst                  | 11 ++++--
 2 files changed, 51 insertions(+), 6 deletions(-)

diff --git a/docs/source/examples/tfidf_example.rst b/docs/source/examples/tfidf_example.rst
index 0d904c68..bc62b3a4 100644
--- a/docs/source/examples/tfidf_example.rst
+++ b/docs/source/examples/tfidf_example.rst
@@ -41,8 +41,8 @@ We have transformed the train dataset to a sparse matrix containing TF-IDF value
     >>> # Obtain accuracy on the train set
     >>> y_hat = svm.predict(tfidf_batch)
     >>> acc = accuracy_score(y_hat, y.label.ravel())
-    >>> print(f"Accuracy on train set: {acc:.4f}")
-    0.9597
+    >>> print(f"Accuracy on the train set: {acc:.4f}")
+    Accuracy on the train set: 0.9597
 
 And for a more accurate performance evaluation of our model we turn to the test set:
 
@@ -52,8 +52,8 @@
     >>> test_x, test_y = test.batch()
     >>> tfidf_test_batch = tfidf_vectorizer.transform(test_x.text)
     >>> y_test_hat = svm.predict(tfidf_test_batch)
     >>> acc = accuracy_score(y_test_hat, test_y.label.ravel())
-    >>> print(f"Accuracy on test set: {acc:.4f}")
-    Accuracy on test set: 0.7946
+    >>> print(f"Accuracy on the test set: {acc:.4f}")
+    Accuracy on the test set: 0.7946
 
@@ -91,6 +91,7 @@ We will now incorporate these two hooks into our text input Field:
 
 .. code-block:: python
 
+    >>> # Use [1-3]grams, inclusive
     >>> ngram_hook = NGramHook(1,3)
     >>> vocab = Vocab(max_size=5000, specials=())
     >>> text = Field(name='text', numericalizer=vocab,
@@ -102,4 +103,41 @@
    >>> fields = {'text': text, 'label': label}
    >>> train, dev, test = SST.get_dataset_splits(fields=fields)
    >>> train.finalize_fields()
    >>> print(text.vocab.itos[40:50])
    [('at',), ('from',), ('one',), ('have',), ('I',), ('like',), ('his',), ('in', 'the'), ('all',), ("'",)]

We can see that our new Vocab now contains tuples as its tokens -- as long as an item in a sequence is hashable, we can represent it as part of a Vocab! Notice that one 2-gram, ``('in', 'the')``, has already made its way into the 50 most frequent tokens.

As before, we need to train the TFIDF vectorizer and apply it to our data (which now includes 1-, 2- and 3-grams):

.. code-block:: python

    >>> dataset_batch = train.batch(add_padding=True)
    >>> tfidf_vectorizer = TfIdfVectorizer()
    >>> tfidf_vectorizer.fit(train, field=train.field('text'))
    >>> tfidf_batch = tfidf_vectorizer.transform(dataset_batch.text)
    >>> print(type(tfidf_batch), tfidf_batch.shape)
    <class 'scipy.sparse.csr.csr_matrix'> (6920, 5000)

We can now train our SVM classification model and evaluate it on the train and test sets:

.. 
code-block:: python

    >>> svm = LinearSVC()
    >>> text, label = dataset_batch
    >>> svm.fit(tfidf_batch, label.ravel())
    >>> # Compute accuracy on the train set
    >>> y_hat = svm.predict(tfidf_batch)
    >>> acc = accuracy_score(y_hat, label.ravel())
    >>> print(f"Accuracy on the train set: {acc:.4f}")
    Accuracy on the train set: 0.9575
    >>>
    >>> # Compute accuracy on the test set
    >>> test_text, test_label = test.batch(add_padding=True)
    >>> tfidf_test_batch = tfidf_vectorizer.transform(test_text)
    >>> y_test_hat = svm.predict(tfidf_test_batch)
    >>> acc = accuracy_score(y_test_hat, test_label.ravel())
    >>> print(f"Accuracy on the test set: {acc:.4f}")
    Accuracy on the test set: 0.7743

Sadly, our new model didn't outperform the initial one, but there are many avenues left to explore, such as tuning the hyperparameters of the LinearSVC model on the development set or filtering out stop words and punctuation. We encourage you to open this example in Colab and try some things yourself!

diff --git a/docs/source/index.rst b/docs/source/index.rst
index 6ad3bfe9..d237452f 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -13,11 +13,12 @@ The goal of Podium is to be **lightweight**, in terms of code and dependencies,
 Contents
 ---------------------------------
 
-The documentation is organized in four parts:
+The documentation is organized in five parts:
 
 - **Quickstart**: a quick preview of the library,
 - **Walkthrough**: a description of how the basics work,
-- **In-depth overview**: examples of advanced usage of Podium,
+- **In-depth overview**: advanced usage options,
+- **Examples**: full stand-alone examples of NLP models using Podium,
 - **Core package Reference**: the documentation of methods and classes
   in Podium.
 
@@ -36,6 +37,12 @@
    advanced
    coming_soon
 
+.. toctree::
+   :maxdepth: 2
+   :caption: Full examples
+
+   examples/tfidf_example.rst
+
 .. toctree::
    :maxdepth: 2
    :caption: Preprocessing Tools

From f6726e49cf2271603eef3da70c080aa6cfb0d834 Mon Sep 17 00:00:00 2001
From: Martin Tutek
Date: Thu, 1 Apr 2021 02:12:16 +0200
Subject: [PATCH 04/26] Fix shuffling cost

---
 docs/source/advanced.rst                     | 8 ++++----
 docs/source/examples/pytorch_rnn_example.rst | 4 ++++
 docs/source/index.rst                        | 1 +
 docs/source/walkthrough.rst                  | 2 +-
 podium/datasets/iterator.py                  | 6 ++++--
 5 files changed, 14 insertions(+), 7 deletions(-)
 create mode 100644 docs/source/examples/pytorch_rnn_example.rst

diff --git a/docs/source/advanced.rst b/docs/source/advanced.rst
index ee77b13d..d0815964 100644
--- a/docs/source/advanced.rst
+++ b/docs/source/advanced.rst
@@ -448,18 +448,18 @@ The ``bucket_sort_key`` function defines how the instances in the dataset should
     For Iterator, padding = 148141 out of 281696 = 52.588961149608096%
     For BucketIterator, padding = 2125 out of 135680 = 1.5661851415094339%
 
-As we can see, the difference between using a regular Iterator and a BucketIterator is massive. 
Not only do we reduce the amount of padding, we have reduced the total amount of tokens processed by about 50%. The SST dataset, however, is a relatively small dataset so this experiment might be a bit biased. Let's take a look at the same statistics for the :class:`podium.datasets.impl.IMDB` dataset. After changing the data loading line in the first snippet to: .. code-block:: rest - train, test = IMDB.get_dataset_splits(fields=fields) + >>> train, test = IMDB.get_dataset_splits(fields=fields) And re-running the code, we obtain the following, still significant improvement: .. code-block:: rest - For Iterator, padding = 13569936 out of 19414616 = 69.89546432440385% - For BucketIterator, padding = 259800 out of 6104480 = 4.255890755641758% + For Iterator, padding = 13569936 out of 19414616 = 69.89% + For BucketIterator, padding = 259800 out of 6104480 = 4.25% Generally, using bucketing when iterating over your NLP dataset is preferred and will save you quite a bit of processing time. diff --git a/docs/source/examples/pytorch_rnn_example.rst b/docs/source/examples/pytorch_rnn_example.rst new file mode 100644 index 00000000..fa7cc074 --- /dev/null +++ b/docs/source/examples/pytorch_rnn_example.rst @@ -0,0 +1,4 @@ +Pytorch RNN classifier +======================= + +In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index d237452f..22b7b571 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -42,6 +42,7 @@ The documentation is organized in five parts: :caption: Full examples examples/tfidf_example.rst + examples/pytorch_rnn_example.rst .. toctree:: :maxdepth: 2 diff --git a/docs/source/walkthrough.rst b/docs/source/walkthrough.rst index 360531db..d4f70d18 100644 --- a/docs/source/walkthrough.rst +++ b/docs/source/walkthrough.rst @@ -113,7 +113,7 @@ Datasets from 🤗 need to either (1) be wrapped them in :class:`podium.datasets })} .. note:: - Conversion from features to Fields is **automatically inferred** by default. This is a process which can be error prone, many assumptions have to be made. Nevertheless, it will work for basic use-cases. + Conversion from 🤗 dataset features to Fields is **automatically inferred** by default. This is a process which can be error prone. Nevertheless, it will work for basic use-cases. In general, we recommend you set the ``fields`` argument of ``from_dataset_dict``. When we load a 🤗 dataset, we internally perform automatic Field type inference and create Fields. While we expect these Fields to work in most cases, we recommend you try constructing your own. 
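For reference, a minimal sketch of what passing custom Fields to ``from_dataset_dict`` might look like -- here ``dataset_dict`` is an assumed, illustrative 🤗 dataset dictionary with ``text`` and ``label`` features, mirroring the usage shown in the Pytorch RNN example:

.. code-block:: python

    >>> from podium import Field, LabelField, Vocab
    >>> from podium.datasets.hf import HFDatasetConverter as HF
    >>> # One Field per feature of the source dataset, keyed by feature name
    >>> fields = {
    ...     'text': Field(name='text', numericalizer=Vocab()),
    ...     'label': LabelField(name='label', numericalizer=lambda x: x)
    ... }
    >>> splits = HF.from_dataset_dict(dataset_dict, fields=fields)
    >>> splits['train'].finalize_fields()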
diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index c26a60d4..1a81b151 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -307,13 +307,15 @@ def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]: self._shuffler_state = self.get_internal_random_state() self._shuffler.shuffle(indices) - data = self._dataset[indices] + # This is extremely memory intensive for each iteration + # data = self._dataset[indices] # If iteration was stopped, continue where we left off start = self.iterations * self.batch_size for i in range(start, len(data), self.batch_size): - batch_instances = data[i : i + self.batch_size] + batch_indices = indices[i : i + self.batch_size] + batch_instances = data[batch_indices] if self._sort_key is not None: batch_instances = batch_instances.sorted(key=self._sort_key) From de5d083e2a69f7585e1043984745e1da01f5f9e6 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 02:16:17 +0200 Subject: [PATCH 05/26] Fix shuffling cost --- podium/datasets/iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index 1a81b151..0319821d 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -315,7 +315,7 @@ def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]: for i in range(start, len(data), self.batch_size): batch_indices = indices[i : i + self.batch_size] - batch_instances = data[batch_indices] + batch_instances = self._dataset[batch_indices] if self._sort_key is not None: batch_instances = batch_instances.sorted(key=self._sort_key) From 91237895ea55410a38db331dde3df3f10624e4a3 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 11:08:14 +0200 Subject: [PATCH 06/26] Fix shuffling cost+ --- podium/datasets/hf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/podium/datasets/hf.py b/podium/datasets/hf.py index 93b36cc8..c072cdfb 100644 --- a/podium/datasets/hf.py +++ b/podium/datasets/hf.py @@ -180,6 +180,7 @@ def _get_examples(self): yield from self def __getitem__(self, i): + print(f"Called with {i}") raw_examples = self.dataset[i] # Index or slice From 78757a38ec36749850874a48796570e2fa522f0b Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 11:19:38 +0200 Subject: [PATCH 07/26] Fix shuffling cost+ --- podium/datasets/iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index 0319821d..1747c273 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -313,7 +313,7 @@ def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]: # If iteration was stopped, continue where we left off start = self.iterations * self.batch_size - for i in range(start, len(data), self.batch_size): + for i in range(start, len(self._dataset), self.batch_size): batch_indices = indices[i : i + self.batch_size] batch_instances = self._dataset[batch_indices] From 1bd5887345b2ae9cbcaf83da409b5b6506c078ac Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 12:34:40 +0200 Subject: [PATCH 08/26] Fix shuffling cost+ --- podium/datasets/hf.py | 1 - podium/datasets/iterator.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/podium/datasets/hf.py b/podium/datasets/hf.py index c072cdfb..93b36cc8 100644 --- a/podium/datasets/hf.py +++ b/podium/datasets/hf.py @@ -180,7 +180,6 @@ def _get_examples(self): yield from self def __getitem__(self, i): - print(f"Called with {i}") 
raw_examples = self.dataset[i] # Index or slice diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index 1747c273..6cce1a7f 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -307,9 +307,6 @@ def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]: self._shuffler_state = self.get_internal_random_state() self._shuffler.shuffle(indices) - # This is extremely memory intensive for each iteration - # data = self._dataset[indices] - # If iteration was stopped, continue where we left off start = self.iterations * self.batch_size From 6c9b67b6a52a9b0055bfe503a8a59231e19bfe71 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 14:22:48 +0200 Subject: [PATCH 09/26] merge --- podium/datasets/iterator.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index b6a83fe0..ecb662b4 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -493,9 +493,14 @@ def __init__(self, dataset: DatasetBase = None, shuffle=True, add_padding=True): iterator. If set to ``False``, numericalized Fields will be returned as python lists of ``matrix_class`` instances. """ + + batch_size = 0 + if dataset is not None: + batch_size=len(dataset) + super().__init__( dataset=dataset, - batch_size=len(dataset), + batch_size=batch_size, shuffle=shuffle, disable_batch_matrix=not add_padding, ) From 2ed6b95809d895e09cc9a5e29b1023e90984ffce Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 16:40:26 +0200 Subject: [PATCH 10/26] Finalize pytorch rnn example --- docs/source/examples/pytorch_rnn_example.rst | 271 ++++++++++++++++++- docs/source/quickstart.rst | 1 + docs/source/walkthrough.rst | 2 + podium/datasets/iterator.py | 2 +- 4 files changed, 274 insertions(+), 2 deletions(-) diff --git a/docs/source/examples/pytorch_rnn_example.rst b/docs/source/examples/pytorch_rnn_example.rst index fa7cc074..991ee4f0 100644 --- a/docs/source/examples/pytorch_rnn_example.rst +++ b/docs/source/examples/pytorch_rnn_example.rst @@ -1,4 +1,273 @@ Pytorch RNN classifier ======================= -In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will \ No newline at end of file +In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will use the IMDB dataset loaded from 🤗/datasets, preprocess it with Fields and train the model briefly. +While having a GPU is not necessary it is recommended as otherwise training the model, even for a single epoch, will take a while. + +Loading a dataset from 🤗/datasets +----------------------------------- + +As we have covered in :ref:`hf-loading`, we have implemented wrappers around 🤗 dataset classes to enable working with the plethora of datasets implemented therein. We will now briefly go through (1) loading a dataset from 🤗/datasets and (2) wrapping it in Podium classes. + +.. 
code-block:: python + + >>> import datasets + >>> imdb = datasets.load_dataset('imdb') + >>> print(imdb) + DatasetDict({ + train: Dataset({ + features: ['text', 'label'], + num_rows: 25000 + }) + test: Dataset({ + features: ['text', 'label'], + num_rows: 25000 + }) + unsupervised: Dataset({ + features: ['text', 'label'], + num_rows: 50000 + }) + }) + >>> from pprint import pprint + >>> pprint(imdb['train'].features) + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], names_file=None, id=None), + 'text': Value(dtype='string', id=None)} + +By calling ``load_dataset`` the dataset was downloaded and cached on disk through the ``datasets`` library. The dataset has two splits we are interested in (``train`` and ``test``). +The main thing we need to pay attention to are the ``features`` of the dataset, in this case ``text`` and ``label``. These features, or data columns, need to be mapped to (and processed by) Podium Fields. + +For convenience, we have implemented automatic ``Field`` type inference from 🤗 dataset features -- however it is far from perfect as we have to make many assumptions on the way. We will now wrap the IMDB dataset in Podium and show the automatically inferred Fields. + +.. code-block:: python + + >>> from podium.datasets.hf import HFDatasetConverter as HF + >>> splits = HF.from_dataset_dict(imdb) + >>> imdb_train, imdb_test = splits['train'], splits['test'] + >>> imdb_train.finalize_fields() # Construct the vocab + >>> print(*imdb_train.fields, sep="\n") + Field({ + name: 'text', + keep_raw: False, + is_target: False, + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 280619}) + }) + LabelField({ + name: 'label', + keep_raw: False, + is_target: True + }) + +Both of the Fields were constructed well, but there are a couple of drawbacks for this concrete dataset. Firstly, the size of the vocabulary is very large (``280619``) -- we would like to trim this down to a reasonable number as we won't be using subword tokenization in this example. + +.. code-block:: python + + >>> print(imdb_train[0]) + Example({ + text: (None, ['Bromwell', 'High', 'is', 'a', 'cartoon', 'comedy.', 'It', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life,', 'such', 'as', '"Teachers".', 'My', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'Bromwell', "High's", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '"Teachers".', 'The', 'scramble', 'to', 'survive', 'financially,', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', "teachers'", 'pomp,', 'the', 'pettiness', 'of', 'the', 'whole', 'situation,', 'all', 'remind', 'me', 'of', 'the', 'schools', 'I', 'knew', 'and', 'their', 'students.', 'When', 'I', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school,', 'I', 'immediately', 'recalled', '.........', 'at', '..........', 'High.', 'A', 'classic', 'line:', 'INSPECTOR:', "I'm", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers.', 'STUDENT:', 'Welcome', 'to', 'Bromwell', 'High.', 'I', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'Bromwell', 'High', 'is', 'far', 'fetched.', 'What', 'a', 'pity', 'that', 'it', "isn't!"]), + label: (None, 1) + }) + +When inspecting a concrete instance, there are a few more things to note. 
Firstly, IMDB instances can be quite long (on average around 200 tokens per instance), secondly, the text wasn't tokenized properly near sentence boundaries (due to using the default ``str.split`` tokenizer) and lastly, the text has varying casing. +We will instead define our own Fields for the corresponding features, add posttokenization hooks which will transform the data, and use those Fields to replace the automatically inferred ones: + +.. code-block:: python + + >>> from podium import Field, LabelField, Vocab + >>> + >>> # Lowercasing as a post-tokenization hook + >>> def lowercase(raw, tokenized): + ... return raw, [token.lower() for token in tokenized] + >>> + >>> # Truncating as a post-tokenization hook + >>> def truncate(raw, tokenized, max_length=200): + ... return raw, tokenized[:max_length] + >>> + >>> vocab = Vocab(max_size=10000) + >>> text = Field(name="text", + ... numericalizer=vocab, + ... include_lengths=True, + ... tokenizer="spacy-en_core_web_sm", + ... posttokenize_hooks=[truncate, lowercase]) + >>> + >>> # The labels are already mapped to indices in /datasets so we will + >>> # pass them through + >>> label = LabelField(name="label", numericalizer=lambda x: x) + >>> fields = { + ... 'text': text, + ... 'label': label + ... } + >>> + >>> # Use the given Fields to load the dataset again + >>> splits = HF.from_dataset_dict(imdb, fields=fields) + >>> imdb_train, imdb_test = splits['train'], splits['test'] + >>> imdb_train.finalize_fields() + >>> print(imdb_train) + HFDatasetConverter({ + dataset_name: imdb, + size: 25000, + fields: [ + Field({ + name: 'text', + keep_raw: False, + is_target: False, + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000}) + }), + LabelField({ + name: 'label', + keep_raw: False, + is_target: True + }) + + ] + }) + >>> print(imdb_train[0]) + Example({ + text: (None, ['bromwell', 'high', 'is', 'a', 'cartoon', 'comedy', '.', 'it', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life', ',', 'such', 'as', '"', 'teachers', '"', '.', 'my', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'bromwell', 'high', "'s", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '"', 'teachers', '"', '.', 'the', 'scramble', 'to', 'survive', 'financially', ',', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', 'teachers', "'", 'pomp', ',', 'the', 'pettiness', 'of', 'the', 'whole', 'situation', ',', 'all', 'remind', 'me', 'of', 'the', 'schools', 'i', 'knew', 'and', 'their', 'students', '.', 'when', 'i', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school', ',', 'i', 'immediately', 'recalled', '.........', 'at', '..........', 'high', '.', 'a', 'classic', 'line', ':', 'inspector', ':', 'i', "'m", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers', '.', 'student', ':', 'welcome', 'to', 'bromwell', 'high', '.', 'i', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'bromwell', 'high', 'is', 'far', 'fetched', '.', 'what', 'a', 'pity', 'that', 'it', 'is', "n't", '!']), + label: (None, 1) + }) + +Here, we can see the effect of our hooks and using the spacy tokenizer. Now our dataset will be a bit cleaner to work with. 
Some data cleaning would still be desired, such as removing tokens which only contain punctuation, but we leave this exercise to the reader :)

Loading pretrained embeddings
-----------------------------
In most use-cases, we want to use pre-trained word embeddings along with our neural model. With Podium, this process is very simple. If your field uses a vocabulary, it has already built an inventory of tokens for your dataset.

For example, we will use the `GloVe <https://nlp.stanford.edu/projects/glove/>`__ vectors. You can read more about loading pretrained vectors in :ref:`pretrained`, but the procedure to load these vectors has two steps: (1) initialize the vector class, which sets all the required paths, and (2) obtain the vectors for a pre-defined list of words by calling ``load_vocab``.

.. code-block:: python

    >>> from podium.vectorizers import GloVe
    >>> vocab = fields['text'].vocab
    >>> glove = GloVe()
    >>> embeddings = glove.load_vocab(vocab)
    >>> print(f"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: {embeddings.shape}")
    For vocabulary of size: 10000 loaded embedding matrix of shape: (10000, 300)
    >>> # We can obtain vectors for a single word (given the word is loaded) like this:
    >>> word = "sport"
    >>> print(f"Vector for {word}: {glove.token_to_vector(word)}")
    Vector for sport: [ 0.34566 0.15934 0.48444 -0.13693 0.18737 0.2678
    -0.39159 0.4931 -0.76111 -1.4586 0.41475 0.55837
    ...
    0.13802 0.36619 0.19734 0.35701 -0.42228 -0.25242
    -0.050651 -0.041129 0.15092 0.22084 0.52252 -0.27224 ]

Defining a simple neural model in Pytorch
------------------------------------------

In this section, we will implement a very simple neural classification model -- a 2-layer BiGRU with a single hidden layer classifier on top of its last hidden state. Many improvements to the model can be made, but this is not our current focus.

.. code-block:: python

    >>> import torch
    >>> import torch.nn as nn
    >>> import torch.nn.functional as F
    >>>
    >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    >>>
    >>> class RNNClassifier(nn.Module):
    ...     def __init__(self, embedding, embed_dim=300, hidden_dim=300, num_labels=2):
    ...         super(RNNClassifier, self).__init__()
    ...         self.embedding = embedding
    ...         self.encoder = nn.GRU(
    ...                 input_size=embed_dim,
    ...                 hidden_size=hidden_dim,
    ...                 num_layers=2,
    ...                 bidirectional=True,
    ...                 dropout=0.3
    ...         )
    ...         self.decoder = nn.Sequential(
    ...                 nn.Linear(2*hidden_dim, hidden_dim),
    ...                 nn.Tanh(),
    ...                 nn.Linear(hidden_dim, num_labels)
    ...         )
    ...
    ...     def forward(self, x, lengths):
    ...         e = self.embedding(x)
    ...         h_pack = pack_padded_sequence(e,
    ...                                       lengths,
    ...                                       enforce_sorted=False,
    ...                                       batch_first=True)
    ...
    ...         _, h = self.encoder(h_pack) # [2L x B x H]
    ...
    ...         # Concat last state of left and right directions
    ...         h = torch.cat([h[-1], h[-2]], dim=-1) # [B x 2H]
    ...         return self.decoder(h)

We will now define the prerequisites for PyTorch model training. We will use a GPU for speed, although running the model for a single epoch is possible, albeit time-consuming, even without one.

.. code-block:: python

    >>> embed_dim = 300
    >>> padding_index = text.vocab.get_padding_index()
    >>> embedding_matrix = nn.Embedding(len(text.vocab), embed_dim,
    ...                                 padding_idx=padding_index)
    >>> # Copy the pretrained GloVe word embeddings
    >>> embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))
    >>>
    >>> device = torch.device("cuda:0")
    >>> model = RNNClassifier(embedding_matrix)
    >>> model = model.to(device)
    >>> criterion = nn.CrossEntropyLoss()
    >>> optimizer = torch.optim.Adam(model.parameters())

Now that we have the model setup code ready, we will first define a helper method to track the accuracy of our model during training:

.. code-block:: python

    >>> import numpy as np
    >>> def update_stats(accuracy, confusion_matrix, logits, y):
    ...     _, max_ind = torch.max(logits, 1)
    ...     equal = torch.eq(max_ind, y)
    ...     correct = int(torch.sum(equal))
    ...
    ...     for j, i in zip(max_ind, y):
    ...         confusion_matrix[int(i), int(j)] += 1
    ...     return accuracy + correct, confusion_matrix

and now the training loop for the model:

.. code-block:: python

    >>> import tqdm
    >>> def train(model, data, optimizer, criterion, num_labels):
    ...     model.train()
    ...     accuracy, confusion_matrix = 0, np.zeros((num_labels, num_labels), dtype=int)
    ...     for batch_num, batch in tqdm.tqdm(enumerate(data), total=len(data)):
    ...         x, lens = batch.text
    ...         y = batch.label
    ...         # Reset the gradients accumulated in the previous step
    ...         optimizer.zero_grad()
    ...         logits = model(x, lens)
    ...         accuracy, confusion_matrix = update_stats(accuracy, confusion_matrix, logits, y)
    ...         loss = criterion(logits, y.squeeze())
    ...         loss.backward()
    ...         optimizer.step()
    ...     print("[Accuracy]: {}/{} : {:.3f}%".format(
    ...         accuracy, len(data)*data.batch_size, accuracy / len(data) / data.batch_size * 100))
    ...     return accuracy, confusion_matrix

With that, our model code is done. Let's turn back to Podium and see how we can set up batching for our training loop to start ticking.

Minibatching data in Podium
--------------------------------

We have covered batching data in :ref:`minibatching` and advanced batching through bucketing in :ref:`bucketing`. Here we will use the plain Iterator, and leave switching to bucketing as an exercise so you can see how much the model speeds up when padding is minimized. One change we would like to make when iterating over data is to obtain the data matrices as torch tensors on the ``device`` we defined previously. We will now demonstrate how to do this by setting the ``matrix_class`` argument of the :class:`podium.datasets.Iterator`\:

.. code-block:: python

    >>> from podium import Iterator
    >>> # Closure for converting data to the given device
    >>> def gpu_tensor(data):
    ...     return torch.tensor(data).to(device)
    >>> # Initialize our iterator
    >>> train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor)
    >>>
    >>> epochs = 5
    >>> for epoch in range(epochs):
    ...     train(model, train_iter, optimizer, criterion, num_labels=2)
    [Accuracy]: 20050/25024 : 80.123%
    [Accuracy]: 22683/25024 : 90.645%
    [Accuracy]: 23709/25024 : 94.745%
    [Accuracy]: 24323/25024 : 97.199%
    [Accuracy]: 24595/25024 : 98.286%

And we are done! In our case, the model takes about one minute per epoch on a GPU, but this can be sped up by using bucketing, which we recommend you try out yourself.
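As a starting point, a minimal sketch of the bucketing variant might look as follows -- this assumes ``BucketIterator`` accepts a ``bucket_sort_key`` as described in the bucketing overview, and reuses ``gpu_tensor`` and the training setup from above:

.. code-block:: python

    >>> from podium import BucketIterator
    >>> # Sort instances by the length of their tokenized text so that
    >>> # instances of similar length end up in the same minibatch
    >>> def instance_length(instance):
    ...     raw, tokenized = instance.text
    ...     return len(tokenized)
    >>> bucket_iter = BucketIterator(imdb_train, batch_size=32,
    ...                              bucket_sort_key=instance_length,
    ...                              matrix_class=gpu_tensor)
    >>> for epoch in range(epochs):
    ...     train(model, bucket_iter, optimizer, criterion, num_labels=2)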
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index 715f3452..2e4847de 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -189,6 +189,7 @@ Since our example dataset is small, we can set ``add_padding=True``, which cause When obtaining larger datasets as a single batch, we recommend leaving ``add_padding=False`` (default) or your entire dataset will be padded to the length of the longest instance, causing memory issues. When set to ``False``, the output of each Field will be a list instead of a matrix. +.. _minibatching: Minibatching data ----------------------- diff --git a/docs/source/walkthrough.rst b/docs/source/walkthrough.rst index 4124f7e1..d91ae5de 100644 --- a/docs/source/walkthrough.rst +++ b/docs/source/walkthrough.rst @@ -486,6 +486,8 @@ Since datasets can contain multiple input Fields, it is not trivial to determine And here we can see, that even for our small, two-instance batch, the elements in the batch are now properly sorted according to length. +.. _pretrained: + Loading pretrained word vectors ------------------------------- diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index ecb662b4..97e23c30 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -496,7 +496,7 @@ def __init__(self, dataset: DatasetBase = None, shuffle=True, add_padding=True): batch_size = 0 if dataset is not None: - batch_size=len(dataset) + batch_size = len(dataset) super().__init__( dataset=dataset, From 0183c38a34a96b4fbe9d1125fe2d88bd4b4b85eb Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 16:56:43 +0200 Subject: [PATCH 11/26] Finalize examples --- docs/source/_static/js/custom.js | 2 + docs/source/notebooks/advanced.ipynb | 6 +- .../notebooks/pytorch_rnn_example.ipynb | 500 ++++++++++++++++++ docs/source/notebooks/quickstart.ipynb | 7 + docs/source/notebooks/walkthrough.ipynb | 9 +- .../{examples => }/pytorch_rnn_example.rst | 0 .../scripts/convert_doc_to_notebooks.py | 4 + docs/source/{examples => }/tfidf_example.rst | 2 +- 8 files changed, 525 insertions(+), 5 deletions(-) create mode 100644 docs/source/notebooks/pytorch_rnn_example.ipynb rename docs/source/{examples => }/pytorch_rnn_example.rst (100%) rename docs/source/{examples => }/tfidf_example.rst (99%) diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index 749084d9..26839340 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -16,6 +16,8 @@ const hasNotebook = [ "advanced", "preprocessing", "walkthrough", + "tfidf_example", + "pytorch_rnn_example" ] function addIcon() { diff --git a/docs/source/notebooks/advanced.ipynb b/docs/source/notebooks/advanced.ipynb index c9f7d874..639d6af0 100644 --- a/docs/source/notebooks/advanced.ipynb +++ b/docs/source/notebooks/advanced.ipynb @@ -957,7 +957,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As we can see, the difference between using a regular Iterator and a BucketIterator is massive. Not only do we reduce the amount of padding, we have reduced the total amount of tokens processed by about 50%. The SST dataset, however, is a relatively small dataset so this experiment might be a bit biased. Let's take a look at the same statistics for the `IMDB` dataset. After changing the highligted data loading line in the first snippet to:" + "As we can see, the difference between using a regular Iterator and a BucketIterator is massive. 
Not only do we reduce the amount of padding, we have reduced the total amount of tokens processed by about 50%. The SST dataset, however, is a relatively small dataset so this experiment might be a bit biased. Let's take a look at the same statistics for the `IMDB` dataset. After changing the data loading line in the first snippet to:" ] }, { @@ -981,8 +981,8 @@ "metadata": {}, "source": [ "```bash\n", - "For Iterator, padding = 13569936 out of 19414616 = 69.89546432440385%\n", - "For BucketIterator, padding = 259800 out of 6104480 = 4.255890755641758%\n", + "For Iterator, padding = 13569936 out of 19414616 = 69.89%\n", + "For BucketIterator, padding = 259800 out of 6104480 = 4.25%\n", "```" ] }, diff --git a/docs/source/notebooks/pytorch_rnn_example.ipynb b/docs/source/notebooks/pytorch_rnn_example.ipynb new file mode 100644 index 00000000..a29d6256 --- /dev/null +++ b/docs/source/notebooks/pytorch_rnn_example.ipynb @@ -0,0 +1,500 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Podium installation\n", + "! pip install podium-nlp\n", + "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", + "# ! pip install git+https://github.com/takelab/podium" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pytorch RNN classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will use the IMDB dataset loaded from 🤗/datasets, preprocess it with Fields and train the model briefly.\n", + "While having a GPU is not necessary it is recommended as otherwise training the model, even for a single epoch, will take a while." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Loading a dataset from 🤗/datasets" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we have covered in [Loading 🤗 datasets](http://takelab.fer.hr/podium/walkthrough.html#hf-loading), we have implemented wrappers around 🤗 dataset classes to enable working with the plethora of datasets implemented therein. We will now briefly go through (1) loading a dataset from 🤗/datasets and (2) wrapping it in Podium classes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 25000\n", + " })\n", + " test: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 25000\n", + " })\n", + " unsupervised: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 50000\n", + " })\n", + "})\n", + "{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], names_file=None, id=None),\n", + " 'text': Value(dtype='string', id=None)}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import datasets\n", + "imdb = datasets.load_dataset('imdb')\n", + "print(imdb)\n", + "from pprint import pprint\n", + "pprint(imdb['train'].features)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By calling `load_dataset` the dataset was downloaded and cached on disk through the `datasets` library. 
The dataset has two splits we are interested in (`train` and `test`).\n", + "The main thing we need to pay attention to are the `features` of the dataset, in this case `text` and `label`. These features, or data columns, need to be mapped to (and processed by) Podium Fields.\n", + "\n", + "For convenience, we have implemented automatic `Field` type inference from 🤗 dataset features -- however it is far from perfect as we have to make many assumptions on the way. We will now wrap the IMDB dataset in Podium and show the automatically inferred Fields." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 280619})\n", + "})\n", + "LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium.datasets.hf import HFDatasetConverter as HF\n", + "splits = HF.from_dataset_dict(imdb)\n", + "imdb_train, imdb_test = splits['train'], splits['test']\n", + "imdb_train.finalize_fields() # Construct the vocab\n", + "print(*imdb_train.fields, sep=\"\\n\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both of the Fields were constructed well, but there are a couple of drawbacks for this concrete dataset. Firstly, the size of the vocabulary is very large (`280619`) -- we would like to trim this down to a reasonable number as we won't be using subword tokenization in this example. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Example({\n", + " text: (None, ['Bromwell', 'High', 'is', 'a', 'cartoon', 'comedy.', 'It', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life,', 'such', 'as', '\"Teachers\".', 'My', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'Bromwell', \"High's\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"Teachers\".', 'The', 'scramble', 'to', 'survive', 'financially,', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', \"teachers'\", 'pomp,', 'the', 'pettiness', 'of', 'the', 'whole', 'situation,', 'all', 'remind', 'me', 'of', 'the', 'schools', 'I', 'knew', 'and', 'their', 'students.', 'When', 'I', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school,', 'I', 'immediately', 'recalled', '.........', 'at', '..........', 'High.', 'A', 'classic', 'line:', 'INSPECTOR:', \"I'm\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers.', 'STUDENT:', 'Welcome', 'to', 'Bromwell', 'High.', 'I', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'Bromwell', 'High', 'is', 'far', 'fetched.', 'What', 'a', 'pity', 'that', 'it', \"isn't!\"]),\n", + " label: (None, 1)\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(imdb_train[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When inspecting a concrete instance, there are a few more things to note. 
Firstly, IMDB instances can be quite long (on average around 200 tokens per instance), secondly, the text wasn't tokenized properly near sentence boundaries (due to using the default `str.split` tokenizer) and lastly, the text has varying casing.\n", + "We will instead define our own Fields for the corresponding features, add posttokenization hooks which will transform the data, and use those Fields to replace the automatically inferred ones:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "HFDatasetConverter({\n", + " dataset_name: imdb,\n", + " size: 25000,\n", + " fields: [\n", + " Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", + " }),\n", + " LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + " })\n", + " \n", + " ]\n", + "})\n", + "Example({\n", + " text: (None, ['bromwell', 'high', 'is', 'a', 'cartoon', 'comedy', '.', 'it', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life', ',', 'such', 'as', '\"', 'teachers', '\"', '.', 'my', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'bromwell', 'high', \"'s\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"', 'teachers', '\"', '.', 'the', 'scramble', 'to', 'survive', 'financially', ',', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', 'teachers', \"'\", 'pomp', ',', 'the', 'pettiness', 'of', 'the', 'whole', 'situation', ',', 'all', 'remind', 'me', 'of', 'the', 'schools', 'i', 'knew', 'and', 'their', 'students', '.', 'when', 'i', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school', ',', 'i', 'immediately', 'recalled', '.........', 'at', '..........', 'high', '.', 'a', 'classic', 'line', ':', 'inspector', ':', 'i', \"'m\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers', '.', 'student', ':', 'welcome', 'to', 'bromwell', 'high', '.', 'i', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'bromwell', 'high', 'is', 'far', 'fetched', '.', 'what', 'a', 'pity', 'that', 'it', 'is', \"n't\", '!']),\n", + " label: (None, 1)\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium import Field, LabelField, Vocab\n", + "\n", + "# Lowercasing as a post-tokenization hook\n", + "def lowercase(raw, tokenized):\n", + " return raw, [token.lower() for token in tokenized]\n", + "\n", + "# Truncating as a post-tokenization hook\n", + "def truncate(raw, tokenized, max_length=200):\n", + " return raw, tokenized[:max_length]\n", + "\n", + "vocab = Vocab(max_size=10000)\n", + "text = Field(name=\"text\", \n", + " numericalizer=vocab,\n", + " include_lengths=True,\n", + " tokenizer=\"spacy-en_core_web_sm\",\n", + " posttokenize_hooks=[truncate, lowercase])\n", + "\n", + "# The labels are already mapped to indices in /datasets so we will\n", + "# pass them through\n", + "label = LabelField(name=\"label\", numericalizer=lambda x: x)\n", + "fields = {\n", + " 'text': text,\n", + " 'label': label\n", + "}\n", + "\n", + "# Use the given Fields to load the dataset again\n", + "splits = HF.from_dataset_dict(imdb, fields=fields)\n", + "imdb_train, imdb_test = splits['train'], splits['test']\n", + 
"imdb_train.finalize_fields()\n", + "print(imdb_train)\n", + "print(imdb_train[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we can see the effect of our hooks and using the spacy tokenizer. Now our dataset will be a bit cleaner to work with. Some data cleaning would still be desired, such as removing tokens which only contain punctuation, but we leave this exercise to the reader :)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Loading pretrained embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In most use-cases, we want to use pre-trained word embeddings along with our neural model. With Podium, this process is very simple. If your field uses a vocabulary, it has already built an inventory of tokens for your dataset.\n", + "\n", + "For example, we will use the [GloVe](https://nlp.stanford.edu/projects/glove/) vectors. You can read more about loading pretrained vectors in [Loading pretrained word vectors](http://takelab.fer.hr/podium/walkthrough.html#pretrained), but the procedure to load these vectors has two steps: (1) initialize the vector class, which sets all the required paths and (2) obtain the vectors for a pre-defined list of words by calling `load_vocab`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "For vocabulary of size: 10000 loaded embedding matrix of shape: (10000, 300)\n", + "Vector for sport: [ 0.34566 0.15934 0.48444 -0.13693 0.18737 0.2678" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium.vectorizers import GloVe\n", + "vocab = fields['text'].vocab\n", + "glove = GloVe()\n", + "embeddings = glove.load_vocab(vocab)\n", + "print(f\"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: {embeddings.shape}\")\n", + "# We can obtain vectors for a single word (given the word is loaded) like this:\n", + "word = \"sport\"\n", + "print(f\"Vector for {word}: {glove.token_to_vector(word)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " -0.39159 0.4931 -0.76111 -1.4586 0.41475 0.55837\n", + " ...\n", + " 0.13802 0.36619 0.19734 0.35701 -0.42228 -0.25242\n", + " -0.050651 -0.041129 0.15092 0.22084 0.52252 -0.27224 ]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining a simple neural model in Pytorch" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section, we will implement a very simple neural classification model -- a 2-layer BiGRU with a single hidden layer classifier on top of its last hidden state. Many improvements to the model can be made, but this is not our current focus." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n",
+    "\n",
+    "class RNNClassifier(nn.Module):\n",
+    "    def __init__(self, embedding, embed_dim=300, hidden_dim=300, num_labels=2):\n",
+    "        super(RNNClassifier, self).__init__()\n",
+    "        self.embedding = embedding\n",
+    "        self.encoder = nn.GRU(\n",
+    "            input_size=embed_dim,\n",
+    "            hidden_size=hidden_dim,\n",
+    "            num_layers=2,\n",
+    "            bidirectional=True,\n",
+    "            dropout=0.3\n",
+    "        )\n",
+    "        self.decoder = nn.Sequential(\n",
+    "            nn.Linear(2*hidden_dim, hidden_dim),\n",
+    "            nn.Tanh(),\n",
+    "            nn.Linear(hidden_dim, num_labels)\n",
+    "        )\n",
+    "\n",
+    "    def forward(self, x, lengths):\n",
+    "        e = self.embedding(x)\n",
+    "        h_pack = pack_padded_sequence(e, \n",
+    "                                      lengths,\n",
+    "                                      enforce_sorted=False,\n",
+    "                                      batch_first=True)\n",
+    "\n",
+    "        _, h = self.encoder(h_pack) # [2L x B x H]\n",
+    "\n",
+    "        # Concat last state of left and right directions\n",
+    "        h = torch.cat([h[-1], h[-2]], dim=-1) # [B x 2H]\n",
+    "        return self.decoder(h)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "There. We will now define the prerequisites for pytorch model training, where we will use a GPU for speed, however running the model for one epoch will is possible albeit time-consuing even without a GPU."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "embed_dim = 300\n",
+    "padding_index = text.vocab.get_padding_index()\n",
+    "embedding_matrix = nn.Embedding(len(text.vocab), embed_dim,\n",
+    "                                padding_idx=padding_index)\n",
+    "# Copy the pretrained GloVe word embeddings\n",
+    "embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))\n",
+    "\n",
+    "device = torch.device(\"cuda:0\")\n",
+    "model = RNNClassifier(embedding_matrix)\n",
+    "model = model.to(device)\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "optimizer = torch.optim.Adam(model.parameters())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have the model setup code ready, we will first define a helper method to measure the accuracy of our model after each epoch:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "def update_stats(accuracy, confusion_matrix, logits, y):\n",
+    "    _, max_ind = torch.max(logits, 1)\n",
+    "    equal = torch.eq(max_ind, y)\n",
+    "    correct = int(torch.sum(equal))\n",
+    "\n",
+    "    for j, i in zip(max_ind, y):\n",
+    "        confusion_matrix[int(i),int(j)]+=1\n",
+    "    return accuracy + correct, confusion_matrix"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and now the training loop for the model:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import tqdm\n",
+    "def train(model, data, optimizer, criterion, num_labels):\n",
+    "    model.train()\n",
+    "    accuracy, confusion_matrix = 0, np.zeros((num_labels, num_labels), dtype=int)\n",
+    "    for batch_num, batch in tqdm.tqdm(enumerate(data), total=len(data)):\n",
+    "        x, lens = batch.text\n",
+    "        y = batch.label\n",
+    "        logits = model(x, lens)\n",
+    "        accuracy, confusion_matrix = update_stats(accuracy, confusion_matrix, logits, y)\n",
+    "        loss = criterion(logits, y.squeeze())\n",
+    "        # Clear gradients from the previous step before backpropagating\n",
+    "        optimizer.zero_grad()\n",
+    "        loss.backward()\n",
+    "        
optimizer.step()\n",
+    "    print(\"[Accuracy]: {}/{} : {:.3f}%\".format(\n",
+    "        accuracy, len(data)*data.batch_size, accuracy / len(data) / data.batch_size * 100))\n",
+    "    return accuracy, confusion_matrix"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and now, we are done with our model code. Let's turn back to Podium and see how we can set up batching for our training loop to start ticking."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Minibatching data in Podium"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have covered batching data in [Minibatching data](http://takelab.fer.hr/podium/quickstart.html#minibatching) and advanced batching through bucketing in [Bucketing instances when iterating](http://takelab.fer.hr/podium/advanced.html#bucketing). We will use the plain Iterator and leave bucketing for you to try, so you can see how much the model speeds up when padding is minimized. One change we would like to make when iterating over data is to obtain the data matrices as torch tensors on the `device` we defined previously. We will now demonstrate how to do this by setting the `matrix_class` argument of the `Iterator`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[Accuracy]: 20050/25024 : 80.123%\n",
+       "[Accuracy]: 22683/25024 : 90.645%\n",
+       "[Accuracy]: 23709/25024 : 94.745%\n",
+       "[Accuracy]: 24323/25024 : 97.199%\n",
+       "[Accuracy]: 24595/25024 : 98.286%"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from podium import Iterator\n",
+    "# Closure for converting data to given device\n",
+    "def gpu_tensor(data):\n",
+    "    return torch.tensor(data).to(device)\n",
+    "# Initialize our iterator\n",
+    "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor)\n",
+    "\n",
+    "epochs = 5\n",
+    "for epoch in range(epochs):\n",
+    "    train(model, train_iter, optimizer, criterion, num_labels=2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And we are done! In our case, the model takes about one minute per epoch on a GPU, but this can be sped up by using bucketing, which we recommend you try out yourself."
+   ]
+  }
+ ],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/docs/source/notebooks/quickstart.ipynb b/docs/source/notebooks/quickstart.ipynb
index 7d23db64..b42e799d 100644
--- a/docs/source/notebooks/quickstart.ipynb
+++ b/docs/source/notebooks/quickstart.ipynb
@@ -432,6 +432,13 @@
     "> When set to `False`, the output of each Field will be a list instead of a matrix."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    ""
+   ]
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
diff --git a/docs/source/notebooks/walkthrough.ipynb b/docs/source/notebooks/walkthrough.ipynb
index f78d4d45..b8a1dc3a 100644
--- a/docs/source/notebooks/walkthrough.ipynb
+++ b/docs/source/notebooks/walkthrough.ipynb
@@ -220,7 +220,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "> **NOTE:** Conversion from features to Fields is **automatically inferred** by default. This is a process which can be error prone, many assumptions have to be made. Nevertheless, it will work for basic use-cases.\n",
+    "> **NOTE:** Conversion from 🤗 dataset features to Fields is **automatically inferred** by default. This is a process which can be error-prone. 
Nevertheless, it will work for basic use-cases.\n", "> In general, we recommend you set the `fields` argument of `from_dataset_dict`." ] }, @@ -961,6 +961,13 @@ "And here we can see, that even for our small, two-instance batch, the elements in the batch are now properly sorted according to length." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/docs/source/examples/pytorch_rnn_example.rst b/docs/source/pytorch_rnn_example.rst similarity index 100% rename from docs/source/examples/pytorch_rnn_example.rst rename to docs/source/pytorch_rnn_example.rst diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py index 1feb0731..897a823f 100644 --- a/docs/source/scripts/convert_doc_to_notebooks.py +++ b/docs/source/scripts/convert_doc_to_notebooks.py @@ -20,6 +20,8 @@ "advanced.rst", "preprocessing.rst", "walkthrough.rst", + "pytorch_rnn_example.rst", + "tfidf_example.rst", ] _re_label = re.compile(r"\.\.\s+_([^:]*):") @@ -556,6 +558,8 @@ def convert_all_tutorials(path_to_docs=None, path_to_dest=None): for file in TUTORIAL_FILES: notebook_name = os.path.splitext(file)[0] + ".ipynb" doc_file = os.path.join(path_to_docs, file) + print(doc_file) + print(os.path.exists(doc_file)) notebook_file = os.path.join(path_to_dest, notebook_name) convert_rst_file_to_notebook(doc_file, notebook_file, origin_folder=path_to_docs, dest_folder=path_to_dest, additional_deps=ADDITIONAL_DEPS.get(file)) diff --git a/docs/source/examples/tfidf_example.rst b/docs/source/tfidf_example.rst similarity index 99% rename from docs/source/examples/tfidf_example.rst rename to docs/source/tfidf_example.rst index bc62b3a4..770a8440 100644 --- a/docs/source/examples/tfidf_example.rst +++ b/docs/source/tfidf_example.rst @@ -21,7 +21,7 @@ First, we will implement a minimalistic example without much additional preproce We have now loaded our dataset, finalized its Fields and obtained it as a batch of input and target data. What we need to do next is define the TF-IDF vectorization for each instance in the dataset. This is done by using our :class:`podium.vectorizers.TfIdfVectorizer`, which adapts the ``scikit-learn`` vectorizer to the Podium input data. .. 
code-block:: python - + >>> from podium.vectorizers import TfIdfVectorizer >>> tfidf_vectorizer = TFIdfVectorizer() >>> tfidf_vectorizer.fit(train, field=train.field('text')) From 0c04847b8f06e56f41bad4b016719938303e1514 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 17:01:17 +0200 Subject: [PATCH 12/26] Move examples back to examples folder --- docs/source/{ => examples}/pytorch_rnn_example.rst | 0 docs/source/{ => examples}/tfidf_example.rst | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename docs/source/{ => examples}/pytorch_rnn_example.rst (100%) rename docs/source/{ => examples}/tfidf_example.rst (100%) diff --git a/docs/source/pytorch_rnn_example.rst b/docs/source/examples/pytorch_rnn_example.rst similarity index 100% rename from docs/source/pytorch_rnn_example.rst rename to docs/source/examples/pytorch_rnn_example.rst diff --git a/docs/source/tfidf_example.rst b/docs/source/examples/tfidf_example.rst similarity index 100% rename from docs/source/tfidf_example.rst rename to docs/source/examples/tfidf_example.rst From 9f77e5a3c4ad6e2a01be5373965908bdb8178179 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 17:08:17 +0200 Subject: [PATCH 13/26] Add examples directory to notebooks --- .../examples/pytorch_rnn_example.ipynb | 500 ++++++++++++++++++ .../scripts/convert_doc_to_notebooks.py | 4 +- 2 files changed, 502 insertions(+), 2 deletions(-) create mode 100644 docs/source/notebooks/examples/pytorch_rnn_example.ipynb diff --git a/docs/source/notebooks/examples/pytorch_rnn_example.ipynb b/docs/source/notebooks/examples/pytorch_rnn_example.ipynb new file mode 100644 index 00000000..a29d6256 --- /dev/null +++ b/docs/source/notebooks/examples/pytorch_rnn_example.ipynb @@ -0,0 +1,500 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Podium installation\n", + "! pip install podium-nlp\n", + "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", + "# ! pip install git+https://github.com/takelab/podium" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pytorch RNN classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will use the IMDB dataset loaded from 🤗/datasets, preprocess it with Fields and train the model briefly.\n", + "While having a GPU is not necessary it is recommended as otherwise training the model, even for a single epoch, will take a while." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Loading a dataset from 🤗/datasets" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we have covered in [Loading 🤗 datasets](http://takelab.fer.hr/podium/walkthrough.html#hf-loading), we have implemented wrappers around 🤗 dataset classes to enable working with the plethora of datasets implemented therein. We will now briefly go through (1) loading a dataset from 🤗/datasets and (2) wrapping it in Podium classes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 25000\n", + " })\n", + " test: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 25000\n", + " })\n", + " unsupervised: Dataset({\n", + " features: ['text', 'label'],\n", + " num_rows: 50000\n", + " })\n", + "})\n", + "{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], names_file=None, id=None),\n", + " 'text': Value(dtype='string', id=None)}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import datasets\n", + "imdb = datasets.load_dataset('imdb')\n", + "print(imdb)\n", + "from pprint import pprint\n", + "pprint(imdb['train'].features)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By calling `load_dataset` the dataset was downloaded and cached on disk through the `datasets` library. The dataset has two splits we are interested in (`train` and `test`).\n", + "The main thing we need to pay attention to are the `features` of the dataset, in this case `text` and `label`. These features, or data columns, need to be mapped to (and processed by) Podium Fields.\n", + "\n", + "For convenience, we have implemented automatic `Field` type inference from 🤗 dataset features -- however it is far from perfect as we have to make many assumptions on the way. We will now wrap the IMDB dataset in Podium and show the automatically inferred Fields." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 280619})\n", + "})\n", + "LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium.datasets.hf import HFDatasetConverter as HF\n", + "splits = HF.from_dataset_dict(imdb)\n", + "imdb_train, imdb_test = splits['train'], splits['test']\n", + "imdb_train.finalize_fields() # Construct the vocab\n", + "print(*imdb_train.fields, sep=\"\\n\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both of the Fields were constructed well, but there are a couple of drawbacks for this concrete dataset. Firstly, the size of the vocabulary is very large (`280619`) -- we would like to trim this down to a reasonable number as we won't be using subword tokenization in this example. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Example({\n", + " text: (None, ['Bromwell', 'High', 'is', 'a', 'cartoon', 'comedy.', 'It', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life,', 'such', 'as', '\"Teachers\".', 'My', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'Bromwell', \"High's\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"Teachers\".', 'The', 'scramble', 'to', 'survive', 'financially,', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', \"teachers'\", 'pomp,', 'the', 'pettiness', 'of', 'the', 'whole', 'situation,', 'all', 'remind', 'me', 'of', 'the', 'schools', 'I', 'knew', 'and', 'their', 'students.', 'When', 'I', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school,', 'I', 'immediately', 'recalled', '.........', 'at', '..........', 'High.', 'A', 'classic', 'line:', 'INSPECTOR:', \"I'm\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers.', 'STUDENT:', 'Welcome', 'to', 'Bromwell', 'High.', 'I', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'Bromwell', 'High', 'is', 'far', 'fetched.', 'What', 'a', 'pity', 'that', 'it', \"isn't!\"]),\n", + " label: (None, 1)\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(imdb_train[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When inspecting a concrete instance, there are a few more things to note. Firstly, IMDB instances can be quite long (on average around 200 tokens per instance), secondly, the text wasn't tokenized properly near sentence boundaries (due to using the default `str.split` tokenizer) and lastly, the text has varying casing.\n", + "We will instead define our own Fields for the corresponding features, add posttokenization hooks which will transform the data, and use those Fields to replace the automatically inferred ones:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "HFDatasetConverter({\n", + " dataset_name: imdb,\n", + " size: 25000,\n", + " fields: [\n", + " Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", + " }),\n", + " LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + " })\n", + " \n", + " ]\n", + "})\n", + "Example({\n", + " text: (None, ['bromwell', 'high', 'is', 'a', 'cartoon', 'comedy', '.', 'it', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life', ',', 'such', 'as', '\"', 'teachers', '\"', '.', 'my', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'bromwell', 'high', \"'s\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"', 'teachers', '\"', '.', 'the', 'scramble', 'to', 'survive', 'financially', ',', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', 'teachers', \"'\", 'pomp', ',', 'the', 'pettiness', 'of', 'the', 'whole', 'situation', ',', 'all', 'remind', 'me', 'of', 'the', 'schools', 'i', 'knew', 'and', 'their', 'students', '.', 'when', 'i', 'saw', 'the', 
'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school', ',', 'i', 'immediately', 'recalled', '.........', 'at', '..........', 'high', '.', 'a', 'classic', 'line', ':', 'inspector', ':', 'i', \"'m\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers', '.', 'student', ':', 'welcome', 'to', 'bromwell', 'high', '.', 'i', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'bromwell', 'high', 'is', 'far', 'fetched', '.', 'what', 'a', 'pity', 'that', 'it', 'is', \"n't\", '!']),\n", + " label: (None, 1)\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium import Field, LabelField, Vocab\n", + "\n", + "# Lowercasing as a post-tokenization hook\n", + "def lowercase(raw, tokenized):\n", + " return raw, [token.lower() for token in tokenized]\n", + "\n", + "# Truncating as a post-tokenization hook\n", + "def truncate(raw, tokenized, max_length=200):\n", + " return raw, tokenized[:max_length]\n", + "\n", + "vocab = Vocab(max_size=10000)\n", + "text = Field(name=\"text\", \n", + " numericalizer=vocab,\n", + " include_lengths=True,\n", + " tokenizer=\"spacy-en_core_web_sm\",\n", + " posttokenize_hooks=[truncate, lowercase])\n", + "\n", + "# The labels are already mapped to indices in /datasets so we will\n", + "# pass them through\n", + "label = LabelField(name=\"label\", numericalizer=lambda x: x)\n", + "fields = {\n", + " 'text': text,\n", + " 'label': label\n", + "}\n", + "\n", + "# Use the given Fields to load the dataset again\n", + "splits = HF.from_dataset_dict(imdb, fields=fields)\n", + "imdb_train, imdb_test = splits['train'], splits['test']\n", + "imdb_train.finalize_fields()\n", + "print(imdb_train)\n", + "print(imdb_train[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we can see the effect of our hooks and using the spacy tokenizer. Now our dataset will be a bit cleaner to work with. Some data cleaning would still be desired, such as removing tokens which only contain punctuation, but we leave this exercise to the reader :)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Loading pretrained embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In most use-cases, we want to use pre-trained word embeddings along with our neural model. With Podium, this process is very simple. If your field uses a vocabulary, it has already built an inventory of tokens for your dataset.\n", + "\n", + "For example, we will use the [GloVe](https://nlp.stanford.edu/projects/glove/) vectors. You can read more about loading pretrained vectors in [Loading pretrained word vectors](http://takelab.fer.hr/podium/walkthrough.html#pretrained), but the procedure to load these vectors has two steps: (1) initialize the vector class, which sets all the required paths and (2) obtain the vectors for a pre-defined list of words by calling `load_vocab`." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "For vocabulary of size: 10000 loaded embedding matrix of shape: (10000, 300)\n",
+       "Vector for sport: [ 0.34566    0.15934    0.48444   -0.13693    0.18737    0.2678"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from podium.vectorizers import GloVe\n",
+    "vocab = fields['text'].vocab\n",
+    "glove = GloVe()\n",
+    "embeddings = glove.load_vocab(vocab)\n",
+    "print(f\"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: {embeddings.shape}\")\n",
+    "# We can obtain vectors for a single word (given the word is loaded) like this:\n",
+    "word = \"sport\"\n",
+    "print(f\"Vector for {word}: {glove.token_to_vector(word)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " -0.39159    0.4931    -0.76111   -1.4586     0.41475    0.55837\n",
+    "  ...\n",
+    "  0.13802    0.36619    0.19734    0.35701   -0.42228   -0.25242\n",
+    " -0.050651  -0.041129   0.15092    0.22084    0.52252   -0.27224 ]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Defining a simple neural model in Pytorch"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this section, we will implement a very simple neural classification model -- a 2-layer BiGRU with a single hidden layer classifier on top of its last hidden state. Many improvements to the model can be made, but this is not our current focus."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n",
+    "\n",
+    "class RNNClassifier(nn.Module):\n",
+    "    def __init__(self, embedding, embed_dim=300, hidden_dim=300, num_labels=2):\n",
+    "        super(RNNClassifier, self).__init__()\n",
+    "        self.embedding = embedding\n",
+    "        self.encoder = nn.GRU(\n",
+    "            input_size=embed_dim,\n",
+    "            hidden_size=hidden_dim,\n",
+    "            num_layers=2,\n",
+    "            bidirectional=True,\n",
+    "            dropout=0.3\n",
+    "        )\n",
+    "        self.decoder = nn.Sequential(\n",
+    "            nn.Linear(2*hidden_dim, hidden_dim),\n",
+    "            nn.Tanh(),\n",
+    "            nn.Linear(hidden_dim, num_labels)\n",
+    "        )\n",
+    "\n",
+    "    def forward(self, x, lengths):\n",
+    "        e = self.embedding(x)\n",
+    "        h_pack = pack_padded_sequence(e, \n",
+    "                                      lengths,\n",
+    "                                      enforce_sorted=False,\n",
+    "                                      batch_first=True)\n",
+    "\n",
+    "        _, h = self.encoder(h_pack) # [2L x B x H]\n",
+    "\n",
+    "        # Concat last state of left and right directions\n",
+    "        h = torch.cat([h[-1], h[-2]], dim=-1) # [B x 2H]\n",
+    "        return self.decoder(h)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will now define the prerequisites for pytorch model training, where we will use a GPU for speed; however, running the model for one epoch is possible, albeit time-consuming, even without a GPU."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "embed_dim = 300\n",
+    "padding_index = text.vocab.get_padding_index()\n",
+    "embedding_matrix = nn.Embedding(len(text.vocab), embed_dim,\n",
+    "                                padding_idx=padding_index)\n",
+    "# Copy the pretrained GloVe word embeddings\n",
+    "embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))\n",
+    "\n",
+    "device = torch.device(\"cuda:0\")\n",
+    "model = RNNClassifier(embedding_matrix)\n",
+    "model = model.to(device)\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "optimizer = torch.optim.Adam(model.parameters())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have the model setup code ready, we will first define a helper method to measure the accuracy of our model after each epoch:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "def update_stats(accuracy, confusion_matrix, logits, y):\n",
+    "    _, max_ind = torch.max(logits, 1)\n",
+    "    equal = torch.eq(max_ind, y)\n",
+    "    correct = int(torch.sum(equal))\n",
+    "\n",
+    "    for j, i in zip(max_ind, y):\n",
+    "        confusion_matrix[int(i),int(j)]+=1\n",
+    "    return accuracy + correct, confusion_matrix"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and now the training loop for the model:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import tqdm\n",
+    "def train(model, data, optimizer, criterion, num_labels):\n",
+    "    model.train()\n",
+    "    accuracy, confusion_matrix = 0, np.zeros((num_labels, num_labels), dtype=int)\n",
+    "    for batch_num, batch in tqdm.tqdm(enumerate(data), total=len(data)):\n",
+    "        x, lens = batch.text\n",
+    "        y = batch.label\n",
+    "        logits = model(x, lens)\n",
+    "        accuracy, confusion_matrix = update_stats(accuracy, confusion_matrix, logits, y)\n",
+    "        loss = criterion(logits, y.squeeze())\n",
+    "        # Clear gradients from the previous step before backpropagating\n",
+    "        optimizer.zero_grad()\n",
+    "        loss.backward()\n",
+    "        optimizer.step()\n",
+    "    print(\"[Accuracy]: {}/{} : {:.3f}%\".format(\n",
+    "        accuracy, len(data)*data.batch_size, accuracy / len(data) / data.batch_size * 100))\n",
+    "    return accuracy, confusion_matrix"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and now, we are done with our model code. Let's turn back to Podium and see how we can set up batching for our training loop to start ticking."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Minibatching data in Podium"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have covered batching data in [Minibatching data](http://takelab.fer.hr/podium/quickstart.html#minibatching) and advanced batching through bucketing in [Bucketing instances when iterating](http://takelab.fer.hr/podium/advanced.html#bucketing). We will use the plain Iterator and leave bucketing for you to try, so you can see how much the model speeds up when padding is minimized. One change we would like to make when iterating over data is to obtain the data matrices as torch tensors on the `device` we defined previously. 
We will now demonstrate how to do this by setting the `matrix_class` argument of the `Iterator`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Accuracy]: 20050/25024 : 80.123%\n", + "[Accuracy]: 22683/25024 : 90.645%\n", + "[Accuracy]: 23709/25024 : 94.745%\n", + "[Accuracy]: 24323/25024 : 97.199%\n", + "[Accuracy]: 24595/25024 : 98.286%" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from podium import Iterator\n", + "# Closure for converting data to given device\n", + "def gpu_tensor(data):\n", + " return torch.tensor(data).to(device)\n", + "# Initialize our iterator\n", + "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor)\n", + "\n", + "epochs = 5\n", + "for epoch in range(epochs):\n", + " train(model, train_iter, optimizer, criterion, num_labels=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we are done! In our case, the model takes about one minute per epoch on a GPU, but this can be sped up by using bucketing, which we recommend you try out yourself." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py index 897a823f..abbf914f 100644 --- a/docs/source/scripts/convert_doc_to_notebooks.py +++ b/docs/source/scripts/convert_doc_to_notebooks.py @@ -20,8 +20,8 @@ "advanced.rst", "preprocessing.rst", "walkthrough.rst", - "pytorch_rnn_example.rst", - "tfidf_example.rst", + "examples/pytorch_rnn_example.rst", + "examples/tfidf_example.rst", ] _re_label = re.compile(r"\.\.\s+_([^:]*):") From 73ab6020f8ba5cc88018500f22d3af44cc0ad5c5 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 17:09:37 +0200 Subject: [PATCH 14/26] Remove debug print --- docs/source/scripts/convert_doc_to_notebooks.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py index abbf914f..46288931 100644 --- a/docs/source/scripts/convert_doc_to_notebooks.py +++ b/docs/source/scripts/convert_doc_to_notebooks.py @@ -558,8 +558,6 @@ def convert_all_tutorials(path_to_docs=None, path_to_dest=None): for file in TUTORIAL_FILES: notebook_name = os.path.splitext(file)[0] + ".ipynb" doc_file = os.path.join(path_to_docs, file) - print(doc_file) - print(os.path.exists(doc_file)) notebook_file = os.path.join(path_to_dest, notebook_name) convert_rst_file_to_notebook(doc_file, notebook_file, origin_folder=path_to_docs, dest_folder=path_to_dest, additional_deps=ADDITIONAL_DEPS.get(file)) From 7b78c81069bd69d9b5afddde76610d575bbd5347 Mon Sep 17 00:00:00 2001 From: mariosasko Date: Thu, 1 Apr 2021 18:47:31 +0200 Subject: [PATCH 15/26] Fix tfidf example, improve notebooks --- docs/source/examples/pytorch_rnn_example.rst | 35 +- docs/source/examples/tfidf_example.rst | 2 +- docs/source/index.rst | 4 +- docs/source/notebooks/advanced.ipynb | 10 +- .../notebooks/pytorch_rnn_example.ipynb | 40 ++- docs/source/notebooks/quickstart.ipynb | 2 +- docs/source/notebooks/tfidf_example.ipynb | 330 ++++++++++++++++++ docs/source/notebooks/walkthrough.ipynb | 16 +- .../scripts/convert_doc_to_notebooks.py | 15 +- 9 files changed, 394 insertions(+), 60 deletions(-) create mode 100644 docs/source/notebooks/tfidf_example.ipynb diff --git a/docs/source/examples/pytorch_rnn_example.rst 
b/docs/source/examples/pytorch_rnn_example.rst index 991ee4f0..0700e425 100644 --- a/docs/source/examples/pytorch_rnn_example.rst +++ b/docs/source/examples/pytorch_rnn_example.rst @@ -11,8 +11,8 @@ As we have covered in :ref:`hf-loading`, we have implemented wrappers around .. code-block:: python - >>> import datasets - >>> imdb = datasets.load_dataset('imdb') + >>> from datasets import load_dataset + >>> imdb = load_dataset('imdb') >>> print(imdb) DatasetDict({ train: Dataset({ @@ -106,18 +106,17 @@ We will instead define our own Fields for the corresponding features, add postto dataset_name: imdb, size: 25000, fields: [ - Field({ - name: 'text', - keep_raw: False, - is_target: False, - vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000}) - }), - LabelField({ - name: 'label', - keep_raw: False, - is_target: True - }) - + Field({ + name: 'text', + keep_raw: False, + is_target: False, + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000}) + }), + LabelField({ + name: 'label', + keep_raw: False, + is_target: True + }) ] }) >>> print(imdb_train[0]) @@ -194,7 +193,7 @@ In this section, we will implement a very simple neural classification model -- ... h = torch.cat([h[-1], h[-2]], dim=-1) # [B x 2H] ... return self.decoder(h) -There. We will now define the prerequisites for pytorch model training, where we will use a GPU for speed, however running the model for one epoch will is possible albeit time-consuing even without a GPU. +We will now define the prerequisites for pytorch model training. .. code-block:: python @@ -205,7 +204,7 @@ There. We will now define the prerequisites for pytorch model training, where we >>> # Copy the pretrained GloVe word embeddings >>> embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings)) >>> - >>> device = torch.device("cuda:0") + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> model = RNNClassifier(embedding_matrix) >>> model = model.to(device) >>> criterion = nn.CrossEntropyLoss() @@ -256,10 +255,10 @@ We have covered batching data in :ref:`minibatching` and advanced batching throu >>> from podium import Iterator >>> # Closure for converting data to given device - >>> def gpu_tensor(data): + >>> def device_tensor(data): ... return torch.tensor(data).to(device) >>> # Initialize our iterator - >>> train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor) + >>> train_iter = Iterator(imdb_train, batch_size=32, matrix_class=device_tensor) >>> >>> epochs = 5 >>> for epoch in range(epochs): diff --git a/docs/source/examples/tfidf_example.rst b/docs/source/examples/tfidf_example.rst index 770a8440..2ba93864 100644 --- a/docs/source/examples/tfidf_example.rst +++ b/docs/source/examples/tfidf_example.rst @@ -73,7 +73,7 @@ We will do two things: (1) implement a pre-tokenization hook to lowercase our da And then implement flexible ngram extraction where the ``n`` is an interval using ``nltk``\s ``ngrams`` function: .. code-block:: python - + >>> from ntlk import ngrams >>> class NGramHook: ... 
# Transforms a sequence of unigrams into a sequence of diff --git a/docs/source/index.rst b/docs/source/index.rst index 22b7b571..36119ebf 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -15,7 +15,7 @@ Contents --------------------------------- The documentation is organized in five parts: -- **Quickstart**: an quick preview of the library, +- **Quickstart**: a quick preview of the library, - **Walkthrough**: a description of how the basics work, - **In-depth overview**: advanced usage options, - **Examples**: full stand-alone examples of NLP models using Podium, @@ -39,7 +39,7 @@ The documentation is organized in five parts: .. toctree:: :maxdepth: 2 - :caption: Full examples + :caption: Examples examples/tfidf_example.rst examples/pytorch_rnn_example.rst diff --git a/docs/source/notebooks/advanced.ipynb b/docs/source/notebooks/advanced.ipynb index 639d6af0..d582d1a6 100644 --- a/docs/source/notebooks/advanced.ipynb +++ b/docs/source/notebooks/advanced.ipynb @@ -134,7 +134,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -337,7 +337,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -477,7 +477,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -798,7 +798,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -867,7 +867,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { diff --git a/docs/source/notebooks/pytorch_rnn_example.ipynb b/docs/source/notebooks/pytorch_rnn_example.ipynb index a29d6256..076f953f 100644 --- a/docs/source/notebooks/pytorch_rnn_example.ipynb +++ b/docs/source/notebooks/pytorch_rnn_example.ipynb @@ -9,7 +9,10 @@ "# Podium installation\n", "! pip install podium-nlp\n", "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", - "# ! pip install git+https://github.com/takelab/podium" + "# ! pip install git+https://github.com/takelab/podium\n", + "\n", + "# Additional dependencies required to run this notebook:\n", + "! pip install torch" ] }, { @@ -73,8 +76,8 @@ } ], "source": [ - "import datasets\n", - "imdb = datasets.load_dataset('imdb')\n", + "from datasets import load_dataset\n", + "imdb = load_dataset('imdb')\n", "print(imdb)\n", "from pprint import pprint\n", "pprint(imdb['train'].features)" @@ -174,18 +177,17 @@ " dataset_name: imdb,\n", " size: 25000,\n", " fields: [\n", - " Field({\n", - " name: 'text',\n", - " keep_raw: False,\n", - " is_target: False,\n", - " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", - " }),\n", - " LabelField({\n", - " name: 'label',\n", - " keep_raw: False,\n", - " is_target: True\n", - " })\n", - " \n", + " Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", + " }),\n", + " LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + " })\n", " ]\n", "})\n", "Example({\n", @@ -355,7 +357,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "There. We will now define the prerequisites for pytorch model training, where we will use a GPU for speed, however running the model for one epoch will is possible albeit time-consuing even without a GPU." + "We will now define the prerequisites for pytorch model training." 
] }, { @@ -371,7 +373,7 @@ "# Copy the pretrained GloVe word embeddings\n", "embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))\n", "\n", - "device = torch.device(\"cuda:0\")\n", + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "model = RNNClassifier(embedding_matrix)\n", "model = model.to(device)\n", "criterion = nn.CrossEntropyLoss()\n", @@ -476,10 +478,10 @@ "source": [ "from podium import Iterator\n", "# Closure for converting data to given device\n", - "def gpu_tensor(data):\n", + "def device_tensor(data):\n", " return torch.tensor(data).to(device)\n", "# Initialize our iterator\n", - "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor)\n", + "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=device_tensor)\n", "\n", "epochs = 5\n", "for epoch in range(epochs):\n", diff --git a/docs/source/notebooks/quickstart.ipynb b/docs/source/notebooks/quickstart.ipynb index b42e799d..63d8382c 100644 --- a/docs/source/notebooks/quickstart.ipynb +++ b/docs/source/notebooks/quickstart.ipynb @@ -436,7 +436,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { diff --git a/docs/source/notebooks/tfidf_example.ipynb b/docs/source/notebooks/tfidf_example.ipynb new file mode 100644 index 00000000..bb10f8e2 --- /dev/null +++ b/docs/source/notebooks/tfidf_example.ipynb @@ -0,0 +1,330 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Podium installation\n", + "! pip install podium-nlp\n", + "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", + "# ! pip install git+https://github.com/takelab/podium" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# TFIDF + scikit-learn SVM" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we will cover a once popular family of models -- support vector machines (SVMs) with TF-IDF representations. As a simple example, we will analyse binary classification on the Stanford sentiment treebank (SST) dataset.\n", + "\n", + "First, we will implement a minimalistic example without much additional preprocessing. Since we're using TFIDF representation of our dataset, it is smart to limit the size of the vocabulary as each word needs to be present in the instance TFIDF representation. Let's load the SST dataset and convert it into a single batch:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from podium import Vocab, Field, LabelField\n", + "from podium.datasets import SST\n", + "vocab = Vocab(max_size=5000, specials=())\n", + "text = Field(name='text', numericalizer=vocab, disable_batch_matrix=True)\n", + "label = LabelField(name='label')\n", + "fields = {'text': text, 'label': label}\n", + "train, dev, test = SST.get_dataset_splits(fields=fields)\n", + "train.finalize_fields()\n", + "x, y = train.batch(add_padding=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now loaded our dataset, finalized its Fields and obtained it as a batch of input and target data. What we need to do next is define the TF-IDF vectorization for each instance in the dataset. This is done by using our `TfIdfVectorizer`, which adapts the `scikit-learn` vectorizer to the Podium input data." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "  (6920, 5000)"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from podium.vectorizers import TfIdfVectorizer\n",
+    "tfidf_vectorizer = TfIdfVectorizer()\n",
+    "tfidf_vectorizer.fit(train, field=train.field('text'))\n",
+    "tfidf_batch = tfidf_vectorizer.transform(x.text)\n",
+    "print(type(tfidf_batch), tfidf_batch.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have transformed the train dataset to a sparse matrix containing TF-IDF values for each word in the vocabulary in each instance. What is left to do now is to train our classification model:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Accuracy on the train set: 0.9597"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from sklearn.svm import LinearSVC\n",
+    "from sklearn.metrics import accuracy_score\n",
+    "# Train the SVM on the training set\n",
+    "svm = LinearSVC()\n",
+    "svm.fit(tfidf_batch, y.label.ravel())\n",
+    "# Obtain accuracy on the train set\n",
+    "y_hat = svm.predict(tfidf_batch)\n",
+    "acc = accuracy_score(y_hat, y.label.ravel())\n",
+    "print(f\"Accuracy on the train set: {acc:.4f}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And for a more accurate performance evaluation of our model we turn to the test set:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Accuracy on the test set: 0.7946"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "test_x, test_y = test.batch()\n",
+    "tfidf_test_batch = tfidf_vectorizer.transform(test_x.text)\n",
+    "y_test_hat = svm.predict(tfidf_test_batch)\n",
+    "acc = accuracy_score(y_test_hat, test_y.label.ravel())\n",
+    "print(f\"Accuracy on the test set: {acc:.4f}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our basic unigram TF-IDF linear SVM performs pretty well on the SST dataset, reaching an accuracy of almost `0.8`. While this example encapsulates the basics of using Podium with `scikit-learn`'s `SVM`, we will delve a bit deeper and consider some additional preprocessing."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using ngram features"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have only considered basic unigram features for our model, and this is somewhat prohibitive. Apart from that, we have not implemented any preprocessing for our dataset, and our dataset is cased -- which might be detrimental for the performance of our model since we aggressively trim the vocabulary size.\n",
+    "\n",
+    "We will do two things: (1) implement a pre-tokenization hook to lowercase our data, which in our case is fine as we are using the case-insensitive `str.split` as a tokenizer, and (2) implement ngram extraction as a post-tokenization hook. For a more detailed overview of hooks and how to use them, check [Customizing the preprocessing pipeline with Fields](http://takelab.fer.hr/podium/walkthrough.html#fields). 
We will first implement our lowercase hook:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def lowercase(raw):\n",
+    "    \"\"\"Lowercases the input string\"\"\"\n",
+    "    return raw.lower()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And then implement flexible ngram extraction, where `n` spans an interval, using `nltk`'s `ngrams` function:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nltk import ngrams\n",
+    "class NGramHook:\n",
+    "    # Transforms a sequence of unigrams into a sequence of\n",
+    "    # [min_n, max_n]-grams\n",
+    "    def __init__(self, min_n, max_n):\n",
+    "        self.min_n = min_n\n",
+    "        self.max_n = max_n\n",
+    "    def __call__(self, raw, tokenized):\n",
+    "        tokenized_ngrams = []\n",
+    "        for n in range(self.min_n, self.max_n+1):\n",
+    "            tokenized_ngrams.extend(ngrams(tokenized, n))\n",
+    "        return raw, tokenized_ngrams"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will now incorporate these two hooks into our text input Field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[('at',), ('from',), ('one',), ('have',), ('I',), ('like',), ('his',), ('in', 'the'), ('all',), (\"'\",)]"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Use [1-3]grams, inclusive\n",
+    "ngram_hook = NGramHook(1,3)\n",
+    "vocab = Vocab(max_size=5000, specials=())\n",
+    "text = Field(name='text', numericalizer=vocab,\n",
+    "             disable_batch_matrix=True,\n",
+    "             pretokenization_hooks=[lowercase],\n",
+    "             posttokenization_hooks=[ngram_hook]\n",
+    "             )\n",
+    "label = LabelField(name='label')\n",
+    "fields = {'text': text, 'label': label}\n",
+    "train, dev, test = SST.get_dataset_splits(fields=fields)\n",
+    "train.finalize_fields()\n",
+    "print(text.vocab.itos[40:50])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can see that our new Vocab now contains tuples as its tokens -- as long as an item in a sequence is hashable, we can represent it as part of a Vocab! Note that one 2-gram `('in', 'the')` has made its way into the 50 most frequent tokens.\n",
+    "\n",
+    "As before, we need to train the TF-IDF vectorizer and apply it to our data (which now includes 1-, 2- and 3-grams):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       " (6920, 5000)"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "x, y = train.batch(add_padding=True)\n",
+    "tfidf_vectorizer = TfIdfVectorizer()\n",
+    "tfidf_vectorizer.fit(train, field=train.field('text'))\n",
+    "tfidf_batch = tfidf_vectorizer.transform(x.text)\n",
+    "print(type(tfidf_batch), tfidf_batch.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now train our SVM classification model and evaluate it on the train and test sets:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Accuracy on the train set: 0.9575\n",
+       "Accuracy on the test set: 0.7743"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "svm = LinearSVC()\n",
+    "# Fit the SVM and compute accuracy on the train set\n",
+    "svm.fit(tfidf_batch, y.label.ravel())\n",
+    "y_hat = svm.predict(tfidf_batch)\n",
+    "acc = accuracy_score(y_hat, y.label.ravel())\n",
+    "print(f\"Accuracy on the train set: {acc:.4f}\")\n",
+    "\n",
+    "# Compute accuracy on the test set\n",
+    "test_x, test_y = test.batch(add_padding=True)\n",
+    "tfidf_test_batch = tfidf_vectorizer.transform(test_x.text)\n",
+    "y_test_hat = svm.predict(tfidf_test_batch)\n",
+    "acc = accuracy_score(y_test_hat, test_y.label.ravel())\n",
+    "print(f\"Accuracy on the test set: {acc:.4f}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sadly, our new model didn't perform better than our initial one on either the train or the test set, but there are many avenues we can try further, such as tuning the hyperparameters of the LinearSVC model on the development set (sketched below) or filtering out stop words and punctuation. We encourage you to open this example in Colab and try some things yourself!"
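+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make the first of those avenues concrete, here is a minimal sketch of tuning the regularization strength `C` of `LinearSVC` on the development split. It only reuses names defined above (`dev`, `tfidf_vectorizer`, `tfidf_batch`, `y`); the candidate grid for `C` is an arbitrary illustration, not a recommendation, and a proper grid search would be the next step:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Vectorize the development split with the TF-IDF vectorizer fitted on train\n",
+    "dev_x, dev_y = dev.batch(add_padding=True)\n",
+    "tfidf_dev_batch = tfidf_vectorizer.transform(dev_x.text)\n",
+    "\n",
+    "# Fit one SVM per candidate value of C (an illustrative grid)\n",
+    "# and keep the value with the best dev-set accuracy\n",
+    "best_C, best_acc = None, 0.0\n",
+    "for C in [0.01, 0.1, 1.0, 10.0]:\n",
+    "    candidate = LinearSVC(C=C)\n",
+    "    candidate.fit(tfidf_batch, y.label.ravel())\n",
+    "    acc = accuracy_score(candidate.predict(tfidf_dev_batch), dev_y.label.ravel())\n",
+    "    if acc > best_acc:\n",
+    "        best_C, best_acc = C, acc\n",
+    "print(f\"Best C: {best_C}, dev accuracy: {best_acc:.4f}\")"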
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/source/notebooks/walkthrough.ipynb b/docs/source/notebooks/walkthrough.ipynb index b8a1dc3a..e6134804 100644 --- a/docs/source/notebooks/walkthrough.ipynb +++ b/docs/source/notebooks/walkthrough.ipynb @@ -49,7 +49,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -121,7 +121,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -266,7 +266,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -432,7 +432,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -494,7 +494,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -682,7 +682,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -817,7 +817,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { @@ -965,7 +965,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "" + "" ] }, { diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py index c8189d59..8f8a8f33 100644 --- a/docs/source/scripts/convert_doc_to_notebooks.py +++ b/docs/source/scripts/convert_doc_to_notebooks.py @@ -20,8 +20,8 @@ "advanced.rst", "preprocessing.rst", "walkthrough.rst", - "pytorch_rnn_example.rst", - "tfidf_example.rst", + "examples/pytorch_rnn_example.rst", + "examples/tfidf_example.rst", ] _re_label = re.compile(r"\.\.\s+_([^:]*):") @@ -451,7 +451,12 @@ def rm_first_line(text): ! pip install datasets spacy ! python -m spacy download en_core_web_sm """ - ) + ), + "examples/pytorch_rnn_example.rst": textwrap.dedent( + """\ + ! pip install torch + """ + ), } ADDITIONAL_DEPS = {k: "# Additional dependencies required to run this notebook:\n" + v for k, v in ADDITIONAL_DEPS.items()} @@ -558,9 +563,7 @@ def convert_all_tutorials(path_to_docs=None, path_to_dest=None): for file in TUTORIAL_FILES: notebook_name = os.path.splitext(file)[0] + ".ipynb" doc_file = os.path.join(path_to_docs, file) - print(doc_file) - print(os.path.exists(doc_file)) - notebook_file = os.path.join(path_to_dest, notebook_name) + notebook_file = os.path.join(path_to_dest, Path(notebook_name).name) convert_rst_file_to_notebook(doc_file, notebook_file, origin_folder=path_to_docs, dest_folder=path_to_dest, additional_deps=ADDITIONAL_DEPS.get(file)) From 0ce4f200cfbed6fc1d3f58a00d4ef2aca8e58ce1 Mon Sep 17 00:00:00 2001 From: mariosasko Date: Thu, 1 Apr 2021 19:19:58 +0200 Subject: [PATCH 16/26] Move examples notebooks to notebooks/examples --- docs/source/_static/js/custom.js | 13 +- .../examples/pytorch_rnn_example.ipynb | 40 +- .../{ => examples}/tfidf_example.ipynb | 0 .../notebooks/pytorch_rnn_example.ipynb | 502 ------------------ .../scripts/convert_doc_to_notebooks.py | 3 +- 5 files changed, 29 insertions(+), 529 deletions(-) rename docs/source/notebooks/{ => examples}/tfidf_example.ipynb (100%) delete mode 100644 docs/source/notebooks/pytorch_rnn_example.ipynb diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index 26839340..e503da3b 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -16,8 +16,8 @@ const hasNotebook = [ "advanced", "preprocessing", "walkthrough", - "tfidf_example", - "pytorch_rnn_example" + "examples/tfidf_example", + "examples/pytorch_rnn_example" ] function addIcon() { @@ -51,13 +51,12 @@ function addGithubButton() { } function addColabLink() { - if 
(location.toString().indexOf("package_reference") !== -1) { - return; + if (location.protocol === "file:") { + const pageName = location.pathname.split("/html/")[1].split(".")[0] + } else { + const pageName = location.pathname.split("/podium/")[1].split(".")[0] } - const parts = location.toString().split('/'); - const pageName = parts[parts.length - 1].split(".")[0]; - if (hasNotebook.includes(pageName)) { const colabLink = ` Open In Colab diff --git a/docs/source/notebooks/examples/pytorch_rnn_example.ipynb b/docs/source/notebooks/examples/pytorch_rnn_example.ipynb index a29d6256..076f953f 100644 --- a/docs/source/notebooks/examples/pytorch_rnn_example.ipynb +++ b/docs/source/notebooks/examples/pytorch_rnn_example.ipynb @@ -9,7 +9,10 @@ "# Podium installation\n", "! pip install podium-nlp\n", "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", - "# ! pip install git+https://github.com/takelab/podium" + "# ! pip install git+https://github.com/takelab/podium\n", + "\n", + "# Additional dependencies required to run this notebook:\n", + "! pip install torch" ] }, { @@ -73,8 +76,8 @@ } ], "source": [ - "import datasets\n", - "imdb = datasets.load_dataset('imdb')\n", + "from datasets import load_dataset\n", + "imdb = load_dataset('imdb')\n", "print(imdb)\n", "from pprint import pprint\n", "pprint(imdb['train'].features)" @@ -174,18 +177,17 @@ " dataset_name: imdb,\n", " size: 25000,\n", " fields: [\n", - " Field({\n", - " name: 'text',\n", - " keep_raw: False,\n", - " is_target: False,\n", - " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", - " }),\n", - " LabelField({\n", - " name: 'label',\n", - " keep_raw: False,\n", - " is_target: True\n", - " })\n", - " \n", + " Field({\n", + " name: 'text',\n", + " keep_raw: False,\n", + " is_target: False,\n", + " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", + " }),\n", + " LabelField({\n", + " name: 'label',\n", + " keep_raw: False,\n", + " is_target: True\n", + " })\n", " ]\n", "})\n", "Example({\n", @@ -355,7 +357,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "There. We will now define the prerequisites for pytorch model training, where we will use a GPU for speed, however running the model for one epoch will is possible albeit time-consuing even without a GPU." + "We will now define the prerequisites for pytorch model training." 
] }, { @@ -371,7 +373,7 @@ "# Copy the pretrained GloVe word embeddings\n", "embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))\n", "\n", - "device = torch.device(\"cuda:0\")\n", + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "model = RNNClassifier(embedding_matrix)\n", "model = model.to(device)\n", "criterion = nn.CrossEntropyLoss()\n", @@ -476,10 +478,10 @@ "source": [ "from podium import Iterator\n", "# Closure for converting data to given device\n", - "def gpu_tensor(data):\n", + "def device_tensor(data):\n", " return torch.tensor(data).to(device)\n", "# Initialize our iterator\n", - "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=gpu_tensor)\n", + "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=device_tensor)\n", "\n", "epochs = 5\n", "for epoch in range(epochs):\n", diff --git a/docs/source/notebooks/tfidf_example.ipynb b/docs/source/notebooks/examples/tfidf_example.ipynb similarity index 100% rename from docs/source/notebooks/tfidf_example.ipynb rename to docs/source/notebooks/examples/tfidf_example.ipynb diff --git a/docs/source/notebooks/pytorch_rnn_example.ipynb b/docs/source/notebooks/pytorch_rnn_example.ipynb deleted file mode 100644 index 076f953f..00000000 --- a/docs/source/notebooks/pytorch_rnn_example.ipynb +++ /dev/null @@ -1,502 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Podium installation\n", - "! pip install podium-nlp\n", - "# To install from source instead of the last release, comment the command above and uncomment the following one.\n", - "# ! pip install git+https://github.com/takelab/podium\n", - "\n", - "# Additional dependencies required to run this notebook:\n", - "! pip install torch" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Pytorch RNN classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will use the IMDB dataset loaded from 🤗/datasets, preprocess it with Fields and train the model briefly.\n", - "While having a GPU is not necessary it is recommended as otherwise training the model, even for a single epoch, will take a while." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading a dataset from 🤗/datasets" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we have covered in [Loading 🤗 datasets](http://takelab.fer.hr/podium/walkthrough.html#hf-loading), we have implemented wrappers around 🤗 dataset classes to enable working with the plethora of datasets implemented therein. We will now briefly go through (1) loading a dataset from 🤗/datasets and (2) wrapping it in Podium classes." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "DatasetDict({\n", - " train: Dataset({\n", - " features: ['text', 'label'],\n", - " num_rows: 25000\n", - " })\n", - " test: Dataset({\n", - " features: ['text', 'label'],\n", - " num_rows: 25000\n", - " })\n", - " unsupervised: Dataset({\n", - " features: ['text', 'label'],\n", - " num_rows: 50000\n", - " })\n", - "})\n", - "{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], names_file=None, id=None),\n", - " 'text': Value(dtype='string', id=None)}" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from datasets import load_dataset\n", - "imdb = load_dataset('imdb')\n", - "print(imdb)\n", - "from pprint import pprint\n", - "pprint(imdb['train'].features)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By calling `load_dataset` the dataset was downloaded and cached on disk through the `datasets` library. The dataset has two splits we are interested in (`train` and `test`).\n", - "The main thing we need to pay attention to are the `features` of the dataset, in this case `text` and `label`. These features, or data columns, need to be mapped to (and processed by) Podium Fields.\n", - "\n", - "For convenience, we have implemented automatic `Field` type inference from 🤗 dataset features -- however it is far from perfect as we have to make many assumptions on the way. We will now wrap the IMDB dataset in Podium and show the automatically inferred Fields." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Field({\n", - " name: 'text',\n", - " keep_raw: False,\n", - " is_target: False,\n", - " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 280619})\n", - "})\n", - "LabelField({\n", - " name: 'label',\n", - " keep_raw: False,\n", - " is_target: True\n", - "})" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from podium.datasets.hf import HFDatasetConverter as HF\n", - "splits = HF.from_dataset_dict(imdb)\n", - "imdb_train, imdb_test = splits['train'], splits['test']\n", - "imdb_train.finalize_fields() # Construct the vocab\n", - "print(*imdb_train.fields, sep=\"\\n\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Both of the Fields were constructed well, but there are a couple of drawbacks for this concrete dataset. Firstly, the size of the vocabulary is very large (`280619`) -- we would like to trim this down to a reasonable number as we won't be using subword tokenization in this example. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Example({\n", - " text: (None, ['Bromwell', 'High', 'is', 'a', 'cartoon', 'comedy.', 'It', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life,', 'such', 'as', '\"Teachers\".', 'My', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'Bromwell', \"High's\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"Teachers\".', 'The', 'scramble', 'to', 'survive', 'financially,', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', \"teachers'\", 'pomp,', 'the', 'pettiness', 'of', 'the', 'whole', 'situation,', 'all', 'remind', 'me', 'of', 'the', 'schools', 'I', 'knew', 'and', 'their', 'students.', 'When', 'I', 'saw', 'the', 'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school,', 'I', 'immediately', 'recalled', '.........', 'at', '..........', 'High.', 'A', 'classic', 'line:', 'INSPECTOR:', \"I'm\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers.', 'STUDENT:', 'Welcome', 'to', 'Bromwell', 'High.', 'I', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'Bromwell', 'High', 'is', 'far', 'fetched.', 'What', 'a', 'pity', 'that', 'it', \"isn't!\"]),\n", - " label: (None, 1)\n", - "})" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "print(imdb_train[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When inspecting a concrete instance, there are a few more things to note. Firstly, IMDB instances can be quite long (on average around 200 tokens per instance), secondly, the text wasn't tokenized properly near sentence boundaries (due to using the default `str.split` tokenizer) and lastly, the text has varying casing.\n", - "We will instead define our own Fields for the corresponding features, add posttokenization hooks which will transform the data, and use those Fields to replace the automatically inferred ones:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "HFDatasetConverter({\n", - " dataset_name: imdb,\n", - " size: 25000,\n", - " fields: [\n", - " Field({\n", - " name: 'text',\n", - " keep_raw: False,\n", - " is_target: False,\n", - " vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 10000})\n", - " }),\n", - " LabelField({\n", - " name: 'label',\n", - " keep_raw: False,\n", - " is_target: True\n", - " })\n", - " ]\n", - "})\n", - "Example({\n", - " text: (None, ['bromwell', 'high', 'is', 'a', 'cartoon', 'comedy', '.', 'it', 'ran', 'at', 'the', 'same', 'time', 'as', 'some', 'other', 'programs', 'about', 'school', 'life', ',', 'such', 'as', '\"', 'teachers', '\"', '.', 'my', '35', 'years', 'in', 'the', 'teaching', 'profession', 'lead', 'me', 'to', 'believe', 'that', 'bromwell', 'high', \"'s\", 'satire', 'is', 'much', 'closer', 'to', 'reality', 'than', 'is', '\"', 'teachers', '\"', '.', 'the', 'scramble', 'to', 'survive', 'financially', ',', 'the', 'insightful', 'students', 'who', 'can', 'see', 'right', 'through', 'their', 'pathetic', 'teachers', \"'\", 'pomp', ',', 'the', 'pettiness', 'of', 'the', 'whole', 'situation', ',', 'all', 'remind', 'me', 'of', 'the', 'schools', 'i', 'knew', 'and', 'their', 'students', '.', 'when', 'i', 'saw', 'the', 
'episode', 'in', 'which', 'a', 'student', 'repeatedly', 'tried', 'to', 'burn', 'down', 'the', 'school', ',', 'i', 'immediately', 'recalled', '.........', 'at', '..........', 'high', '.', 'a', 'classic', 'line', ':', 'inspector', ':', 'i', \"'m\", 'here', 'to', 'sack', 'one', 'of', 'your', 'teachers', '.', 'student', ':', 'welcome', 'to', 'bromwell', 'high', '.', 'i', 'expect', 'that', 'many', 'adults', 'of', 'my', 'age', 'think', 'that', 'bromwell', 'high', 'is', 'far', 'fetched', '.', 'what', 'a', 'pity', 'that', 'it', 'is', \"n't\", '!']),\n", - " label: (None, 1)\n", - "})" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from podium import Field, LabelField, Vocab\n", - "\n", - "# Lowercasing as a post-tokenization hook\n", - "def lowercase(raw, tokenized):\n", - " return raw, [token.lower() for token in tokenized]\n", - "\n", - "# Truncating as a post-tokenization hook\n", - "def truncate(raw, tokenized, max_length=200):\n", - " return raw, tokenized[:max_length]\n", - "\n", - "vocab = Vocab(max_size=10000)\n", - "text = Field(name=\"text\", \n", - " numericalizer=vocab,\n", - " include_lengths=True,\n", - " tokenizer=\"spacy-en_core_web_sm\",\n", - " posttokenize_hooks=[truncate, lowercase])\n", - "\n", - "# The labels are already mapped to indices in /datasets so we will\n", - "# pass them through\n", - "label = LabelField(name=\"label\", numericalizer=lambda x: x)\n", - "fields = {\n", - " 'text': text,\n", - " 'label': label\n", - "}\n", - "\n", - "# Use the given Fields to load the dataset again\n", - "splits = HF.from_dataset_dict(imdb, fields=fields)\n", - "imdb_train, imdb_test = splits['train'], splits['test']\n", - "imdb_train.finalize_fields()\n", - "print(imdb_train)\n", - "print(imdb_train[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, we can see the effect of our hooks and using the spacy tokenizer. Now our dataset will be a bit cleaner to work with. Some data cleaning would still be desired, such as removing tokens which only contain punctuation, but we leave this exercise to the reader :)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading pretrained embeddings" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In most use-cases, we want to use pre-trained word embeddings along with our neural model. With Podium, this process is very simple. If your field uses a vocabulary, it has already built an inventory of tokens for your dataset.\n", - "\n", - "For example, we will use the [GloVe](https://nlp.stanford.edu/projects/glove/) vectors. You can read more about loading pretrained vectors in [Loading pretrained word vectors](http://takelab.fer.hr/podium/walkthrough.html#pretrained), but the procedure to load these vectors has two steps: (1) initialize the vector class, which sets all the required paths and (2) obtain the vectors for a pre-defined list of words by calling `load_vocab`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "For vocabulary of size: 10000 loaded embedding matrix of shape: (10000, 300)\n", - "Vector for sport: [ 0.34566 0.15934 0.48444 -0.13693 0.18737 0.2678" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from podium.vectorizers import GloVe\n", - "vocab = fields['text'].vocab\n", - "glove = GloVe()\n", - "embeddings = glove.load_vocab(vocab)\n", - "print(f\"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: {embeddings.shape}\")\n", - "# We can obtain vectors for a single word (given the word is loaded) like this:\n", - "word = \"sport\"\n", - "print(f\"Vector for {word}: {glove.token_to_vector(word)}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " -0.39159 0.4931 -0.76111 -1.4586 0.41475 0.55837\n", - " ...\n", - " 0.13802 0.36619 0.19734 0.35701 -0.42228 -0.25242\n", - " -0.050651 -0.041129 0.15092 0.22084 0.52252 -0.27224 ]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining a simple neural model in Pytorch" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this section, we will implement a very simple neural classification model -- a 2-layer BiGRU with a single hidden layer classifier on top of its last hidden state. Many improvements to the model can be made, but this is not our current focus." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "\n", - "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n", - "\n", - "class RNNClassifier(nn.Module):\n", - " def __init__(self, embedding, embed_dim=300, hidden_dim=300, num_labels=2):\n", - " super(NLIModel, self).__init__()\n", - " self.embedding = embedding\n", - " self.encoder = nn.GRU(\n", - " input_size=embed_dim,\n", - " hidden_size=hidden_dim,\n", - " num_layers=2,\n", - " bidirectional=True,\n", - " dropout=0.3\n", - " )\n", - " self.decoder = nn.Sequential(\n", - " nn.Linear(2*hidden_dim, hidden_dim),\n", - " nn.Tanh(),\n", - " nn.Linear(hidden_dim, num_labels)\n", - " )\n", - "\n", - " def forward(self, x, lengths):\n", - " e = self.embedding(x)\n", - " h_pack = pack_padded_sequence(e, \n", - " lengths,\n", - " enforce_sorted=False,\n", - " batch_first=True)\n", - "\n", - " _, h = self.encoder(h_pack) # [2L x B x H]\n", - "\n", - " # Concat last state of left and right directions\n", - " h = torch.cat([h[-1], h[-2]], dim=-1) # [B x 2H]\n", - " return self.decoder(h)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will now define the prerequisites for pytorch model training." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "embed_dim = 300\n", - "padding_index = text.vocab.get_padding_index()\n", - "embedding_matrix = nn.Embedding(len(text.vocab), embed_dim,\n", - " padding_idx=padding_index)\n", - "# Copy the pretrained GloVe word embeddings\n", - "embedding_matrix.weight.data.copy_(torch.from_numpy(embeddings))\n", - "\n", - "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", - "model = RNNClassifier(embedding_matrix)\n", - "model = model.to(device)\n", - "criterion = nn.CrossEntropyLoss()\n", - "optimizer = torch.optim.Adam(model.parameters())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have the model setup code ready, we will first define helper method to measure accuracy of our model after each epoch:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "def update_stats(accuracy, confusion_matrix, logits, y):\n", - " _, max_ind = torch.max(logits, 1)\n", - " equal = torch.eq(max_ind, y)\n", - " correct = int(torch.sum(equal))\n", - "\n", - " for j, i in zip(max_ind, y):\n", - " confusion_matrix[int(i),int(j)]+=1\n", - " return accuracy + correct, confusion_matrix" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and now the training loop for the model:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tqdm\n", - "def train(model, data, optimizer, criterion, num_labels):\n", - " model.train()\n", - " accuracy, confusion_matrix = 0, np.zeros((num_labels, num_labels), dtype=int)\n", - " for batch_num, batch in tqdm.tqdm(enumerate(data), total=len(data)):\n", - " x, lens = batch.text\n", - " y = batch.label\n", - " logits = model(x, lens)\n", - " accuracy, confusion_matrix = update_stats(accuracy, confusion_matrix, logits, y)\n", - " loss = criterion(logits, y.squeeze())\n", - " loss.backward()\n", - " optimizer.step()\n", - " print(\"[Accuracy]: {}/{} : {:.3f}%\".format(\n", - " accuracy, len(data)*data.batch_size, accuracy / len(data) / data.batch_size * 100))\n", - " return accuracy, confusion_matrix" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and now, we are done with our model code. Let's turn back to Podium and see how we can set up batching for our training loop to start ticking." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Minibatching data in Podium" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have covered batching data in [Minibatching data](http://takelab.fer.hr/podium/quickstart.html#minibatching) and advanced batching through bucketing in [Bucketing instances when iterating](http://takelab.fer.hr/podium/advanced.html#bucketing). We will use the plain Iterator and leave bucketing for you to change to see how much the model speeds up when minimizing padding. One change we would like to do when iterating over data is to obtain the data matrices as torch tensors on the `device` we defined previously. 
We will now demonstrate how to do this by setting the `matrix_class` argument of the `Iterator`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[Accuracy]: 20050/25024 : 80.123%\n", - "[Accuracy]: 22683/25024 : 90.645%\n", - "[Accuracy]: 23709/25024 : 94.745%\n", - "[Accuracy]: 24323/25024 : 97.199%\n", - "[Accuracy]: 24595/25024 : 98.286%" - ] - }, - "execution_count": null, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from podium import Iterator\n", - "# Closure for converting data to given device\n", - "def device_tensor(data):\n", - " return torch.tensor(data).to(device)\n", - "# Initialize our iterator\n", - "train_iter = Iterator(imdb_train, batch_size=32, matrix_class=device_tensor)\n", - "\n", - "epochs = 5\n", - "for epoch in range(epochs):\n", - " train(model, train_iter, optimizer, criterion, num_labels=2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we are done! In our case, the model takes about one minute per epoch on a GPU, but this can be sped up by using bucketing, which we recommend you try out yourself." - ] - } - ], - "metadata": {}, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/source/scripts/convert_doc_to_notebooks.py b/docs/source/scripts/convert_doc_to_notebooks.py index 8f8a8f33..bfca3189 100644 --- a/docs/source/scripts/convert_doc_to_notebooks.py +++ b/docs/source/scripts/convert_doc_to_notebooks.py @@ -563,7 +563,8 @@ def convert_all_tutorials(path_to_docs=None, path_to_dest=None): for file in TUTORIAL_FILES: notebook_name = os.path.splitext(file)[0] + ".ipynb" doc_file = os.path.join(path_to_docs, file) - notebook_file = os.path.join(path_to_dest, Path(notebook_name).name) + notebook_file = os.path.join(path_to_dest, notebook_name) + Path(notebook_file).parent.mkdir(exist_ok=True) convert_rst_file_to_notebook(doc_file, notebook_file, origin_folder=path_to_docs, dest_folder=path_to_dest, additional_deps=ADDITIONAL_DEPS.get(file)) From 23b44d1003c76081dd77126eb04712c811f5fe18 Mon Sep 17 00:00:00 2001 From: mariosasko Date: Thu, 1 Apr 2021 19:31:33 +0200 Subject: [PATCH 17/26] Fix JS colab condition --- docs/source/_static/js/custom.js | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index e503da3b..49e2a609 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -51,11 +51,7 @@ function addGithubButton() { } function addColabLink() { - if (location.protocol === "file:") { - const pageName = location.pathname.split("/html/")[1].split(".")[0] - } else { - const pageName = location.pathname.split("/podium/")[1].split(".")[0] - } + const pageName = location.protocol === "file:" ? 
location.pathname.split("/html/")[1].split(".")[0] : location.pathname.split("/podium/")[1].split(".")[0] if (hasNotebook.includes(pageName)) { const colabLink = ` From dcb6b9e2c2654aabf0c568dfcbf3f027c72c7be4 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 20:08:56 +0200 Subject: [PATCH 18/26] Comments --- docs/source/examples/pytorch_rnn_example.rst | 2 +- podium/datasets/iterator.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/source/examples/pytorch_rnn_example.rst b/docs/source/examples/pytorch_rnn_example.rst index 0700e425..48f4f505 100644 --- a/docs/source/examples/pytorch_rnn_example.rst +++ b/docs/source/examples/pytorch_rnn_example.rst @@ -2,7 +2,7 @@ Pytorch RNN classifier ======================= In this example, we will cover a simple RNN-based classifier model implemented in Pytorch. We will use the IMDB dataset loaded from 🤗/datasets, preprocess it with Fields and train the model briefly. -While having a GPU is not necessary it is recommended as otherwise training the model, even for a single epoch, will take a while. +While having a GPU is not necessary, it is recommended as otherwise training the model -- even for a single epoch -- will take a while. Loading a dataset from 🤗/datasets ----------------------------------- diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index 97e23c30..bd8781aa 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -494,9 +494,7 @@ def __init__(self, dataset: DatasetBase = None, shuffle=True, add_padding=True): returned as python lists of ``matrix_class`` instances. """ - batch_size = 0 - if dataset is not None: - batch_size = len(dataset) + batch_size = len(dataset) if dataset else 0 super().__init__( dataset=dataset, From 63bed39ea3a912ad3f45897d2a8a24c7d762b851 Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 20:13:17 +0200 Subject: [PATCH 19/26] Delete examples (the camera ready ones are migrated into docs) --- examples/README.md | 26 - examples/__init__.py | 3 - examples/fine_tuning_bert_example.py | 173 ------ examples/imdb_example.py | 160 ------ examples/keywords_example.py | 79 --- examples/model_example.py | 102 ---- ...ace Transformers on the IMDB Dataset.ipynb | 437 --------------- examples/notebooks/Podium_example.ipynb | 501 ------------------ examples/notebooks/img/imdb_logo_small.png | Bin 237996 -> 0 bytes examples/tfidf_svm_example.py | 63 --- 10 files changed, 1544 deletions(-) delete mode 100644 examples/README.md delete mode 100644 examples/__init__.py delete mode 100644 examples/fine_tuning_bert_example.py delete mode 100644 examples/imdb_example.py delete mode 100644 examples/keywords_example.py delete mode 100644 examples/model_example.py delete mode 100644 examples/notebooks/Fine-tuning BERT from HuggingFace Transformers on the IMDB Dataset.ipynb delete mode 100644 examples/notebooks/Podium_example.ipynb delete mode 100644 examples/notebooks/img/imdb_logo_small.png delete mode 100644 examples/tfidf_svm_example.py diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index 757e1d46..00000000 --- a/examples/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# Podium usage examples - - -## Basic - -[Loading and caching a Dataset](dataset_example.py): the most basic example of loading and storing a built-in podium dataset. - -[Full podium walkthrough](notebooks/Podium_example.ipynb): a basic-to-advanced level walkthrough of all podium features, with detailed comments. 
- -## Intermediate - -[TFIDF SVM example](tfidf_svm_example.py): use TfIDFVectorizer to train an SVM model for SST sentiment classification. - -[Model example](model_example.py): implement a basic scikit-learn MLP model on Pauza HR dataset: - -[KEX](keywords_example.py): use a built-in keyword extraction model. - -## Advanced - -[Experiment example](experiment_example.py): optimize hyperparameters with grid search on Pauza HR dataset. - -[IMDB Example](imdb_example.py): Run a Pytorch NN model on the IMDB sentiment classification dataset. - -[IMDB Example with BERT](notebooks/Fine-tuning%20BERT%20from%20HuggingFace%20Transformers%20on%20the%20IMDB%20Dataset.ipynb): Fine-tune a BERT model from HuggingFace Transformers on the IMDB sentiment classification dataset - -[NER Example](ner_example.py): Run a NER model on the Croatian NER dataset. \ No newline at end of file diff --git a/examples/__init__.py b/examples/__init__.py deleted file mode 100644 index 7b51df24..00000000 --- a/examples/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Package contains examples of podium usage. -""" diff --git a/examples/fine_tuning_bert_example.py b/examples/fine_tuning_bert_example.py deleted file mode 100644 index c6b1b1c8..00000000 --- a/examples/fine_tuning_bert_example.py +++ /dev/null @@ -1,173 +0,0 @@ -# flake8: noqa -import pickle - -import numpy as np -import torch -import torch.nn as nn -import torch.optim as optim -from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score -from transformers import DistilBertForSequenceClassification, DistilBertTokenizer - -from podium import Field, Iterator, LabelField, Vocab -from podium.datasets import IMDB -from podium.experimental.models import Experiment -from podium.experimental.models.impl.pytorch import TorchModel, TorchTrainer -from podium.experimental.pipeline import Pipeline - - -def create_fields(): - tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased") - - def text_to_tokens(string): - input_ids = tokenizer( - string, - max_length=128, - padding=False, - truncation=True, - return_attention_mask=False, - )["input_ids"] - - return tokenizer.convert_ids_to_tokens(input_ids) - - text = Field( - name="text", - tokenizer=text_to_tokens, - numericalizer=tokenizer.convert_tokens_to_ids, - padding_token=0, - ) - - label = LabelField(name="label") - - return {"text": text, "label": label} - - -class BertModelWrapper(nn.Module): - def __init__(self, **kwargs): - super().__init__() - self.model = DistilBertForSequenceClassification.from_pretrained( - "distilbert-base-uncased", return_dict=True - ) - - def forward(self, x): - attention_mask = (x != 0).long() - return_dict = self.model(x, attention_mask) - return_dict["pred"] = return_dict["logits"] - return return_dict - - -def main(): - # loading the IMDB dataset - fields = create_fields() - imdb_train, imdb_test = IMDB.get_dataset_splits(fields) - - # setting up the experiment for fine-tuning the model - model_config = { - "lr": 1e-5, - "clip": float("inf"), # disable gradient clipping - "num_epochs": 5, - } - model_config["num_classes"] = len(fields["label"].vocab) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - iterator = Iterator(batch_size=32) - trainer = TorchTrainer(model_config["num_epochs"], device, iterator, imdb_test) - - # we have to swap axes to nullify the effect of swapping axes afterwards - # because we work with the batch-first model (we should add this option to Podium!!!) 
- feature_transformer = ( - lambda feature_batch: feature_batch[0].astype(np.int64).swapaxes(0, 1) - ) - label_transformer = lambda label_batch: label_batch[0].astype(np.int64) - - experiment = Experiment( - TorchModel, - trainer=trainer, - feature_transformer=feature_transformer, - label_transform_fn=label_transformer, - ) - - experiment.fit( - imdb_train, - model_kwargs={ - "model_class": BertModelWrapper, - "criterion": nn.CrossEntropyLoss(), - "optimizer": optim.AdamW, - "device": device, - **model_config, - }, - ) - - # utilities for saving/loading the model - def save_model(model, file_path): - with open(file_path, "wb") as f: - pickle.dump(model, f) - - def load_model(file_path): - with open(file_path, "rb") as f: - model = pickle.load(f) - return model - - fitted_model = experiment.model - - model_file = "bert_model.pt" - save_model(fitted_model, model_file) - loaded_model = load_model(model_file) - - # here we show how to use the raw model to make predictions, - # this is how you can use the model that is already fine-tuned - cast_to_torch_transformer = lambda t: torch.from_numpy(t[0].astype(np.int64)).to( - device - ) - - @torch.no_grad() - def make_predictions(raw_model, dataset, batch_size=64): - raw_model.eval() - - def predict(batch): - predictions = raw_model(cast_to_torch_transformer(batch))["pred"] - return predictions.cpu().numpy() - - iterator = Iterator(batch_size=batch_size, shuffle=False) - - predictions = [] - for x_batch, _ in iterator(dataset): - batch_prediction = predict(x_batch) - predictions.append(batch_prediction) - - return np.concatenate(predictions) - - # model evaluation - loaded_model_raw = loaded_model.model - predictions = make_predictions(loaded_model_raw, imdb_test) - y_pred = predictions.argmax(axis=1) - - _, y_true = imdb_test.batch() - y_true = y_true[0].ravel() - - print("accuracy score:", accuracy_score(y_true, y_pred)) - print("precision score:", precision_score(y_true, y_pred)) - print("recall score:", recall_score(y_true, y_pred)) - print("f1 score:", f1_score(y_true, y_pred)) - - # we use `Pipeline` to make predictions on raw data - pipe = Pipeline( - fields=list(fields.values()), - example_format="list", - feature_transformer=cast_to_torch_transformer, - model=loaded_model, - ) - - instances = [["This movie is horrible"], ["This movie is great!"]] - - for instance in instances: - predictions = pipe.predict_raw(instance) - print( - f"instance: {instance}, predicted label: " - f'{fields["label"].vocab.itos[predictions.argmax()]}, ' - f"predictions: {predictions}" - ) - - -if __name__ == "__main__": - main() diff --git a/examples/imdb_example.py b/examples/imdb_example.py deleted file mode 100644 index 35fd506d..00000000 --- a/examples/imdb_example.py +++ /dev/null @@ -1,160 +0,0 @@ -# flake8: noqa -import pickle - -import torch -import torch.nn as nn - -from podium import Field, Iterator, LabelField, Vocab -from podium.datasets import IMDB -from podium.experimental.models import Experiment -from podium.experimental.models.impl.pytorch import AttentionRNN, TorchModel, TorchTrainer -from podium.experimental.pipeline import Pipeline -from podium.vectorizers import GloVe - - -def lowercase(raw, tokenized): - """ - Applies lowercasing as a post-tokenization hook. - - Parameters - ---------- - Raw : str - the untokenized input data - Tokenized: list(str) - list of tokens. 
- Returns - ------- - Raw: str - unmodified input - Tokenized: list(str) - lowercased tokenized data - """ - return raw, [token.lower() for token in tokenized] - - -def max_length(raw, data, length=200): - """ - Applies lowercasing as a post-tokenization hook. - - Parameters - ---------- - Raw : str - the untokenized input data - Tokenized: list(str) - list of tokens. - Length: int - maximum length for each instance - Returns - ------- - Raw: str - unmodified input - Tokenized: list(str) - tokenized data truncated to `length` - """ - return raw, data[:length] - - -def create_fields(): - # Define the vocabulary - max_vocab_size = 10000 - min_frequency = 5 - vocab = Vocab(max_size=max_vocab_size, min_freq=min_frequency) - - text = Field(name="text", numericalizer=vocab, tokenizer="spacy-en", keep_raw=False) - # Add preprpocessing hooks to model - # 1. Lowercase - text.add_posttokenize_hook(lowercase) - # 2. Truncate to length - text.add_posttokenize_hook(max_length) - - label = LabelField(name="label", numericalizer=Vocab(specials=())) - return {text.name: text, label.name: label} - - -def main(): - fields = create_fields() - imdb_train, imdb_test = IMDB.get_dataset_splits(fields) - - # Construct vectoziter based on vocab - vocab = fields["text"].vocab - embeddings = GloVe().load_vocab(vocab) - print( - f"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: {embeddings.shape}" - ) - - # First, we will define the hyperparameters for our model. - # These are only used when a concrete model is trained, and can be changed between calls. - model_config = { - "rnn_type": "LSTM", - "embed_dim": 300, - "hidden_dim": 150, - "nlayers": 1, - "lr": 1e-3, - "clip": 5, - "epochs": 1, - "batch_size": 32, - "dropout": 0.0, - "bidirectional": True, - } - - # Task-specific metadata - label_vocab = fields["label"].vocab - model_config["num_classes"] = len(label_vocab) - model_config["vocab_size"] = len(vocab) - model_config["pretrained_embedding"] = embeddings - # Run on CPU since we don't have a GPU on this machine - device = torch.device("cuda:0") - # Define the model criterion - criterion = nn.CrossEntropyLoss() - - data_iterator = Iterator(batch_size=32) - - trainer = TorchTrainer(model_config["epochs"], device, data_iterator, imdb_test) - experiment = Experiment(TorchModel, trainer=trainer) - - model = experiment.fit( - imdb_train, # Data on which to fit the model - model_kwargs={ # Arguments passed to the model constructor - "model_class": AttentionRNN, # The wrapped concrete model - "criterion": criterion, # The loss for the concrete model - "optimizer": torch.optim.Adam, # Optimizer _class_ - "device": device, # The device to store the data on - **model_config, # Delegated to the concrete model - }, - ) - - # Check serialization for _model_ only (should be for experiment as well) - fitted_model = experiment.model - - model_save_file = "model.pt" - with open(model_save_file, "wb") as dump_file: - pickle.dump(fitted_model, dump_file) - - with open(model_save_file, "rb") as load_file: - loaded_model = pickle.load(load_file) - - ft = experiment.feature_transformer - cast_to_torch_transformer = lambda t: torch.from_numpy( - ft.transform(t).swapaxes(0, 1) - ).to(device) - - pipe = Pipeline( - fields=list(fields.values()), - example_format="list", - feature_transformer=cast_to_torch_transformer, - model=fitted_model, - ) - - instances = [["This movie is horrible"], ["This movie is great!"]] - - for instance in instances: - prediction = pipe.predict_raw(instance) - print( - f"For instance: 
{instance}, the prediction is: " - f"{fields['label'].vocab.itos[prediction.argmax()]}," - f" with logits: {prediction}" - ) - - -if __name__ == "__main__": - main() diff --git a/examples/keywords_example.py b/examples/keywords_example.py deleted file mode 100644 index d8b70338..00000000 --- a/examples/keywords_example.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Example shows how to add keyword extraction features to a dataset. -""" -from podium import Field -from podium.datasets import Dataset, ExampleFactory -from podium.preproc.yake import YAKE - - -class DummyDataset(Dataset): - """ - Dummmy dataset. - """ - - TEXT_FIELD_NAME = "text" - - def __init__(self, texts, fields): - """ - Dataset constructor. - - Parameters - ---------- - texts : list of str - list of document represented as strings - """ - example_factory = ExampleFactory(fields) - examples = [ - example_factory.from_dict({DummyDataset.TEXT_FIELD_NAME: text}) - for text in texts - ] - super(DummyDataset, self).__init__(**{"examples": examples, "fields": fields}) - - -def keyword_extraction_main(): - """ - Function creates a dummmy keyword extraction dataset in Croatian language - and extracts the keywords. - - The created dataset demonstrates how to map the input text to two fields: - tokens (tokenized using str.split) and keywords (extracted using YAKE). - """ - - sample_texts = [ - """Karijera u turizmu Pjevačica Renata Končić Minea već dva tjedna radi kao - prodajni predstavnik u odjelu korporativnog poslovanja turističke - agencije Adriatica.net, no zbog toga se neće, tvrdi 27-godišnja - Zagrepčanka, odreći glazbe. Minea se prije deset godina, kad je - počinjala pjevačku karijeru i imala veliki hit 'Vrapci i komarci', - ispisala iz ekonomske škole, a poslije je maturirala na dopisnoj - birotehničkoj školi. (Marijana Marinović/Matko Stanković)""", - """MISS UNIVERSE TESTIRANA NA AIDS JOHANNESBURG - Aktualna Miss Universe - podvrgnula se u utorak testu na AIDS u jednoj bolnici u - Johannesburgu i izrazila nadu da će njezina popularnost uvjeriti - druge ljude da učine isto. Brineta plavih očiju Natalie Gtebova, - 23-godišnja Kanađanka rođena u Rusiji, izjavila je da želi - iskoristiti svoju titulu za podizanje svjesnosti i borbe protiv - stigme koja okružuju tu bolest. »Mislim da će činjenica da sam se - javno testirala govoriti vrlo mnogo. To će ohrabriti puno mladih - žena da učine isto«, rekla je ona. Južnoafrička Republika bilježi - najveći broj zaraženih HlV-om - više od pet milijuna ljudi. (H)""", - ] - - tokens = Field( - name="tokens", - tokenizer="split", - keep_raw=True, - ) - kws = Field( - name="keywords", - tokenizer=YAKE("hr"), - keep_raw=True, - ) - fields = {DummyDataset.TEXT_FIELD_NAME: (tokens, kws)} - dummy_dataset = DummyDataset(texts=sample_texts, fields=fields) - keywords = [ex["keywords"][1] for ex in dummy_dataset] - print(*keywords, sep="\n") - - -if __name__ == "__main__": - keyword_extraction_main() diff --git a/examples/model_example.py b/examples/model_example.py deleted file mode 100644 index f1a34b68..00000000 --- a/examples/model_example.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Example how to use model on simple PauzaHR dataset. 
-""" - -from podium import Field, Iterator, LabelField, Vocab -from podium.datasets import PauzaHRDataset -from podium.experimental.models import FeatureTransformer -from podium.experimental.models.impl.fc_model import ScikitMLPClassifier -from podium.experimental.models.impl.simple_trainers import SimpleTrainer -from podium.storage import LargeResource - - -def numericalize_pauza_rating(rating): - """ - Function numericalizes pauza_hr dataset rating field. - """ - label = round(float(rating) * 2) - 1 - return label - - -def label_extraction_fun(y_batch): - """ - Label transform function that returns a 1-d array of rating labels. - """ - return y_batch.Rating.ravel() - - -def feature_extraction_fn(x_batch): - """ - Feature transform function that returns an matrix containing word indexes. - - Serves only as a simple demonstration. - """ - x_tensor = x_batch.Text - return x_tensor - - -def basic_pauza_hr_fields(): - """ - Function returns pauza-hr fields used for classification. - """ - rating = LabelField( - name="Rating", - numericalizer=Vocab(specials=()), - pretokenize_hooks=[numericalize_pauza_rating], - ) - text = Field( - name="Text", - numericalizer=Vocab(), - tokenizer="split", - keep_raw=False, - fixed_length=100, - ) - return {"Text": text, "Rating": rating} - - -def pauza_mlp_example(): - """ - Adjustable example that demonstrates how to use pauzahr dataset with scikit - MLP classifier using podium. - """ - - # Set the base repository directory - # This directory will be used by podium to cache all LargeResources - # like datasets and vectorizers loaded trough the LargeResource API - LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets" - - fields = basic_pauza_hr_fields() - train_set, test_set = PauzaHRDataset.get_train_test_dataset(fields=fields) - - train_iter = Iterator(batch_size=100) - - model = ScikitMLPClassifier( - classes=[i for i in range(len(fields["Rating"].vocab.itos))], - verbose=True, - hidden_layer_sizes=(50, 20), - solver="adam", - ) - - # Define a FeatureTranformer used to extract and transform feature matrices - # from feature batches - feature_transformer = FeatureTransformer(feature_extraction_fn) - - trainer = SimpleTrainer() - trainer.train( - model, - train_set, - iterator=train_iter, - feature_transformer=feature_transformer, - label_transform_fun=label_extraction_fun, - **{trainer.MAX_EPOCH_KEY: 10}, - ) - - test_batch_x, test_batch_y = next(iter(train_iter(train_set))) - x_test = feature_transformer.transform(test_batch_x) - y_test = label_extraction_fun(test_batch_y) - prediction = model.predict(X=x_test) - print("Expected:\t", y_test, "\n", "Given:\t\t", prediction[model.PREDICTION_KEY]) - - -if __name__ == "__main__": - pauza_mlp_example() diff --git a/examples/notebooks/Fine-tuning BERT from HuggingFace Transformers on the IMDB Dataset.ipynb b/examples/notebooks/Fine-tuning BERT from HuggingFace Transformers on the IMDB Dataset.ipynb deleted file mode 100644 index 24c9641b..00000000 --- a/examples/notebooks/Fine-tuning BERT from HuggingFace Transformers on the IMDB Dataset.ipynb +++ /dev/null @@ -1,437 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Fine-tuning BERT from HuggingFace Transformers on the IMDB Dataset\n", - "> __Note__: In this tutorial we use a smaller version of BERT, called __DistilBert__, that is easier and faster to train." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import pickle\n", - "\n", - "import numpy as np\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "\n", - "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n", - "\n", - "from transformers import DistilBertTokenizer, DistilBertForSequenceClassification\n", - "\n", - "from podium.datasets import IMDB, Iterator\n", - "from podium.models import Experiment\n", - "from podium.models.impl.pytorch import TorchTrainer, TorchModel\n", - "from podium.pipeline import Pipeline\n", - "from podium.storage import Field, LabelField, Vocab" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Defining the fields\n", - "\n", - "* text - applies `BertTokenizer` to the instance data. We don't store `attention_mask` to reduce the memory footprint of the dataset, instead we create it ourselves on the fly.\n", - "* label - stores binary labels that represent the sentiment of an instance." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def create_fields():\n", - " tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n", - " \n", - " def text_to_tokens(string):\n", - " input_ids = tokenizer(string,\n", - " max_length=128,\n", - " padding=False,\n", - " truncation=True,\n", - " return_attention_mask=False\n", - " )['input_ids']\n", - " \n", - " return tokenizer.convert_ids_to_tokens(input_ids)\n", - " \n", - " text = Field(name='text',\n", - " tokenizer=text_to_tokens,\n", - " custom_numericalize=tokenizer.convert_tokens_to_ids,\n", - " padding_token=0)\n", - " \n", - " label = LabelField(name='label', vocab=Vocab(specials=()))\n", - " \n", - " return {\n", - " 'text': text,\n", - " 'label': label\n", - " } " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Wrapping the model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this tutorial we will be using `DistilBertForSequenceClassification`. This model has additional layers on top of the base model, `DistilBertModel`, to perform classification. Moreover, `DistilBertForSequenceClassification` is a standard PyTorch `Module` so it can be easily wrapped in another `Module` that has a proper interface to Podium - the model has to return a dictionary that has a key `pred` that points to the model predictions. As mentioned earlier, we are creating the attention mask ourselves so this is a good place to do it. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "class BertModelWrapper(nn.Module):\n", - " \n", - " def __init__(self, **kwargs):\n", - " super().__init__()\n", - " self.model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', \n", - " return_dict=True)\n", - " \n", - " def forward(self, x):\n", - " attention_mask = (x != 0).long()\n", - " return_dict = self.model(x, attention_mask)\n", - " return_dict['pred'] = return_dict['logits']\n", - " return return_dict" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Loading the IMDB dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 84.1M/84.1M [00:08<00:00, 9.56MB/s]\n" - ] - } - ], - "source": [ - "fields = create_fields()\n", - "imdb_train, imdb_test = IMDB.get_dataset_splits(fields)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Check the loaded data \n", - "\n", - "> __Note__: `None` values in the output are for caching purposes." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(None, ['[CLS]', 'dominic', '##k', '(', 'nicky', ')', 'luciano', 'wears', 'a', \"'\", 'hulk', \"'\", 't', '-', 'shirt', 'and', 'tr', '##udge', '##s', 'off', 'everyday', 'to', 'perform', 'his', 'duties', 'as', 'a', 'garbage', 'man', '.', 'he', 'uses', 'his', 'physical', 'power', 'in', 'picking', 'up', 'other', \"'\", 's', 'trash', 'and', 'hauling', 'it', 'to', 'the', 'town', 'dump', '.', 'he', 'reads', 'comic', '-', 'book', 'hero', 'stories', 'and', 'loves', 'wrestlers', 'and', 'wrestling', ',', 'going', 'to', 'wrestlemania', 'with', 'his', 'twin', 'brother', 'eugene', 'on', 'their', 'birthday', 'is', 'a', 'yearly', 'tradition', '.', 'he', 'talks', 'kindly', 'with', 'the', 'many', 'people', 'he', 'comes', 'in', 'contact', 'with', 'during', 'his', 'day', '.', 'he', 'reads', 'comic', 'books', ',', 'which', 'he', 'finds', 'in', 'the', 'trash', ',', 'with', 'a', 'young', 'boy', 'who', 'he', 'often', 'passes', 'by', 'while', 'on', 'the', 'garbage', 'route', '.', 'unfortunately', ',', 'dominic', '##k', 'has', '[SEP]'])\n", - "('positive', None)\n" - ] - } - ], - "source": [ - "print(imdb_train[0].text)\n", - "print(imdb_train[0].label)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setting up the Podium Experiment\n", - "\n", - "To fine-tune the model, we define an `Experiment`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "model_config = {\n", - " 'lr': 1e-5,\n", - " 'clip': float('inf'), # disable gradient clipping\n", - " 'num_epochs': 5,\n", - "}\n", - "model_config['num_classes'] = len(fields['label'].vocab)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Batch]: 781 in 0.10555 seconds, loss=0.43808\n", - "Total time for train epoch: 205.91309213638306\n", - "[Valid]: 781 in 0.00576 seconds, loss=0.38564\n", - "Total time for valid epoch: 63.414066314697266\n", - "[Batch]: 781 in 0.10557 seconds, loss=0.39357\n", - "Total time for train epoch: 199.31143307685852\n", - "[Valid]: 781 in 0.00574 seconds, loss=0.03775\n", - "Total time for valid epoch: 60.488966941833496\n", - "[Batch]: 781 in 0.08967 seconds, loss=0.17306\n", - "Total time for train epoch: 196.89669013023376\n", - "[Valid]: 781 in 0.00577 seconds, loss=0.15879\n", - "Total time for valid epoch: 60.47404861450195\n", - "[Batch]: 781 in 0.10608 seconds, loss=0.04704\n", - "Total time for train epoch: 199.86573767662048\n", - "[Valid]: 781 in 0.00578 seconds, loss=0.00348\n", - "Total time for valid epoch: 60.53778100013733\n", - "[Batch]: 781 in 0.10526 seconds, loss=0.00671\n", - "Total time for train epoch: 200.03476238250732\n", - "[Valid]: 781 in 0.00653 seconds, loss=0.74549\n", - "Total time for valid epoch: 60.55412006378174\n" - ] - } - ], - "source": [ - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "\n", - "iterator = Iterator(batch_size=32)\n", - "trainer = TorchTrainer(model_config['num_epochs'], device, iterator, imdb_test)\n", - "\n", - "# here we have to swap axes to nullify the effect of swapping axes afterwards\n", - "# because we work with the batch-first model (we should add this option to Podium!!!)\n", - "feature_transformer = lambda feature_batch: feature_batch[0].astype(np.int64).swapaxes(0, 1)\n", - "label_transformer = lambda label_batch: label_batch[0].astype(np.int64)\n", - "\n", - "experiment = Experiment(TorchModel, \n", - " trainer=trainer, \n", - " feature_transformer=feature_transformer,\n", - " label_transform_fn=label_transformer)\n", - "\n", - "experiment.fit(imdb_train, \n", - " model_kwargs={\n", - " 'model_class': BertModelWrapper, \n", - " 'criterion': nn.CrossEntropyLoss(), \n", - " 'optimizer': optim.AdamW,\n", - " 'device': device,\n", - " **model_config\n", - " })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Saving and loading the fitted model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Utilities for saving/loading the model " - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "def save_model(model, file_path):\n", - " with open(file_path, 'wb') as f:\n", - " pickle.dump(model, f)\n", - "\n", - "def load_model(file_path):\n", - " with open(file_path, 'rb') as f:\n", - " model = pickle.load(f)\n", - " return model" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "fitted_model = experiment.model\n", - "\n", - "model_file = 'bert_model.pt'\n", - "save_model(fitted_model, model_file)\n", - "loaded_model = load_model(model_file)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Testing and making predictions 
on raw data" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Utilities for making predictions with the raw model on the parsed dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "cast_to_torch_transformer = lambda t: torch.from_numpy(t[0].astype(np.int64)).to(device)\n", - "\n", - "@torch.no_grad()\n", - "def make_predictions(raw_model, dataset, batch_size=32):\n", - " raw_model.eval()\n", - " \n", - " def predict(batch):\n", - " predictions = raw_model(cast_to_torch_transformer(batch))['pred']\n", - " return predictions.cpu().numpy()\n", - "\n", - " iterator = Iterator(batch_size=batch_size, \n", - " shuffle=False)\n", - " \n", - " predictions = []\n", - " for x_batch, _ in iterator(dataset):\n", - " batch_prediction = predict(x_batch)\n", - " predictions.append(batch_prediction)\n", - " \n", - " return np.concatenate(predictions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Model evaluation" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "accuracy score: 0.87036\n", - "precision score: 0.8693258875149581\n", - "recall score: 0.87176\n", - "f1 score: 0.8705412422608348\n" - ] - } - ], - "source": [ - "loaded_model_raw = loaded_model.model\n", - "predictions = make_predictions(loaded_model_raw, imdb_test)\n", - "y_pred = predictions.argmax(axis=1)\n", - "\n", - "_, y_true = imdb_test.batch()\n", - "y_true = y_true[0].ravel()\n", - "\n", - "print('accuracy score:', accuracy_score(y_true, y_pred))\n", - "print('precision score:', precision_score(y_true, y_pred))\n", - "print('recall score:', recall_score(y_true, y_pred))\n", - "print('f1 score:', f1_score(y_true, y_pred))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> __Note__: `loaded_model_raw` is an instance of `BertModelWrapper`." 
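
One caveat on the scores above: they use scikit-learn's default binary averaging, which is fine here because IMDB has exactly two classes. For a label Field with more classes you would pass an explicit `average` mode; a minimal sketch reusing `y_true` and `y_pred` from the cell above:

.. code-block:: python

    >>> # macro averages the per-class F1 scores; micro pools all decisions
    >>> print('macro f1:', f1_score(y_true, y_pred, average='macro'))
    >>> print('micro f1:', f1_score(y_true, y_pred, average='micro'))
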
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### Making predictions on raw data "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "instance: ['This movie is horrible'], predicted label: negative, predictions: [-3.139483 3.0472898]\n",
- "instance: ['This movie is great!'], predicted label: positive, predictions: [ 2.5500994 -3.031134 ]\n"
- ]
- }
- ],
- "source": [
- "pipe = Pipeline(fields=list(fields.values()),\n",
- "                example_format='list',\n",
- "                feature_transformer=cast_to_torch_transformer,\n",
- "                model=loaded_model)\n",
- "\n",
- "instances = [\n",
- "    ['This movie is horrible'],\n",
- "    ['This movie is great!']\n",
- "]\n",
- "\n",
- "for instance in instances:\n",
- "    predictions = pipe.predict_raw(instance)\n",
- "    print(f'instance: {instance}, predicted label: '\n",
- "          f'{fields[\"label\"].vocab.itos[predictions.argmax()]}, '\n",
- "          f'predictions: {predictions}')"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "podium-env",
- "language": "python",
- "name": "podium-env"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.10"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/examples/notebooks/Podium_example.ipynb b/examples/notebooks/Podium_example.ipynb
deleted file mode 100644
index 7ea8558a..00000000
--- a/examples/notebooks/Podium_example.ipynb
+++ /dev/null
@@ -1,501 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "import math\n",
- "import time\n",
- "import podium\n",
- "import pickle\n",
- "\n",
- "import torch\n",
- "import torch.nn as nn\n",
- "import numpy as np"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Dataset loading & preprocessing\n",
- "\n",
- "When using `podium` you have three options for data loading:\n",
- "1. Use one of our built-in datasets\n",
- "2. Use a flexible data loader from a predefined format (`TabularDataset` loads from `.csv`, `.tsv` files)\n",
- "3. Write your own data loader for a dataset in a custom format\n",
- "\n",
- "### IMDB sentiment classification\n",
- "\n",
- "![Imdb logo](img/imdb_logo_small.png)\n",
- "\n",
- "For this walkthrough, we will use the [IMDB sentiment classification dataset](https://ai.stanford.edu/~amaas/data/sentiment/). 
This dataset is built-in, so let's check what exactly that means.\n",
- "\n",
- "- Each built-in dataset has a static method `get_dataset_splits` which downloads and caches the splits for that dataset and returns them as a tuple (train, valid?, test).\n",
- "  - Note: the IMDB dataset only has a train and test split\n",
- "- We will first load the IMDB dataset with default `Fields` (preprocessing pipelines) and check whether we might want to modify something.\n",
- "- You can inspect the default fields by calling the `get_default_fields` static method of the dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "from podium.datasets import IMDB\n",
- "imdb_train, imdb_test = IMDB.get_dataset_splits()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Data in a single dataset instance:\n",
- "==================================================\n",
- "(None, ['I', 'am', 'and', 'was', 'very', 'entertained', 'by', 'the', 'movie', '.', 'It', 'was', 'my', 'all', 'time', 'favorite', 'movie', 'of', '1976', '.', 'Being', 'raised', 'in', 'the', '70', \"'s\", ',', 'I', 'was', 'so', 'in', 'love', 'with', 'Kris', 'Kristoffersons', 'look', 'and', 'demeanor', ',', 'of', 'course', 'I', 'am', 'no', 'movie', 'critic', ',', 'but', 'for', 'the', 'time', 'era', ',', 'I', 'think', 'it', 'was', 'very', 'good', '.', 'I', 'very', 'much', 'like', 'the', 'combo', 'of', 'Streisand', 'and', 'Kristofferson', '.', 'I', 'thought', 'they', 'worked', 'very', 'well', 'together', '.', 'I', 'have', 'seen', 'the', 'movie', 'many', 'times', 'and', 'still', 'love', 'the', 'two', 'of', 'them', 'as', 'Esther', 'and', 'John', 'Norman', '.', 'I', 'am', 'a', 'very', 'huge', 'fan', 'of', 'Kris', 'and', 'see', 'him', 'in', 'concert', 'when', 'I', 'can', '.', 'What', 'a', 'talented', 'singer', 'song', 'writer', ',', 'not', 'to', 'mention', ',', 'actor', '.', 'I', 'have', 'seen', 'him', 'in', 'many', 'movies', ',', 'but', 'still', 'think', 'back', 'to', 'A', 'star', 'is', 'Born', '.'])\n",
- "('positive', None)\n",
- "==================================================\n",
- "Input text length interval [11, 2789] \n",
- "Average length 272.42864 +- 202.5628456251304\n"
- ]
- }
- ],
- "source": [
- "first_instance = imdb_train[0]\n",
- "text, label = first_instance.text, first_instance.label\n",
- "\n",
- "\n",
- "# Note that the text is cased\n",
- "print(\"Data in a single dataset instance:\")\n",
- "print(\"=\"*50)\n",
- "print(text)\n",
- "print(label)\n",
- "print(\"=\"*50)\n",
- "\n",
- "def get_text_statistics(dataset):\n",
- "    instance_lengths = [len(ex.text[1]) for ex in dataset]\n",
- "    print(f\"Input text length interval [{min(instance_lengths)}, {max(instance_lengths)}] \\n\" \n",
- "          f\"Average length {np.mean(instance_lengths)} +- {np.std(instance_lengths)}\")\n",
- "get_text_statistics(imdb_train)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Using hooks during data preprocessing\n",
- "The average length of instances in the dataset is large, and the longest instance has 2789 tokens. \n",
- "Instances of this length are likely to cause memory issues when batched and transferred to the GPU, so we would like to limit the length. We might also want only lowercase data in our instances.\n",
- "\n",
- "TODO: add data processing graph (cf prez)\n",
- "\n",
- "We can implement this ourselves easily by adding `hooks` to our Fields. 
Hooks are methods with a standardized signature which view and modify the data flowing through the preprocessing pipeline at **two** points.\n",
- "1. **Pre-tokenization hooks**:\n",
- "  - pre-tokenization hooks work on raw data (the loaded input string). You might want to lowercase data during pre-tokenization, but keep in mind that most tokenizers (such as `spacy`) are sensitive to casing and might produce bad results on lowercased input. Since we use `spacy` as the `IMDB` tokenizer, lowercasing here is not a good choice, and we delegate it to post-tokenization.\n",
- "2. **Post-tokenization hooks**:\n",
- "  - post-tokenization hooks work on raw **and** tokenized data. Here you might want to limit the length of your instances to a fixed amount or filter out stop-words.\n",
- "\n",
- "Hooks are added to a `Field` and only apply to data being processed by that field. If you want to use the same hook for multiple fields, you need to add it to each of them."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [],
- "source": [
- "## Post-tokenization hooks take *two* arguments: raw and tokenized data\n",
- "def lowercase(raw, tokenized):\n",
- "    \"\"\"Applies lowercasing as a post-tokenization hook\n",
- "\n",
- "    Parameters\n",
- "    ----------\n",
- "    raw : str\n",
- "        the untokenized input data\n",
- "    tokenized : list(str)\n",
- "        list of tokens\n",
- "\n",
- "    Returns\n",
- "    -------\n",
- "    raw : str\n",
- "        unmodified input\n",
- "    tokenized : list(str)\n",
- "        lowercased tokenized data\n",
- "    \"\"\"\n",
- "    return raw, [token.lower() for token in tokenized]\n",
- "\n",
- "def max_length(raw, data, length=200):\n",
- "    \"\"\"Truncates tokenized data to a maximum length as a post-tokenization hook\n",
- "\n",
- "    Parameters\n",
- "    ----------\n",
- "    raw : str\n",
- "        the untokenized input data\n",
- "    data : list(str)\n",
- "        list of tokens\n",
- "    length : int\n",
- "        maximum length for each instance\n",
- "\n",
- "    Returns\n",
- "    -------\n",
- "    raw : str\n",
- "        unmodified input\n",
- "    data : list(str)\n",
- "        tokenized data truncated to `length`\n",
- "    \"\"\"\n",
- "    return raw, data[:length]\n",
- "\n",
- "from podium.storage import LabelField, Field, Vocab\n",
- "\n",
- "def create_fields():\n",
- "    # Define the vocabulary\n",
- "    max_vocab_size = 10000\n",
- "    min_frequency = 5\n",
- "    vocab = Vocab(max_size=max_vocab_size, min_freq=min_frequency)\n",
- "\n",
- "    text = Field(name='text', vocab=vocab, tokenizer='spacy', store_as_raw=False)\n",
- "    # Add preprocessing hooks to the text Field\n",
- "    # 1. Lowercase\n",
- "    text.add_posttokenize_hook(lowercase)\n",
- "    # 2. 
Truncate to length\n", - " text.add_posttokenize_hook(max_length)\n", - "\n", - " label = LabelField(name='label', vocab = Vocab(specials=()))\n", - " return {text.name : text, label.name: label}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's create our modified fields and load the dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "fields = create_fields()\n", - "imdb_train, imdb_test = IMDB.get_dataset_splits(fields)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Input text length interval [11, 200] \n", - "Average length 170.3254 +- 41.14426515129417\n" - ] - } - ], - "source": [ - "# Check whether the preprocessing worked\n", - "get_text_statistics(imdb_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load pretrained embeddings\n", - "\n", - "In most use-cases, we want to use pre-trained word embeddings. With `podium`, this process is incredibly simple. If your field uses a vocabulary, it has already built an inventory of tokens for your dataset.\n", - "\n", - "`Podium` offers a number of implemented `vectorizers` and a class ([BasicVectorStorage](https://github.com/mttk/podium/blob/master/podium/storage/vectorizers/vectorizer.py#L218)) which is able to load the standardized word2vec-style format of word embeddings from disk.\n", - "\n", - "For example, we will use the [GloVe](https://nlp.stanford.edu/projects/glove/) vectors. The procedure to load these vectors has two steps:\n", - "1. Initialize the vector class, which sets all the required paths\n", - " - Right now, the vectors are not yet loaded from disk as you usually don't want to load the full file\n", - "2. Get the vectors for a pre-defined list of words by calling `load_vocab`\n", - " - The argument can be a `Vocab` object (which is itself an `iterable` of strings), or any sequence of strings\n", - " \n", - "The output of the function call is a numpy matrix of word embeddings which you can then pass to your model to initialize the embedding matrix or to be used otherwise." 
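
The cell below runs both steps; once you have the matrix, wiring it into a PyTorch model is a single call. A minimal sketch, assuming `embeddings` is the numpy matrix returned by `load_vocab` in the following cell (`embedding_layer` is just an illustrative name):

.. code-block:: python

    >>> import torch
    >>> import torch.nn as nn
    >>> # Rows of the matrix stay aligned with the Vocab's token indices,
    >>> # so row i embeds the i-th vocabulary token.
    >>> embedding_layer = nn.Embedding.from_pretrained(
    ...     torch.tensor(embeddings, dtype=torch.float),
    ...     freeze=False,  # allow the vectors to be fine-tuned
    ... )
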
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "For vocabulary of size: 10000 loaded embedding matrix of shape: (10000, 300)\n", - "Vector for sport: [ 0.34566 0.15934 0.48444 -0.13693 0.18737 0.2678\n", - " -0.39159 0.4931 -0.76111 -1.4586 0.41475 0.55837\n", - " 0.021504 0.28509 -0.30284 0.021432 0.071542 0.53333\n", - " 0.18084 -0.40818 -0.37935 0.86781 0.4492 0.67524\n", - " 0.052925 -0.42635 0.46103 0.031358 0.5166 -0.081332\n", - " 0.35399 -0.53411 -0.22646 -0.091881 -0.5428 0.61143\n", - " 0.67188 0.079147 -0.21608 0.2817 0.22489 0.0087532\n", - " 0.096962 -0.19739 0.61631 0.19901 0.37232 0.13627\n", - " 0.18799 0.35549 -0.74315 -0.08624 0.1883 -0.53688\n", - " 0.082768 0.78362 -0.31835 -0.21211 -0.023984 0.0095728\n", - " 0.28296 0.30896 -0.38585 -0.55332 -0.66063 0.30262\n", - " -0.65959 0.25509 -0.55906 -0.3507 -0.11521 0.32024\n", - " -0.25448 0.12392 -0.22941 -0.30697 -0.073869 -0.021054\n", - " 0.26082 0.45367 0.32809 0.56492 0.067392 -0.54504\n", - " -0.45747 0.29705 -0.007364 -0.15747 -0.19674 0.44562\n", - " 0.10822 -0.010847 -0.020063 -0.54242 0.85393 -0.098667\n", - " 0.22144 -0.1058 0.08902 0.059909 0.17442 0.34239\n", - " 0.071011 0.32127 0.099133 0.60742 0.52472 -0.13034\n", - " -0.10823 0.16208 0.30687 -0.079381 0.046322 -0.96637\n", - " -0.034772 -0.032899 0.44984 -0.0086 -0.24193 0.067582\n", - " -0.13938 0.19137 0.46692 0.58191 -0.20529 -0.29932\n", - " -0.19122 0.47751 0.011554 0.016327 0.14571 -0.29702\n", - " -0.22549 0.078491 -0.2856 0.027681 -0.050709 0.08904\n", - " -0.10603 0.5987 0.17281 0.0021526 0.93064 -0.057695\n", - " 0.17412 -0.31447 0.0025599 -0.079155 -0.08038 0.62715\n", - " 0.62819 0.25948 -0.25359 -0.38077 0.30185 -0.93266\n", - " -0.02277 0.40116 -0.0079393 -0.029146 0.2834 -0.42381\n", - " -0.75231 -0.03341 0.18793 -0.80745 -0.1471 0.12451\n", - " 0.16207 -0.83302 -0.48239 0.12213 -1.0567 0.60332\n", - " 0.25437 -0.12537 0.28882 -0.13624 0.16769 0.82878\n", - " 0.33491 0.57352 0.0038668 0.052082 0.87144 -0.40841\n", - " 0.0081564 0.0021284 -0.33046 -0.16384 -0.31893 -0.16997\n", - " -0.24397 0.24522 0.19511 -0.17615 -0.12781 0.61104\n", - " 0.62262 -0.28578 1.032 -0.24093 -0.072303 0.065576\n", - " -0.45258 -0.15914 0.27673 0.046784 -0.23509 0.078567\n", - " 0.18001 0.023338 -0.7807 0.48395 -0.13503 0.15531\n", - " 0.12585 0.14729 -0.3931 -0.44495 -0.0121 -0.49026\n", - " 0.33196 -0.63298 0.49953 -0.25245 -0.30707 0.30539\n", - " -0.0075252 0.1769 0.20692 -0.59478 0.3011 -0.38093\n", - " 0.36627 -0.05798 0.35727 0.65025 -0.23389 -0.056722\n", - " 0.35972 -0.15963 0.15001 -0.056637 -0.63519 0.13256\n", - " -0.4007 -0.19513 -0.27042 0.33505 -0.15308 -0.058018\n", - " -0.025477 -0.4279 0.013337 -0.26202 -0.012633 -0.34509\n", - " 0.14835 -0.63398 -0.15411 -0.82738 0.37643 0.11571\n", - " 0.39404 -0.36529 -0.3185 0.35226 -0.20441 0.10508\n", - " -0.022458 0.32689 -0.22491 -0.14179 0.79285 0.3091\n", - " -1.7894 -0.24364 0.33151 0.60022 -0.088763 0.11163\n", - " -0.1362 0.32127 0.017934 -0.6352 -0.61595 -0.24006\n", - " 0.13802 0.36619 0.19734 0.35701 -0.42228 -0.25242\n", - " -0.050651 -0.041129 0.15092 0.22084 0.52252 -0.27224 ]\n" - ] - } - ], - "source": [ - "# Load GloVe embeddings\n", - "from podium.storage.vectorizers.impl import GloVe\n", - "vocab = fields['text'].vocab\n", - "glove = GloVe()\n", - "embeddings = glove.load_vocab(vocab)\n", - "print(f\"For vocabulary of size: {len(vocab)} loaded embedding matrix of shape: 
{embeddings.shape}\")\n", - "# We can obtain vectors for a single word (given the word is loaded) like this:\n", - "word = \"sport\"\n", - "print(f\"Vector for {word}: {glove.token_to_vector(word)}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define & train a model\n", - "\n", - "Now we need to train a concrete model on our data!\n", - "\n", - "We will use a pre-defined RNN classifier with self-attention as our model.\n", - "The model, which is implemented in pytorch, needs to be wrapped in the `podium.model` interface so other convenience classes can be used. In this case, the classes you need to use are:\n", - "\n", - "- `podium.model` subclass: \n", - " - Exposes abstract methods required to train or evaluate the model, or predict on raw data\n", - "- `podium.trainer` subclass:\n", - " - Handles the data <-> model communication (e.g. batching, early stopping). The user only implements this class but does not explicitly use its methods.\n", - "- `podium.experiment` instance:\n", - " - Wraps the model and its parameters to simplify multiple restarts with different choices of hyperparameters (in order to use grid search)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "# First, we will define the hyperparameters for our model. \n", - "# These are only used when a concrete model is trained, and can be changed between calls.\n", - "model_config = {\n", - " 'rnn_type': 'LSTM',\n", - " 'embed_dim': 300,\n", - " 'hidden_dim': 150,\n", - " 'nlayers': 1,\n", - " 'lr': 1e-3,\n", - " 'clip': 5,\n", - " 'epochs': 1,\n", - " 'batch_size': 32,\n", - " 'dropout': 0.,\n", - " 'bidirectional': True,\n", - " 'gpu': -1\n", - "}\n", - "\n", - "# Task-specific metadata\n", - "label_vocab = fields['label'].vocab\n", - "model_config['num_classes'] = len(label_vocab)\n", - "model_config['vocab_size'] = len(vocab)\n", - "model_config['pretrained_embedding'] = embeddings\n", - "# Run on CPU since we don't have a GPU on this machine\n", - "device = torch.device('cpu:0')\n", - "# Define the model criterion\n", - "criterion = nn.CrossEntropyLoss()" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total parameter size: 3543002\n", - "[Batch]: 781 in 0.42820 seconds, loss=0.52127\n", - "Total time for train epoch: 439.7152373790741\n", - "[Valid]: 781 in 0.03686 seconds, loss=0.75586\n", - "Total time for valid epoch: 89.66827535629272\n" - ] - } - ], - "source": [ - "from podium.datasets import Iterator\n", - "from podium.experimental.models import Experiment\n", - "\n", - "from podium.experimental.models.impl.pytorch import TorchTrainer, TorchModel, AttentionRNN\n", - "\n", - "data_iterator = Iterator(batch_size=32)\n", - "\n", - "trainer = TorchTrainer(model_config['epochs'], device, data_iterator, imdb_test)\n", - "experiment = Experiment(TorchModel, trainer=trainer)\n", - "\n", - "model = experiment.fit(\n", - " imdb_train, # Data on which to fit the model\n", - " model_kwargs={ # Arguments passed to the model constructor\n", - " 'model_class': AttentionRNN, # The wrapped concrete model\n", - " 'criterion': criterion, # The loss for the concrete model\n", - " 'optimizer': torch.optim.Adam, # Optimizer _class_\n", - " 'device': device, # The device to store the data on\n", - " **model_config # Delegated to the concrete model\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - 
"metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total parameter size: 3543002\n" - ] - } - ], - "source": [ - "# Check serialization for _model_ only (should be for experiment as well)\n", - "import pickle\n", - "fitted_model = experiment.model\n", - "\n", - "model_save_file = 'model.pt'\n", - "with open(model_save_file, 'wb') as dump_file:\n", - " pickle.dump(fitted_model, dump_file)\n", - "\n", - "with open(model_save_file, 'rb') as load_file:\n", - " loaded_model = pickle.load(load_file)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Pipeline: enable your model to process raw data\n", - "\n", - "So far, we have been dealing with data wrapped in podium `Dataset` instances. This might not be the case in real-world scenarios, where you want to use a trained model to process raw data.\n", - "\n", - "To simplify this, we provide a `Pipeline` class, designed to streamline raw data processing. Pipeline extends your `Experiment` class with the following functionality:\n", - "1. Obtain predictions from raw data\n", - "2. Fine-tune your model on raw data\n", - "3. Retrain your model on raw data" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "For instance: ['This movie is horrible'], the prediction is: negative, with logits: [-3.9965053 4.1739044]\n", - "For instance: ['This movie is great!'], the prediction is: positive, with logits: [ 1.8435774 -1.751819 ]\n" - ] - } - ], - "source": [ - "from podium.experimental.pipeline import Pipeline\n", - "\n", - "ft = experiment.feature_transformer\n", - "cast_to_torch_transformer = lambda t: torch.from_numpy(ft.transform(t).swapaxes(0,1)).to(device)\n", - "\n", - "pipe = Pipeline(\n", - " fields = list(fields.values()),\n", - " example_format = 'list',\n", - " feature_transformer = cast_to_torch_transformer,\n", - " model = fitted_model\n", - " )\n", - "\n", - "instances = [\n", - " ['This movie is horrible'], \n", - " ['This movie is great!']\n", - "]\n", - "\n", - "for instance in instances:\n", - " prediction = pipe.predict_raw(instance)\n", - " print(f\"For instance: {instance}, the prediction is: {fields['label'].vocab.itos[prediction.argmax()]}, with logits: {prediction}\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.4" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/notebooks/img/imdb_logo_small.png b/examples/notebooks/img/imdb_logo_small.png deleted file mode 100644 index 5416d95b1988a0a64ba7c919df44f8b2a932ab2a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 237996 zcmdqoWo+F{xG3sS!_3SLGc!ZOP0}zkGvf*~Cr#2Yr)ij(nVFfHnbtb}zPVR(57!EUe6IEX=HIoPg_9FfdejM`v{n z&#X(G$)p1*v{dg3IvM_TIxkB3`iq8PL4*k}VhC7AD5&(@9SPJJ-Z!^5GfyyS$iUau z059;E$!UnBKd%_lUqu(pKsG3qhNB1re4e`({BP3$A4&}#py110we@h2{rku5 zy^E!g=c>hUT4Pt0kJn&w*X4Qb)v$L6yJz*VQ%G{6lKlH>u5h+(^j@$aQ|4y`e8d4| z7u78zZQtH)(WQ<6-foNzKkz{*SF!1J#?#uLMYJDz;sRWoohG 
zXDbZsr^aX^`OMLQ;%V;F?YLF^4#^NJavw*xr&uG8twCF7wim*M2g)lv*Ss&Q0OJ#Zi-3?IB zDp@Ch(1cF|(N+uzqi%O+hQNNuLXYF0Xq*H)^i4o0;~aFEAn?ot7bSlgz=a$FD-{`Q zCSP0Y)*H`dCGD2(-lq-l_$~YWMgm&tblBxi?_qms^s>1eU75gO)G;My19<_ejyzK#El|8}~+V zq^tEDfPs>Qfqr#%dZ1xnegBNcTwdF81YN^NSY6A4GC!a@>%kRY>Zl4C~LZK_l z5Bz{xT8wXa$Bo)zU}8svG|0-{^UVn`Ln5~@QN3;?Y%lDI!b%XZDMt5Qi9^>^P?B&B zeLa-6l#qR#1Hz^^)ib3rI3Y&lxP1N#K^(7HKTu2qi+XszVIQ312GPG-_>xHQ@!})n z5P^r`%B{hSGiZVe!(|E(mL@BP+19JuMdELE+ji}gm&!E?eAtkdQ7km-p zT=UKj|B7{C)qc;l>Kl)kVe0Q*LUDZF0c4;>`?VYOvS0e;QZ!Q zFM4zC0S}$Y6H*V(8{dh=I!zlqH(XD@DV*RApzcA$1|~`@WcjNDbJadz(@7JdDQ%H5 z;!kCz*SoUswe0d-0)#u|=_f?royS1p0qUN%Mh%Q|>u@~M47>iJke4Tn+0L1po z%B{1f1sHRNULQ{4+{=L#i}}^{iXryQX!@cMmnA!%cNFqfaNA=&TPM z9eMx-yu|H?>Ca=~;-wH!w3(O5bw~Cv z&&A$K3vUbh5Q`f*d^iVKp3qI@jDh(|X^xTn4i3KAO!h1+H)aBGKR;JKf|?>0R?@1q zKGbB&qS-PR(&RzmY?v8kt_G1W<)I&YWBysMfJ|Q_FSt8aWl`-O_{$&`Lo85AEhx473Q`Ufjl#Yd)P zuiygG)mIb2k(Jz=V2D(Z%lr1!3C}TTX^*)8{PkGu&nPbtW)EOy`?jVF+>G(3 z|6tyGq)WrOa+mSV-tCUy{@DKd!H}+6PX~UNT(#Z-x@=Y#VacL2&INnSMkR|H(IZ_*Dipy3aX zpBJDQ<|ONO&u*7nhx7Qu0uxBn3lbi14?|!IXp4p%luJ!ce0PTg{56q+;2>KGf%l=} z$A1ngKLF{G_lY`q(nKAUvjfBLmy!I{4K;;9$QzGt>>ukX01w*xP22s?sa>TH7@c(O zoff4GKcHR9^S*p6`{VFP_tg0GJ;1*zd++03*sV);9^^xD2+~W!RC0*c`WS9$d;*NG z?3K9c22TlqR38Jk#41jgKqVS)UxJ05&2(@-EIyU8dkOt7KPT?G8v(xx$k)1Ap01|` zKpUWJSMjMEl6R6YPzmr_<&1gyRagUr4IBo0@B&7I>_8T2FWu3C)9WWQ8l!+FY!%?o zILj^27ULP<)qEhgu8I%y5<~}7avJSqVQAz9$$@(tzX^{5YtQSOUo4)c_Y7`w?*Kx; zK~NLuIpemwperuB=hw#{7Ppd`Po;UBf)8oGn=BBd@N;RF^_kMy+m-FdSCFuhUVUdT zfPY@{ej5*@3O4Z$8UuJfrHpGxHUZNopFr380Cf7{F4sA5y=M@<*W1O#ef%LHA8LaN zgkzBlh|xcVdt418fvkU2k`~U(CBEN|0j*&%-2=IG%&e<6t4`2%bU!Vip}EgNUx79> z={sG5&mT7T25oVt@zT`O2f}Q~P$GS~Io!Ec!eUHum3(PZJQ4vKs!!oHXRdb?B{JO%2vCbhF#IaE$)o<2ZjX= z{yrr8QrUjIKnGR7|DBi~y?3ju6r|bm_RX{eUpyEG-Ybu2DCGSgk%ZgfLtKYq@dUn$ z;X+#iV)Piu;=AI9UsA4Kw!AxrAKF5S$uTRODMqkAyJW;`>TC4{sRm>=%-zR2k$=?^ z3n+Z4IGBIf>ESwY4eKqQwZ{+R#MX-aB5lFgHGk{aSvnxT7_m7P9tsXu(D(1c zhrF=MdN(m6UpcEOm@R!XVBlw#KEzR8YDu*5EA34gR_?coSE;J!NpV7ukM&URLRDAF z({Q$eU(}BSmv1%hA&BAv!=z~4*5Zd^U6*2y{JE`;=wZ)RIddO=YO&V{Zux|e4m0)x z;K5ctBizqE4K0Y>uo*@=zFSbf;HjsURL(zw6FbBpsCe>9gn>AOquGCQSa5oh7ybgv z-sX$ka}$Z;;{+;qfJY!ZF^*(_9&l79qoFxe1=Fp_vXN7aI!qDO#vwemEJX(ptdK;g zfL6{e)b@83toWoc9422~R0Kd3>Wj4lQzMbfdScg|@*)j;?_4+lH|^Zs`$iEdOyN22 zC=Q(Se)*&p?%rmd2Er|{tCI2AGx?85<4c|dYE)sxoqaxttcHT!*@QjPQAaNXS1B0X zq=0&2ljPLEiu?GA8FKdTRAS^Y9FZ6M4?p3cIt?3s;gJ@QcuGQsC07NWEWdmOqP|67 zJk`{HfzGbirpc?Vd3cBDyne)Kq58H2S@l{v9&D8jU2>8~nb+*y!-?dwgVv=Z#;vTB z&1kS6M^C=TcF_1K(**n#RWzaZ%n^9Piwa|-m!_F;FQ`ATy?8gDA)cTvQhCW79b(pL zix3Ky7GGq0V)ocG0fE;|{jjlo+d>GpX)v!1rjOuB1sd0$OdO|-F#95=&Xqd_PZ87#bqmJkhtZDYCqz^)A0#9PQtzlemy48j9u zE}s5?F?~cIG?4QW8M=CPcNu89MMQ8R9z7>eJTMJRyEW*wNh~n_Ef02!^d*RajK>1P zD`}5+8#)=qsWglPEqciJ^LT{NtGpO}Nd>t>JCKOwyJj>rcH>dFXnl@^AL%SoxM5~F zC`xyZR1$(G;5m=Zkq^I>k}{I&Of`sfsoYclanTxkVgaO&Afj6){|B*So{qeLRELZK zY+burb;7eEvf1DCy`HK8n=!asahF<`(0Vq{p^Q|9^85EvM4Z$SEy#q3b}a;{@w@Md z^1#D`aoUJbRS~W+i3+KXgwQ{Ggr5~mje#ygK3VFSoDJkDv;Bt~_tLpB)u2kYKem`C zPbBPnY4jz~z469B1VW>sz|!t@fpW+*xDkGh;G;8hN%oG!NBLOZiZMu3-thHdV``JU zI(>=@BKQ@r4ofFP_}M^@uru&1#(_-jnRz;X=GP^cs6E^Ky;@jn_|d&%1yd<1LjYK= zw)dGxe4`dowz~ZDoDG2_p1!3jw{FWQ+=C`0{CBdcJEhlL*?`_d?4&>eyDn8U36->5J;yAM_o19CKk|)=G+KOAW)Q)It+l~ zF4w>(1kj*F>n6V-B{UVCz)I!^B=Ib9ZEWBJZ)NN^P@G7vE!vs6r+I%hnwnJ(NR>$t zd|AkN19X3yygA4C<{MHCAB(Q_KiE_8sx(+R`1~P?&H*Oso zPZ6{~yk{OAkQ6enNV4YAdaq`>?eOP&Z!WSfXI4#)Z*I360G^&M>!qFWm z3pe=H_@6^Xw>f_N4jtLVge0|$=eFZMO_B~DJxw{>z}_cMej-;TDaWx(6M=g!n>t=A z1^!J)OY`H1#!U-^j^rsv91r{_$8|>%oNRI3)mJL!_$oyOe&!|koQ;`J6&ouTLf`ECmHw+3`HbS)Qe=K3WS52j0S8aQ8EV2AGRS$0ww6WQ8i!@uO`hguy4nRlo2JjD2f8Pn 
zX4`Ise#>B!hUq1Pyf&h`%Io7r2f39%^cBnd-Xj@^O#ZX!7{Nwx@M%+PB{mrdCC)R{^cQVoNhQRQaNo4F zDpvzE)j$hj>R0*6tM1<$32YcDf`#D0yn}^0hJOqr&6R8qxS)pqh@uGoWL=h=M~d%< zq^ikN;l-%>;m86O5;3n&Zofn$ z=b9m>&x3d9Xe9$-#Hnx=4eoR9o4QlNVh&MUN>T7B-eE7|hRHf9M_E6PxN0uu(=~(} zhp*X9TQJo=2A_kIK4Hyc_o3;Gc8GfBj2H>UTxVlDbQQLd$FwHFer9QO4@1{>*Uqt#f0kS@_!7?D&~gM*j(khb1ON(u~Thz5(WxAjzO z;UG*u@1)7w2EG5@Chj=0fC@UPWCx><^aC{~-43>+0w z+Tu;?B$JWkZ4x8HY{cDf8&13BCM(?{iz0a%{SeUA!+E(@ zIIfU!7nj?5Cmy->j89Kg@|gKgp4h9au;VJ4@TePL;`SY@{Vt@GyQZf;=>5ut@RKL5 zi=V=+qnw~vT~ZtiIgtXDQ(qL5vUKF0)fd;ZMA90!=ytvbkC96qr7S1O>AOh!NVA75 z3hj7&aEv)_VPPZPS+#~iRwnEY0!A}<at31$)r0MeJ+SgmP1WKG zP46uAtco!|L>@+e%Z}tWUbSS?`QoxZMSP(jL$W368>(WBN>6u*KXaoIT2#gc19K?6 z9|nt<59nDva(%zP1o{6iOso$%@*(~4Q(l=>eozyDvZ0Js;K+_fz~osU6Q07m495E7 zkZ18J+{F41%cHjSas4D@IhWFSBT?P9<*?+~Z;ZAq@=!|2r`M=gi2ixocZ}ogWB zOm?Y`8}z~^dH$a%u;)I$e(_5%n>a{r$?K<7wFM_b-=f|zwf~%rPT~4r&#D^~qA-=p zdr!EF*UGM@MDo4<_${S$kmG8`z}sZZi=lC9e3rZy*CwALB}a6V*NX=g8DHc_3r~xf zmb;JH(<+{UJH+xcy-DX*Af=j8(G8In3D(Pc?}rwNSD#LL;NBa-WB}^h1#~7CTDdCn z2MnQa{cI)6!8$`>^OrqWX*ibi_NWb&+K+G*o9EGF=HU5f!cqg4^4(p+WVx(*crEXA zu@)>kM=!VHppb%Zbd8*GMK~TpvjC+tYg!rwVv7vlJ}{kvsym}*8ls2Reaz^Dgd_@? z4t0U>G?kT1s)X1cX3yY95*Aq&nI9TsJN#@DfkRoO&uzC*zIUdY-77)KK5T7gpr|SUz$Rxk=TvE#G4q2s0<&+22a`m7qnN#YBmyQ#U0v-r+&FOElKtrrCyZ#;u4(Jf47B<{|e!D?TB^ET6 z><7$`Q0dw3O&bK-F&k#OKi{8VDUosYcI!77_a3`x9j9+~4!x%xa_yXaRikYy&e)Jo z%G4UKjM?C7-GD~Ki{E6VRMnT8TSqi4T05@j5n7}?OOn{sE-<5PBYuz=!3*-iZVdFN z%%y3{HnT0K;*F-XSWGIP!;yi|z%@63?I`N{z4cF=hp~oUwWPLGQNF|B-h5}Dv03i% z@4>hFl*h#Zv*eZwZEdxpMQQ4}=n2VZuO#&fVL2@W1#N{S_4-tGEUQPwJzB$Z9+A zja`@f5;!chPPo028b1-@HH4#$yp4iC+*aX@A*W_Ji-J-nWg^?S|6TqG%?#F7oWUwCEovC(SPDwr7h`DHK0;^n0aFu7PWWe`LcXT%?$})DG+w_?pwp{9%udx-*pV11OzgE! zSu$?bS2s{QHHhYwg|nV&BCx8!8;8-oJM-4ikV>_lHuJ_4SCNf&4hh3m96K3dgBV`= z==)?Ky5BlCY0yNTa4VlNf%!+q0R_bwUPV3eo*}a??B!#MAhIrnZi#EXV%Lh!9PcKr z#F)azH0}Wt%O$JmDvXXRrzaO3>icWV5+__5uP7 zQw**ZM|MKDRSi=qYFW;*z|6IuEf#hLspt`i^Gxc8CZDv5$?yfS0&XlE^Kv5i8y!N{ z+>grI%X8GJ4y#sr(N-|%&B+bC}J#i*@^FC*P+=PR!pTfDDH?#F#Uhvf7(rEURS#&q5$8+iSj2nAR z*EU~6FJHrhd*m) zZCB=4+vNwfLc;5xotnPt?BV*zB9QE_U+J5~&}?1b6UVnB#<7@4+wMSCF%fhZUojhy z`SPUVHwtq;gr$nuN2EgnNQ`dxumExsy!dWTjCy=g);A_~C}n{pj|%$+c(Ove>`t2s zSuG%bCCPxM7>wcS+Wp3n+|`?6{;lBvws%DT10|t;3`#~%Ppx0#c>LQtjC_ybH6A0i+jVD=@@FPy%YTa$apgQ97rjcQ$NxyEUl>V07q`-L-B z&y6wY!)hPn8;d0S5`_;i%0*KPfat2DbU}2coL7&A#8%u@gFQ>yN)#3{FVq&D0`=WW zPC|VyQ9SF3Qz~1T=oeL{n*e$opOG_=ISEbp3cMW|w_Gj~jhMBg$|(kZC7|Vm4r{Y9 zpwZO!P!NPtV$)r#{(u?^S0cg0gHL@S>wxF|AW`FiIEXSLhGVPuqkU=xILrYaXK<(- z65dQMA2XmARNuXuwOJV1!pXmUp<91%F*(Ji$jM(V#E(*Q_2f1=^-ueUR(-u*{e7p= zabrMs^{qGQV@AGBSQXxzi@9Z~`H9c)Ok#}d5>A`8SoYm<%wWJYJe?HbO z>FB zj8XsUlRkPil-k1P9g?x0PeOn&%at#x!%9R~Zl~1J6k6TqiM4(%H!q}-v~|&-^ptd* z?+38t(2#oOYQ2(VY`=&VI#`2o8;top&uHX-F9>rDUBl6fVg8eL)F$~}P%@x)+V#6a z(rs|<^GG{a6q9(8%B&zxBlOp zt2M`&zk3@F`df5Y2ep6Ed(X|=L_xi0d|{v+R4$x6*r`?8If8R;3w=`jhE|76Do$c1 z#|yqfP;|+D8ovs3q7|}@K{yH`=FpAI28~Ra%e{N@K7C)>d_9rMktnwum6G31O|z(L z?R=V52^Msy6OO(#=wwctWM75P)Hwc?5;Be{ zgHJ}$P^Q*1Cb8N_&RlhhyV9!0>JQzo)!_ol`ehNsbD8HcChSDtKQ z)|(2B|KNE`Ay&d5+ASjL{d>>jikIHnXpwls9Qp=3YgVRjnNEC(;q1e;3p%#CCse?l z>~)_ZN80z_Y3Dj}8^&E9_gahnUgwEIf`6Re+Lm?oh-c~ZIiIQX|c0WTInYCF55^vW6Ce~+M?TbLTnNH8p!V!v3>`sx%lFQ zrL99Y5E(2&c}+|^PSnqSp>_}?2YxI$q~|-T6UK8VxFpgvt{c`X^-hMk?IWmvr`F|&%g{o!VJSv2KfQM0p2bu0*Awf~W zsDefTBwFg}mMQ_mQ!Znzx`)~|5tBObbwuogA5N%q9=hy{yyl#^gy-*))&)By<5j@-#Vci4sct%D-;TUae0_oT& zweSgP^w;#E;yX2#5=yB{eXgBIglk8ANP8_ysF##W@nsyehtMQD;K%{mai+x>1t^nI zPk;W7mS>6PQmff3o6pliu_qr|D(`#0P@l5djv~zGHJXm%Od)aMR{C#*=$QalICD9F zjBA~hwZm_q-0xHQ-8f^cehYd_K@IGb();VhjXmbhwnp*p-}{7jfCsfYE7N{4t*#aP 
zHMKy)pbQqHbwdz-}jXcr(<0Q^aQu;lDkz}1@3{}$CN zI_MtGA#U2kiX+t9>zF7Hs>kjc=NV>jr9S_lYpRp0SJ)WRq_;J>Ybc30vKv^g$_pAN| z)2wk{#Zt9SfLufUI=iV0@CrE3zJqI2PYgrc+qCf=M{=!ee2f@f4691tqwtrA;#sDT z^L-T_It1(!cjt$G2^p(H9AHzW=Yz%DvidPAq&jaGdWGMvIo@tW`$>TMv(2jqavxYg ztMtx(azFubJw9@W9I}@Sv*VGGdXn>BVSg9glGJ_UL_UE@W|N${1BBJpDo1W*TABhR zSGdzC{(g1HN5BaRT_h^SDai8HmZQClXpQLT%dgJ2%ZR1TxWh4cHWB!=hR}TLU!L%0 z`YKm)%F%PzvYldna=R8-v>#xy&(aFU}LJbR}X`pcY>`eR@+( zD`3|coJ~s}C=)mvU~}2xg4Aow!1Z3dzZPGa*stv2k;KP)H&fipTQW5cJrtbio_Q_O zRmJ&s4|=C7>7HBWJWxMv?!k)w1lsS!677EGdo&oUJ(KDY`M5?eC%y8Bdt;8v@M?mL z_r8(&&N*ntIc*wC=_S*sr^zg!ggQarNeeNrHsA~mYgwwN&0>_z?PBUcUIIG~Bz}HI z;~kGkaWIu%*EC&ph~mAF*-Z0-pl9JdEtRbV5zis7i+wZX=@bZY9QIy#eECigRxQzR z$Z{#eoFWCpE~t45p*}2GEyR9E{C!Mx^#PwtM5muZR`#IZbspAaK>JL}pBg9eUWvH+ zaZY!|Q2Rj`F+9F8orbQ~LDI0atta6ewbPcqMTh(~fWg@WP4PymFgtCpj_pt7?k; z$2BXLyT+cb{x&rRrN_+|8UKCq{_x;+e3Z)c5fCaQfp@*C@agOFZj*aCKUebZ>&53g zIyHQC`Vg==cq;n*(R3QmXaI?;nEPz*QgC{851h2SKEbp%I(7t$29j;b00=~d1j0(5 zjWGDNKo9BeMj|Kkmq0ScFvscjb^aHd_ULFb{_&8@)zJOtovgKo>d)@%?X?=hxoHgT zhuxRXEpP@PUx=S>(?)S_sbgC=GJnLm5ec|pSkomEiqYoo4I;LlHX*Vy7=J$nkFAFV z?6dQS@wfGxW#B1VmMJJDJukWg&ij?0sg}v}t%Ey+Uy$dlE*luEdKu#P4hPcb0&>4^ zNZli%q=rDei)pYIhX9O;%!0;=3oiiu_RtcaWIX#y72xd`E09tc?Xlqe z7BDfQCOm?^T^%;U!eXu2$f#0bxI51s?Xh`JIfZ+l%u*F31TS|-`o~GK8~hdPPle0r z#3WkW%@_M0OTYH|N<$S96trv1P@XLO#&h<9w8}R3i8OQUNsJ5My8HHcBb0NmT&GqV z32Y`UuoZQ9pg%6(j2d{+1$9U4h4PfulXyAY9IGN#}_Y@1$j@LYm?=4rxwS9`p_ zUX(fCVZs{%76IlHynT4VRLxc#1~Y@VfSYc@m4#VC@ryIDG7R>7};Q zUb3=Vgp&F2b!Obs&kh*+Dd?4UWL}~~hCpIH*LS9p@Nn4SS=49#wcdGGl&~9{W7HN0 z9L3gq_KI!65F3TkZo-=P_fYo?MU3V7m&Y!+>i(*-;i}@<#)k_VBErir^f|c?j?N5> zd#YxM)g9QkSUThejeD+#+7vEh-2{w232#8!>K#=U*XZWoq;rpWTS04tD;>MWxo>^U7|_YWN=< zGKXAQ1cK*1p4{_@R-%Ml3s%Z|r}S)Um*vBXln$H=yV`!D^>LC)H|#A{$NVr2}O5H3&X zl_9&HO#}wE-!M5}c_yZxdJM?t;+V(tm>&fA4oEAD)bSz^MA!T>H7um9z3sr|(uU@W zkw7Jwr-_ZTM-yP`-A5N-{MS88`h^N<95{%OU(r8sgI^i?$v`TIJr;yLj!#^IAdWT* zOnG->iTRrP8CQfnS}SerjHHs*`paxsGKy8okbeL0CPXW|&chTzY{FY=-V!C+XB z@j4Tw`Od$c?5ov`%ifo4-z!hmE}HFE{WHU6p@%I=#2Z*qTDGRkw^M5%zt+qi1y3eJ zad|A{;^~YXQZyu<82xONzS1#!uo(4|`|gc1>lNHaRw9ool*ssk&=4^vti^*zKPEa9 zYKP1tat%bhvqD zEkb9@Ekr4F2OM-MY@RV96kF`hz9tl^{q&dLW@ca^wR%tmw?u*z!KiGoWYMT~e+vj9 zR2zo9;PoEI&`O%%{D@4WmID$b?+~S&f?u))W6&xNE;O;q>NgDHT6Dfuh$XX3&_`4- zE-=HVC!=cQn>Vz-VaP?uB)zeMYk8>%Gs!s%uWGd7XbADoYoz?cD=#APD~(~@J%RbE zaLm2Vkc7!0_;b&)3);lem4X!Xtr8Ps#*`6P4i!FgD~_0C<5VFJiA9>^r=@68`s$#E zO>!DC6e&mN7RG?W<3e5L-2wqlle7IAfcj!Ra$Hqlj^Bu4mNTkby(4O+jk=dwJ!cRq z5~tybg;}FHq_NtkDuH%uxT5*G=%Rf^Z4L4p4Oj$~lDFp34Ocoo&bTP13T+(X)37d% z0=ED%N_tO_?4f*%N^dV1DwSgN>ST~Cr(v6^Jk^C9^(t9CD~4XJlB&eru-{$_n!cCc zY%^UC)k#b_mNyBzWF@nuEzJk=4!iST)AX12B;6YixEJ?|@Cfn_x~P#2!+nt2js1qp z-FfG0af{hC&dN2sq$?6f5F7XHBu%O=D!Ah5!m)^wNPe0I#e9c&9Xelzs0%mat*8co z^Sf}DK6sgk121>REBc;X;O4M73sF0I#u`KW#6+U4*?qZZk-P#Hza<>|S)GLnyvC3r znyz$iw$AHi>_cfRpaG6K9P-F}YDzl0f^tBB^r$)k|=2AnMWyDy7i**;)21kB0E^ z(B5mnoKYKvO%vzPun<@K#X{&SScM#XQ`-3Q-~5{(RlnRrui3(*FF5i=l073!JFUnd z*IEQ)O(Gkxm~ZT1kox!2{efW6k|J>PQK1ABbIsgC#m{MshKRCWJiQK#-OTQ?xfk$& z$I?BSF`AoWlGjGhGjY%3rR z|2(vTQ1qR4IE`rFryielesVE%_PYmm|;;ern2D?-F}U2WaAye z$fTfKAe-Dqr-mPklZW)hD4^~ddwKs6wit6>ySZSgfZ$hYV+ke!9LWz;p@b)l#6*44 zxR6G{stG|Ab0};zmrM~zU$g-~4j4%_SHbVf-V-A}S$w)Z9_X#QtWTxw$?b8UPK0)F z!!62?0M`h$!EKZK=gKu386tiOzX>D%OGxOimM+9du&aM|(=&rK{%wJ4!zM96^E zz|b?p4zQQqHY4(#jBr1LS4EvihPY!6ScG@mKXirX{*O~t zkaBY?AD@$(tDL5j^k{5G6ag)nZZWbfi;3@1bZ;428WmI7Hs40kh&w+~!{7gN!f*EX zkJ&hN(z_FlUUz#}(RX{*Nd!=ew_J=xH z+NRqfgxN1ym8x?{he4e-lTDdA7k=?1awEAa=IgLJgT)@5p1hyRhH_fuoMjm#F$YgD z>1|>Ivn1K&{Dk+!;UwPN083ceK*MI#-$~q8r#~j&tVBjsF-phYMliPe5HXjxEulkZ 
zEVx%1A~my}cZ7F#GGpyS$+pe$IvG(*`DFvNw$%<&qYi~rwA+<(eDSinj(KMHB?Yof zoJS2zsff8SX#QuIDL&2H8_qDY_>=8LanUqc-#39WLRO*lW281h_MU3+Dw@GSzU36vV+#y`wMBL>CoCMcq$EHB6%<#UhU(|3VSHlbZ|EJ|F>ki z9};UVMu)jCkP7YMi1C!k%raV(>HOgw&7-Ov)SoG^s1Ve@0gGbw@U9}CR&7C?h#n3P z=m~zt?`E~ej4waA{Ok(|r$+1@MOk?k^pr<^@51nE&@@eE;<`Pq?rFlQQD}endWb4T z&D=3y)~?;TZ>w;Tzt-RX9CwcHAX7}zah2n2{YA-LpQ?k`YUkXF^fG^aGFjntY6s1x za_OROm}`yhCR1R~ILn5vhvH3sfY>V-y(S`@q=dOX#nz;joF$o5Z#}AZ8okFzKr$1& z>nY=>+&Qfk=yx?x)utca5c5rZdu*`OCvzkiG$fywoH;*eTs$~P%<9lqG&^X( zhVq!vII+_B2P6|A?;Pbgf5pm}?`xZ@RjgGbd5g5zF8=2UZN%m>7Uuq-+dlX2zV!?n zpmg+~G}Xu0Em9sQAAD%`b3t$hg?oAY|Dj}|gUShht%3PP8rHj86pCW*l@UaHREALO z?^Dion}B(F{hb%uSeTG8YdX%NN-GMAKV&!Z&mxm`95dpNi5y<~IKKijG$3AnK{-Nc z*D@jPk@QQ&aP+&)UjZ)aCvRizUPt89EqwFML_fbO+kXw<;uWbe3qk2=?ZY#x!{$44 z|HWtt`0*x1zhT|5Y3^HY7Oo>5=Ub8NX34()UC%y|;n(}}hZ*evMiuJ7n~x?g{QD8A zD&M`-3dGiY!mRDjog=qIvwj3MW0D%u(?3pq$Bvy-)L#oLTiy9Xmm&3<#oL(3 zD;fvWy?E&@p2$<@j2Z@~3Ku5{;^V=lLtG{ALWPV+D=S8YCg%J{CqgZLiX|9iTUmlk zv}IgmrSsQ|?w}!;;>>PaNz(ma9KUFPBySNDA7B<$kRNMtnIYZ2W^<#P(W+J0HEH$F z|CnIgVOAKFdKw`-rKO|N|5~hGdkY0jAq~)O#-us`assrhZ)m;;$9KR&=L~%?M32ffjat+Kw+V-OuTOzAEn?}cSMD4;Zht9SX7{})3BdbVDY6yZE?9X9sNE(b;5 ze-go?C>1iMt6ym($keoiz~qYNWT!b-jVj3f6?$& zX6xh-^#4DB#!BU2{V&`9<&1X*1e*hu$X(?4`Y=e$+Trwgy*7nzzGY?M)psQ}D=iku zerL7_9~nbcZP=o~hFdtw^a8x|L_xqHZ$*W0Nf(rlV1abtloZEE%3GhdMDT$-F1q6T z36l!CNHHUtmW(vA)7mnwo0OYrhV9brH=eVQ(bw*8QoZAAejMs)=oA9Ro_hPZST*2I zvubp0HsCg+D`P%W$CBIe}w?c>Ms|*vCEKU%m0BDx$NTnoZ{R3VR5EK{|Mq=zM9U+ z|I3NjNaEElVg8YGe?Qp0Z8KWG(v$>qoI-Jtm1aa8%i%(M7RKQx|#GU*uy9s_*ae#F_fvKxq6g5Z3+!g!L|yQw9+H{{Ufl_J0B4+`m8w z_!kKO4)GzaZ{{B$H2Mb!k4`cQy9MI@_H~6K{Wt7i-z;Wz)$?k$M%*;>39KOZl-&=> zvO~Y{#fh7|CS#g^aPKz;n7DT>$$NimS{mtGORRG~uPwZ{)+v0SOLiB(vNdacv+3h|TYV19@vxiEL&`-Yf(UK)8v ztc25ftroy|{nN-=dT!bmyOEAf$Bm2SMj?W6saY#z+8gu1H~7;EoS=tq1*Yh0)tngc zhPB{}V!|q^9!lkxlYDK5KbAWfDGNFs(V9P2UeJK(Z7-mdJ$hqODF&f4&JZFt?)lr;AprM9FTm+@Ouhd}b=Wn+9Q{)Jca zrvVq)m2Sz+tZLICNovh)dNo* z;I_PId!CYWYoe|6=J(!nihpr~&sf)3$sV1T;6db_tmK2Hl(;Ty)u5J#`bC6-8N&Vh z82&oO%Nfyo3g@@!4R$)5)!@jhUjypI%5hhW+r?dAaq3Psyzc%3O$H_#O;~~&lQZnb zmV$!hMF^ zZe*?IA6n6$nzs*08uWRY;TfJ-Jany|7Fj>f-P-v3H$q^VVOQQDhpxp6S7j)(stn0@ z1Pgz~5sIn1toV?ssfUXDJJF;zEyea_it576y-FYc`K*E9g^gA^f>$K*eTq06Fs@vP zFu6D=tXpW2e#V$=!3p&?9Tw6RpTaSM>{}?UL zn&^%bnzT1-v5irm=p%H=eE(n6+*MFr@q*|3;10oU1Hs)TH~|71ClFkNySozzE*p1u zg1fszaCdjNjl(6UPtWO@J5$r$RrldO!OMc8ioL4V|Fgb7>rREpZkK7qN9*foVL*rd zf{?`tdBgJz-WK;Lrw-=CuSc8*=KZI0|4V3fxJ-SVSn+{-ZjtP5m7a_U zC=cV6p^UaJ_*3O1?F5YyVpUC(foSZ(0SiHQm9MJ9X6tt{zi|;F?j?!SrDl7gw+^0i z<#n>;ABk=b5snnF%682pc-;D(b|G4r_?I0qE@9jcPA3`}X2U3I&_`nOL9?&~u_z11 zQCXne@0)`GfnW$r=g9?c9WIRk1L8;drkURF#tp7J)uS4h=;9w##4%upJ3C)D$v%5n znr+_e%_uH&n=6&_8Vbe?4sbBVSNO3~vYOvjL&$5n23-@_j);(V7Fv=tw+Cl2$5~zj zsX*N8{NITm)-4aOQWE65s>IF4n@5jly7(L*Q8`&40|>zAk#tZlyazs@1$#f@TfE!; zui9$AVDqF|G_O)!*SqJKfRAs#>``CZzC|A9o>k-IrIYdytTUCtaK#!RdDQx2yh7Yj zgI2q}&kX+L8?63}xP`-<`Gv3y>(Ll;Z!V}2#W-pytA+$#>#otFp%YFt!E~x09RTEE zhf9RYe5>=kYh}Zh#Hz7F^QoX(dPK}6(oY<)hx!wlmaD9WI^q>}N3ry6Zp;0DAn14XiX#u&U?o%aru@`N5IMHb;CW0SlL zBYSNLOX%^%QMXRt&>k3j9)RJ9Q*S#a>G6bOGkywR9d;kx+Gn;D-vZ|`w12!@KV|t{ zBAdM5>NJH>j>stA1i&|y*cG-r2N?TCI@oD-7ux+i#@T%8`>Iv(+9_6BT>Tn)gt3L3 zVjMQ)?anX)3`8Y*UVmi2t29SM%d?ZBDKIxNxaAqBf5o_3Qtnz8Wenm+Q_B867i64H zQ6o5-zr{>Wv+ogU$ADh_N93-7m3R%YAy37$xpAL~>ryls> z@WT15=`WBTOtx<-mgCBY5e9!QIjY|36v$BBn>T~aktiYUy!p6-KD^%9t@nL>7W)>l z-hyi>2}Z`N!Xf@iJL;h&ntDgOO+&&ydkWcnaVsUY3Y1^i0*`L%mm0=DY z10>H6o*WI{Y5sZj+0p>khzM4l;{f5!W1Rf~)97m?D&I@hBG4sN2Z&d<$V~*t;PCqx zD*YF|F|WxkBs=tY`klgtSSb}snPLx41cHB?j-BpPrh>cgxsgVX*xbo>o#d-HC(g#D 
z<&*|$lZu!KCmT!Kd!lO{h;5M7J;okBCq3hD-0D>kD-~@?7kS1tb-{9T>pwB0iCLJ_Td&4rDZ8w>VF|HA-V|{6p~6 z#E46V6EO^hsqiN6x&4rt0_h}ij7@0zJ84(R2 z#TVN`{Iko#u2G-B7A>gEHwKJ%q&-)sh?-~S1L=5+NWE~xJepBZl-=`hJrV_txDyYN zWk6jj57&^)Ffe40dWG)?t@u={yuR{V7;+|YKa>Wigjq0^H@Vg|F6eOJ%KsmoaN&_O z60r?2a~b2JE_36!W`@!KZ0I0Cd$-mBHEflbDG<2d0Q=>_3V_I#Z+Hr-#p@QZ zu{qzUIAvlAJYm4BH^Gfu_jfR#?f}0zdt3C_O9;uEP6ln?xx-c7AAbbfE!rn~XLKLF z@}7Ju5AuS(!CR{T%>TP`iHG=i*n-f0RPDfFMZ`g%{HA@$(XuLad7X-Fh3DNn7%^ ziw4@K>nsbu9Q%Sc!iB0}PpS>jl33TRkQaFPqA<*m3xmcJT~7CbG!4@b-Gd*Yo-|r@1xVvbQe~RwG`Dof?z6M4mb-2fODgaPuv|&1&tSG+0!DiIF^h4FY4>n zg^fSJu6M3*35Cl+3q`LLm9D7zGlhKV?9QjKn)->$hnCw*XI!sG#H%aBV7V8~Q=o^t z`&1#bKU8~xLwARaOGd7rEGTku7ZlmzI%ei`evcuJ@`!@_lEt`31OQ|=gV9jUAX;AW z2QtlMO8N%)JpVvP*mC5J&egKsO>|ccl1zvzZaJt%%e3M|BWRK(*KTQT{cmJ2scG8E+2-Or5i0PCd8Yf2!!DP%W0JGHOLG%t zz*?m+b++1u%dYKEC$VYAW7X?Z^ciy{+%0FsZP}aYv%p-#EAgSG58C70y4%R>n3LdI z#cRf7p~nFRox9XTs$8I{u3TVI_q4Z5P@Pxj!C?9;8#ft!4W>HsYrn)|EzvxFat1@E zf4PrEed(cK5x*z?3F^yo0V@&m_{L3o8xc$L1t^+Rv>a1oloO!r)0ZN-ox2}>mmD#{ z>qm9B_~_$1KzU+dLTDv!lFGT5R$GvM2EkF|;0T@G;qc343jb_NPV{7xh$YT)Qh3ps zS7=**KPl6f!OFxpuy|V3l~>^8Jl#Np$1pJaREEzmmGA!*Oop0`g^Y2eFd}oy-u3p1 z$-`ZpgON3oDwA+T_A-?RH^O7?@ZRf9_AMs5h5uO$hdg`t)qHVN0wTl~WY{Vv;}%?X zkO>)~2*q84Bh2E&ferA)i5qqZ88}_jWUd<1dYO~OpZ{i(2&mCrX_=c*KD4kK?5IDu zZ&Qgg2v*qq83q1T393$uu#}!Xwhr&*-fi=MwE-E0V=WoPY=~*nKe5Em@ux$#i zBYg!lsHr%z*`h zv?uE@N2d3~8%}Vt{nU0B8`1Ew= z{h!3pt7hIhSe|>lS>>wA&Xj_BgtVuRNJoTYp4K&ocw>TgDmjp zbhb7Cq&lZ5@imP!54%?Qk_L9tkt1z4{mSbSARLeCZezWJq*WTOA`NFm7{`ID6$^H6 zof+Ita%Ws5>q-21AH$G>53TzPX_3UX1gh>7YY91aGFJ*~y?E6&W3u3Z&yBopG*lTq zKnCrO%}Z74{cyEjoE3Lkk+$BT7)I|}7$X%FQ)$F+BOB=H>+Hb|u;m-`@fVo)64AJG z`00&B!GV*tztFK9rxN zC+Me%MqZ#Wfe=dCq`tvz)DbN{9L%o}PmjWzsOo3#LDN81Uf%nykqwHR3WCEDB{5CF zwtiR3q+-Dai+KuZH<|NivDZ;1)XF6gr}A)U{(xEE(C?K2-|4NOe!k+=@0^lc+9`A= zd7Gj}rH1Bx0+dQmhRh`D4^54}^!ZwV1D+h9uFRuIJBGvEkg6Z^K@cwikF;q!oQU!e z7Iu$P`0d>V={(}sO=B`HtqWs(G+Kvp2>qgAyWgj112al`M|Cq}CaG{LRv8Hve1;v9 zkzsKP)Q&u2h?1cv*VxxEfrX1;>a}7d6lm;j*79U>qk!0*F8WjX6m)rMi~%4mx20)6 zQV;dBllv!s5c^m;!9E?HQ))>rrYf-E7q&~>N>OoRG)A}bQ5H0Nzezh@8iAeJR?K5r zk8G%A!?ZAxfP}lBkYFO&_v@42{!5hM2z}w`%&omejMH+Il--ZSy6d^O!gpe#t2@rk zlBG1wtT9`H0SUj|*<6|Kp2Y9_xp|DHEL!{Ygovw<%MPv*L91g45TlU#gl~47} zQ1=lzCDTMC@NNkEv%_PMNIofIO$o|Q4rr~Vk;)<)BuU9Em$?!Tt6~)L2FAhu;^(yc zT?*pgWc>5R?j^$b3%r-JzievoEG^tC6l(k8fePN0Rl?*lq+N3^bkeQ1n02w*t7viE z)6P(0*P%xwrhx2V4-Zx1Go$I&V#H6#L{(Kf37XJ6Ma^vqce{Pem%F_31iz^ryMiHV zPzt1-xu+m7FRR7cUqo?ML(V3^d zmvgLEwNgWVsh7X+=bR?-WWaXgk^$VWyK~TQGaeBUWz&12%llKT( zOBaF74$G(lyLLg*^sgK?h4Ra9zR!#j zlR&zMfhL|J6mng)#B9!SVfp!g!^?Shwx4%h2QJ*D53&!Fr8-uH2>HO&xA|Hvt&Zoa z@&_zHzqwC_!eq`Hr5m{45uEELXztwF$E@UN{vH~oXiBbI86LEX9kJ>p`3mP&?1wYUCMxC3sX}5?rNcPyxF|YW!QQsKpgBQl76Yrs3sC);6f+ zm`QE+Z+&ClEQJ}Tg_&}+3Mhk{c!M+#EDQrg1-<|6mNIOF^K2#hv))z{vHeE>g^UrMKcG1*$JKupS|3`WD5CX+!Nj9Z z{D^-VX{4Oki&Atx_=U}&qjaI-yC>w6BO2y{^309dSJ{e19jG+pE64$PGyYFXD&LDq ztz-%D6;OT#^}v}YS$zTt7{=i!&9I=>lYT1DDe zjsT6j`L~f6k&c){?CBwDhamhI$vr=eVUN+o)=elhj93aktWDX?#aT$+@PZrX;67b5Ws za10X7=4lu2W&gG0kf}xasvK!-!Ib;jCjW@>sc(ts>ll>8H*=Me&<{!{{3DR2jNMBl zm$K%#=sy^gEtX_QCMnsiVO{tL8us!@~5@ZzUCqVd^(Q#D+_ax*MSO+AJ ztc&!w5uRy5Ar(M-cYVDu(t8W4Cg2t{kX5NwpI`}AAGPjH_PBL$?Xz1 zPHebM?o7$7{}soDe;*wy86SfxZSm6n%S`X7+T4ld{E4T{f*a$HeT!CNiMD?pzNg_Y z72`jyu{u(a|0CadH<&r(vUy}L8$9=P_8aNAS83;nd6eA0_+%0JTFSHZo9A)Ow)P^2 z9PqCTtmcs0^pVRB8vd4Xi<~CR={2|5n=yX1lfLYybUn(I2_DbeaW@w3^;xOhzvQM&3f z!D?bgEup;$eXjBlv2k@v6=iuiarU0`!9c%At*q&UicHY3`7&m?)n*BcP9OMjs&+g+ z?=#Cs*8RH}RAyG*+Lm^2PIo`(B5|V6M-R_M zEzGNxBxZ$v?g}Z6SgmS(@w{ZiT6q4I+Vl>p*Aq!lTFP$5l%2=*1hdi2FU>l;4O8(~ 
zDb(Nua8Xh20~prqN#@a@-bX7Gjd0tqDGLroRexWox@K`oP$yC#&dEivIhuNsPNN!q zx?y)W+0wegt9Itccb=n>r@32J}0PoBPBDH_A+6Ic|n>xQ}`_({IH4I3VjX3 zSg^_4Q*XxTnJo3_tHFKkAVO@HuP$%$V5R3Cb1ecZ7x@pivBtL+X9y&YAk0y3*R zRLy07@gIST)_f_QUBg$W8#8D69wUQVeVo0dOtOntePYa-u?k}M4|=)uq9xDeOnHE zf+B8O(S1@qqjB4~#LE|1`bALZda{PBSyAq(-9)iX!n9ZZ>Oko?aU>RSoy6-3(rfBG zOJ4pPD94mhUUYbfy__Cx!D(}`AU2qJLewUx->6uJA~`}Wfx)YkYa+?Algl643y_Wo zWa6kr`%x3t55BIRqyt<}_zdS$Ww5pMktbTsg@RZhE7@XLzQMwR(Qum<4Wy9<&~g)& z`W15ZXGyQ4zX}%E6?du1D@3bDEMKI|o5 z4ZaymC(hS;B1UpbdCXbq6r!#rW7-z|M5mdl1lcO#H%<$`dGsb=-V_ZKeWp6| z?Y9`S-ur}8@60pxMjhvHa2{YUL*kTJ_vsXQX!73KjIlBqpt+Ds6nBBvDCBm619l)W zU?Mt3wJa4%%A%54;9BUB?Nz;=^s_WGw&>QVyu-=Z6;VKF=1(oEu@c<-RQBVmqM_gK z-Io3MVu_rj2#Kf{4pVYrKneGq2tmOnjD;PvK0FH@#}n?#K+IA2?vnJnd@-a;U?S`B9@DY z=HGB>AI_@^gUVb3$bpI6%-6p+N(0W>##oV7hj=?e%TA^&F&kLt5SNqq%V=}l2rujC z5A+VzP>v0(Q>v$|!aY+E+=AVQjE2aGM#y5K=F1UZiOqR-+_DhUs|7iZDUxr^n6Q2D z-#>>A;RYd5MmWtK5tkP#jg)mF(|rQERrMhPVV9#W!{ikYiskikBfprX!I*J5fldSD zf3DqVbTI{(L7Im5wRQ!dr1%d_0SLx4o#dJ_>(a4CT1f5IC!e<4_ z*Y&|~nDe|T*jFK54csE%VrvO^k|Hl&D^v9Q%)`Gl^MVg>NO-R81P5u1e-ixxrzYY4 zlz&7SVJ+8`iKGqDcS`jURBxh|~>wl$re05aAe+us+E1s;t! z8JDLD+q^sH(Cm~BlQd^%rUHE%AwS*?7skv<1c{8Rk{Qx-dnuc>^E>R1l?T!!tvtvu5kGf)`6-=lE~9GL(3d*+g=n++O;pX=jOT-Bhjq4n{fJa?k#qS;kbl`PY^pL(T2?_Yco@6S1SrMCt z4F$iqa(M5pT+oxCP&YUe^}_K)LXH32dV9!Yd=Y_QdYk-sFTpEqo%g%eeeHxc&7<(O z44f+u z0YhAF?jl@-(+fQ z22(0gjLLt>V@ss9+%Ng4>c8!8cu&_4EYUrD*%rTYd(9(x!#xsXpA7BDGZ&BM92EgZ z%e9Az5oIok$1kHe0bbJ$je`5;xPy=CA zk7GeswcfzfGh8a9%ER$WCP_AP*GTcYu7Po#@YSHZMqYS^T3I{T#J2y%*O`3v_W+}_ zLYn=#{MI1oCj-+0_I8=a>>&7?@ovNyEupP=)WnlJcp$lK>C{gtzz+kc5}RK@<4ayi zcb!d&qL|#1@Q~wOtv;H;`mdSO;VzFszz7=pus7v7;W{~>%{Ltj9P!fNzTmr+#i!5L zwPL@$N$i!AV4jf1BSkWHKb$+Xdw~T=CLLL&#XGW{-b^R15w@!O=NEV{7QB(RViNHc zIdNxm7n=rQr(4L}2R+X(B1P5^Dli)e(C)9f$NFk2UN3yOa=f!b2b^}a+|tX{Oc=k7 zEaf}G7@rOw8@(lg9Wh)k-(RFJS^A8R|3YmZ6pnI=(wfZ+%HWfDSFrj}0MNcff^*%b>uBzM-sOBkVdo+7Fgf;c)qPoA|i@q5f^U6}VI%aIB8=R(9 zI@<(dx#Bz%4PvKSnO7dlk>~dwX-_Y`ZK~UO(G4wS_lg2PU6xND`pU51VjvvS6_D!@- z0N0q`5L?(g!)*$KE*#yHj7|X4!+LMe_3sQCcXO6DwJ0Ct7){Pg!iZX*9X~KZM^%RT z@?77C1DjUhp}&<=DEipA%xETJ&D=pTb2>y<5B$f;*j!RS(7mq& z-r7)iKba*J{(`BLB|mduM8eOAc`{3xTn^r5v5Q7%ko4cN*mnv|L49IgR2HA!jAGWy z>Z;VP_xq=Vno1s)QT$hTVugj7Am{=g8*vI6=+E;ZmIsURXG4d2Kd77$h3lMnrVNMw{O0Nd|#kHCzpEsqp+lu z)2rUfFD~6+q5gLp_I)7MWh&Q8C;IA8$6efg#iRj&D)nzRnfOD|V@EP+4l+sbqIEwf ztzQOrrG`#}>tEUA`*->imbRw#_$^u+N=-!jVM=KmB4586^9~fO!9_Q8mb4oJ_k9F%vvyls@wrl|p;Xrp=pnuAR_0N^~aO@rbCfzASbcf2?toenU)C8nEf< z@(!UUMxM$t zg-s(F7H!4EdNv7CX6w!RjA-+M>%j$n1g+c|Qe*RFn$FnY1FjKDNsa^r57g&w|6fT z6}!;WQZvM4tq)*AtxFoHSou5lTr+6Q^?B7Aj<-H}-t$-FEe&bUDffH8AhSZN;H%?} zQ&OOuExHC`okcX^N@OcWqUc#ly4G`NF^bG==Zg~$%vcQJ;I||9CK^uu`E*aFHTdeQ zNtDDPmp#gYFYx>8W%?4zKE9PZi}?S4wqWz&o>~WhD>(mE1{FAa--69y?q=9-e*ysC z*JIDeB#6$B#EX?DzXgcyz<-TU{RlHed*z8t0-IqLO#o~5%wa1Y7BMUOyU-AT^^xl8 zG<`~R_-gx8E`I;P!A2Ofe{68Od0vGgwv}CY7|gl2bh2Mhky#*o*upB-!{-+6PII9- zC0hCf;zDfd5_=LTxiAuf^E_+0S&BtRv)`4mfoSnnF~!(-ao^sLBs~px_qQO+msygksO)sFl0Xv!z)vMS3>^I`hMo$0HpmGkQ%aJ+m&j>pZBK; zHaMGZptxQL@cny0@%x-jrWLVTotsAaY1E52ACsbZ`ou04Ed_1$xauEVybR;zOxHpt zj*m+41C7R!aoBP(a22&eX5##ek^qwYjmm6%X5`F^`9sMd!LiL&^R$xx;uh@Ttm}Q} zfXryke{l-d7Sj2i&iTKkpaM^*4sH&C9IWAbbgWhlTklWul4(bHiUc38W_pXO1+RWJ zut4?QeolcPy;(%zvLaI{1l*%>-y7#@>J2uii`yXe9!6sPm~+8y%GZBD%bz=orPhTl zJm-1-9OqI#fnNU>X zxxgp|=XJ*2M@EsiBY{zYJ-(Tb1uj4^B)>pfyV&RHlyPfnR)k$IPO+Q{LyLh{C_aoM z;lWWbTgzMULZ>*T_Rr%#Acl+HgF{YO+c2e<7geoG&`a2gr4(kLmM@Xz2m5T-%ZftZ+rFLU_H+!A26S7~vhbIpbXGU}zZh zSf*?eYfy?!m9%HKXo`qzPQi@+f;r(HNAILbDnO|B# z#gy7>G(Yv5tW1zMTE~#7c|5jO|3!WH(R|80rZ(jtIb~4rFJ@d^MTGf(Tu_E1S)wdm 
z5?)K}V0L;HQbZEHo-;ONz(g`jY|`YR6l;`fxL9taE}pP%tL|*;c*}-&eCHn`gDYGS zJFgNO3$(v7=ZTM=*7&$0teah^Jp|#_DE4>Gb=HuJDmlU;S-Hx*gX;iAr&<~5pT7`6 z(n3+XM5=NXE$A4E#7E!)HTR#DsC>+|0m?G`419*AI*&Tns-7?n+Ca4 z#$kYwDyaRq(xTTpxk^{%WS+MD&Z;>G;e^iokt3&I-m(Hhj@|9&b|DAxN4k>YnW*}L zi14DoPWnSg&XK8issK*yybC0(xemgH2}UQnXlE_5Y|C5KIM=}E`dxB% zy^PC%1en{zuTAkEmUcdLC7=s_*he&p8`3x^S~jZBdQmteEVC_{bWXi2OCYi);<>7l zo68;z7jWV4YIVMiqo|JS4;XPNv%kqcVDpVvf6xf41Oq)E zro^D}q-Rr{r|lhw!c)6eBl{W0-{BTUlW08YAo5C_>eY28A~uPs5McykfEO3IW2a-O z8F^{=d6!%6IZ*U?dP*b3Qdi`7o4eXsyc7+v5XZ$uUd{^0n7dx|fr;3c`s}k9`lX-U zjO}|7Rht0+AgzSD*jSC_Fu~R}pAE&Z;%~KO%bLK#;v}O^-IUOXlW3l)?@E8{Vwwqm zFStVbNPYZLriqAY|CZ9h!0C!r)CJUmhLSh%PLxlO1dRt<%S6d8{OB3AoiWml#!si2 z&4aZzSKZ2}^HP`k#3s|C77w~Ds0QeK@e6 zH{V)bNLwLTT~W5!6>`GmyKU_{n0$T*ht0xn%?A2J>8w0afZ2MWoS{dTdG^l(vUA(F zp0RwQ54Q*DRmC1AV4A$28-=yY#=%1{8C_)b5-mzg9l7!o3QHwuRpSBAOc$n24M(TKD)P zi3K)&BOePcE08I5TfuD3f&a^FYi`9TM}8unP(O8{_HJ4i=y}M*6~pVnE;mT{kfMR; zs*OL?Y#m)t<2Y+_7x<>Rop6X|>2y(7c_Fn&vf$DfgWlS$KA_6&VJRP^>#wu6(mS7D ze=)D*9^oAP|IKFWCVgW?W%z}_eol#!<2>YCOjZ0{+PH-diI}S^eRr?K3MKaw;n<=; zO0eetAY_JZrJC!bn(Gh8*1CV^S>I_nlap_mlW(2SSqEw1fuI6TvEnbvreq751Uj1s zw!0t`4N3faY{@Bl{9gMJ2I}l1zhLCQ#b|Fk^%U_m|D|Oii^zKO$o9_w#?}LP;J+?N zJf?w0aaO=y!KuS=%Ck>%{)q1eRUvA?0D!Fyj>{}pmbf z`ZRE5aoAM3Iw<+X+A@Sb7g*Fp^jKB((F&8G7JE9odI|i?Li3(3T(zpLo#WJj@^Dn{ z{be>c;-$MFaR(zuo7iKWnd*_X1JQD?Z>oP5I?B~sYM9L>RExYO`|fkx&B`@vna!oN z;?Kf{HI!6KSDED<$#=f@PoP`1Ly4j-V2`0E{!Q!JLC9^7i;&4q@0Fbv{-fPsVT2tO zP$(q3F<|8jvzRMk$kv=e&1-M{Q_oCfUgiUXxw2vXk>!t#)Q_5_HZ|~>w=@S2uus#F zT)s|~XcE#|`2o+H249iVKGg^}6Kfz%h}NwsO?Fb#6gRlyRuxrVl0Q>AUh3lY6V2(9 zoCi_dh1NYo=EJF+xPYJN8Z{^C-Vl~vKCrI)jmIbgIG1EliJzdYGd>k+nx6l{B0c`V zcB4=qhw0lUyd)U=nsiE%xYg6&_=D*{xF>$$0d!(nUkge!Kr>oR2U+h()`lin_TE^z zxlyGqTaYkvE?iSR)hxWuKG`qw$Cg#PIc3n|4;0l>mU|GMwDuSKk*5MUs7COlo5XOtMiCEip^U!x}ljB zuL`KB7GeQ0O+wxufBQ7CENn(QRMsnB;$`yyp_7v~i(+oycxK8U#+A9_#~9@wa_ z6;^Xrx`3WKjI(za&sk4>G0O+0wTQ}SGErixhQnNC4=Ll%0jz4aOM5{8%C_e0Po9IV zI{F)}=-taxvQ2s?+&d>!{yBae!0JfQk#L*s@@?!hP0(!Up=`fary6>Y{PUg@0d7-u@FYiVu<}Od+Ftf(7 zN-GT{-GUgwES$0Dv?yTOE)NNUcKV_E{;ofTQ%#z41yBdr(x-XnT)-0oSKQ>F%ymT= z*bfBEf3OsUK~4l9&~2gk25RE*c_fuN+WpF5+~v(0Xsl8|2Do)erG@BYiH5IrTDIEJ zIgHcW6vwFYB>NC~rK}FyF5;CX1@P=~-L3lO0s_tpuy0oP$o;v!cPoA77J$|d#jL${ zDHg3%SqR%u=Eso4F81rH3se`vYjE2L)u@7f3{(-3mv+BI2Fd~j2;sng8e;`K{Lz<6 zSXvnBR0PWF$0Et;qd@bXH>JVlantSR$x6%^1edY=yz#$-$onSmfSIjdGH>_}rZH!7 zX*5QrqwKH3kB6b0wO#8EDYlSqCYH19)PLhj8j~kbN|ThJa8MCej(!p*0Ty;gBcYr^ zEu7?3y|h}^A`bB1WZD;p6#lWk95hO;+aeGDP#R8w9QX6*OOKc}cwwshvZpA&-fpHy z&1uk+A{u0-MW+j6Af-QZW(1Ydc!9dlxsebc?km42=;(zmtV^@F2a+MSYj-Q)zv@_= z+T~=-^fQQ<%5X}ZFZOrYg9e!YfJWpzAqObKo;gmu5m)U(3mmZ?MT9h0!lOte{;1#i zeAymQrX6|nhOi5G^vDKG16Pe0CHO?`Ioh=~kSwVNaKNT;l+RFS^299xFDnB{(gV#< zgvV%9MF^PnC}3O6%URFbPns!ks>dqy>03CKd#%Fjy1A(2EkaG##63?D-PL$i zdLENf3rZac$uo^bh;wx8)vqK5s)H$Z>Pq}Nf;3Rwh>JuDOVLJoC5^pOr2BNmzm44i zw$;cClLO9d4Bj^ktR>4P9PP$cORe_MN!k%HcOcD6)?cs}254-2^u%OdD-VP1$i!DrEDFq+VZ$G*J;3|9Zt%u(aqQ4;|zU^tkulKw4z8eRt0UY96NQd zAab*P{x!U6aN4yE3;B8%N<1^=pq~s& z&ZQkRzWyZwF=^gT4#`xnVD2R^tG@@MRXvP2JuYd!J@?2Zk3cGgppOzip$$p?Kn346RhByIz_YN*R>jnPZhe(#BI;N;@-2$+i3|2jZ3%QTRzmpTmBd^ez>iY zI1{0w_W@kd$M*r;rZ>Z~=NGrRL30+nD|K{E-1&D}zBIS1YBOcZ05{0nSo84(?KiYn zY<8G|ywzn#>Tk>{NGfkd0od-?f(?`#7D8R~?{h=2tFe=eEyHH}0Gt*9Eo9J`4GQ@9 z<~c#zVWLkP7HX+pK5W182YB1fSYSe05MU6TAP!KG1Q_Q}(B?aj<9TBQ8 zwG~=RzAsyyZ&k2rZBnysCBEyk?tn_8o<*rsZI^1rFpfut;Qay3_uR8l0m~tYfmH&N zSL!KBl{?9x7mFDm-hn&L7Jld|CzFb4kR`y|M*{eQ4uhX%NxBDrp81K2el2gkZ9NFF z`wtP5R@Bx@tZQgjD)QC==Pv`7B=VJ1%P;cLd2ug1dsZ=&u1rJataweIh7Rn47LjcT z(bzo`9<27t6{;()laaK}9lm-)OW)!mn(bOug|c&OIdb_)_|Ui9EwAZ%tWT|1-jUpc 
zINaf8Z{c#j9N=tR?(re~DsYbb6DRw{u5Auir8CE)ajVYT`L7#26=S z&4+yB(@KZzipaufxkgBP%lM{m%5erj<$rel!?*<^UBgf|!rkPqoM%O3)ftx1MLMZ>DgR=e!`!_frR;GpR$1IGhn$^iIg=uXt{d=J&(0b}^$#M8MvQQK*bYvL z$y5gnTWiV>wt4A!#Sy!o&|5wyO`l;d$s`AP+2>PCM7QE5IhH?ClIlgGTe}7%4j@oO zRa{N+ituim9$WT^Zd>+nQsCZ=;C;0Jxp%w`p6~MWS{abgT0S#r*Q~ZM8+e?X}Y$Rm0Gr{tis+If3XI_45Kp#5%0cUaH$2FE>+~ zy^$dw6$?$2sGIfcvntA{IT7x`-yJmF8#On-Q6XK9&QCz13=qGI07QK z0|av}&FJQ!Hs)E1?Pqvy1mxOKNwNKx_E_%cmseD@zNa=nTZpSgtQSw2^qqU~m{ zU8iDi*`5a7<9#e+nQ7A3mJJ{ZZu1!E=`_D%9=ERf#gEe+8e^=VN9f^os(gd`J`G*S zun}*P2b19|z^)ta>`l}5$=Ccul0x_pd9GEyw3FpD)$Yrk8NHjfBm}=%qjD5Oa*2Np zl>^X_U4v8joGu$lXIU3~=tRb3%Z#ykCO|gP;*sjdrY_pfqI66)zNWCxn(0S1R5(GC zqg}h5n)BR_x(JSvQfLissrbIKIpTDROLKY=c_6mZ7Tkhsch?unS9|K?)p#ei);8Eu zA->kO)qTh}l-|Bq!Z3)RCuGU0v`97_`Nxt!3zGsO{8u34CcXYWFu!!i`}Itazf8teJ8&XgpQ zr~UXr=;A&>Cp#n!Pu@CIr@?np|_xzFP`v!0$H|^MDY&$bU9K@W2 z@XLsQXKdSn5H`AAL~Lxk#|p|6{_vC6f;@YQ6_FuAx_mYz#PG&Yiy*DPzJVd@xOft@ zWAr-2;0BSbEXw0K;EDLW7cbtUZ&N1<^+G{UGdPwHRjt)ZkV&*sWf7v3lmJOU%#5Kr zCU7Gpe#$6N5NsSihiuEO=O~-^YTDkH#v*qnSf#Z6l5_SNOKk?dW#TIc5vUz_Xo%A2 zgV6fQ5dWeu`}+$W71{S(bJEaSw(8s8;J)Zq?il8oXyj~sCZOEHbg$3*n=k!>%6OH7 z8k&*5-Pu?RGn@VBWp*@9H?G1cRYKXfXc7MP3OuHSyb3mA?_V;oZ;#8ifC}RbDo?ZaD^{?XvBCO5ku9TZ!4*5lI$4>M}Hb`{%II_pBeHTRlVGY;H~fr z@p4H$?H9M8=F+jFO}2-tzD|f%_dBP*oJytmi~#$CLjniX(*#4dD#rXGr&c zU;2{bZ|93_wrHV23Y_VcGZxYPMk@t zotechZ+7c(o#QqvPDcFaA(S(n!l+5X`KQrcXTa4xqozB|)XK zenG*uVWJ)J1Q6LCOwem?U3~HzgA2|V2k7*Ld^I&R1291%8Lj0&LfUz7>wRj}3ql;+ zWX%q!twg&&MU0G;v;E{DLpQ0VWebFXt1V)KOQZ3nLTwv2259&39Lo@fX=mnUj0o1C z%WHRD|39?-XIN8D^fn3;A_hbg6ctM#SW!el5D7IwK~O~OfK&kiMT&F?AqgE6X(~mU zV3&^clF)ls0#cHI6ipz3Kmw$_@%Nnb{=Yn5o^!7Afh&85nOXO&d)Atnl|6f9@A=#p zk@;;ucjfna-ST5K`fqnv?&=u*34E_1FFMuiaLc{z!by9A>n)#M#ZpfzKFD_%LjCR< zy)3+F<5Y9gVti(3)bH*k9ht{p9{dP1l`W?{>#X*;)k94mos+WoB4#p8JdYSx+IGnG z){_Y>vxpC!I~lhn70SLbUx;s8GX3nFyJL3i<)JfL8gbwD_x4eJf$F+#UZZ)(=I;Am z*My60(z_xa}+4#aEPjt<3pEkwyTiFvsd{LX@nY$Qm`-@c+aKEB)@aF#IME#9YX z9z=ORE}eFq7mi{zY=d z;{H->U;YE@c>6iWHoigLy+!DU#eR-lPnhR+0O~ju9J)9iy*t^&y_RE`@KEgg_I5e# zm?hp?e>9+S;YGmcF1@-qtebRfziRu!=yNo+-9<~|(X)4wQ!#!X)6JO1Q3qjKWvi+; z?N@&64RL$7T>kjO{>tTx8`cjV? 
z`bZ3Ek#O?v?v|*+L!Woo<4PsWO*7K^_;qLHK?|xW=~;c9is}|WLJV>qkPqqpb*_!Q z?kKhV=hUk!h#jB%h3|EKOLc_|4_B^Q1cw1BTeAk9L;wOWQ0&L;&%^p4{E)7v>5NmAL%>DpC0V zRXH$o>Bq#A2$chowI=EqYDO`Q z#8aE)C|0q$)TWGq*5`^u_DyQu)7hAA`n&J`7&<>`d$3lgmFv3j+n*h~Uz1vMIva>I zL|%A(J2-M_{PMTw84u2Uj~~PTM1GHa0I4`R zx|K`%%&Yo-llt(>^XwZIl^y({{cnOTs18@(9}Hb;z?ezu+89`m^bc_t_*QSn%3tSa z{6E*B7EdIn;h=nzqAWp3Oyd6#v-{is^G5%trjVEs>XR1v16Ydt)Vl?b1_8=X7A~IG z_wM6KhWNn1flb?nD5)Y@gqfG@m+u1s9Uo?)S$U4WF)WQb*TZPToS}|BRL99+YM^QT zbsG%nqPOK2pP9Yg4T86jrD=D(gj$v7q~QDQ4uc}UB6h?4om#|X<1<0awIwxwN+{if zIcbF_Hf+7MGZsvByQ^&lf9mV-Fa9MgGRYl2IC^`FcoWURM0wL2=S^#sb1_=w6;}_Z zK7jnOd7@{<7F$#9^X#=YXaQu`a2t9(Hg;&r(%HWKY8XaKkVD zKWyoLTU^M^WKiJU^@fgT#nWG7VrY|3J{VZ;mHH-L7H!Isy`_KCVE?{-6%W4n72cni z_@!6)6J_(@%u%U}skg@il1omgN#LZ)r1o4A9S>l9A#pa(x@4)Pq#sXd@mXMZa5UAV z_aqTF`TgnXDerpD)xDtkHPisJ*0?V}Khd=5097j`ZKrC zK2m)1a%j~3Plv7U=>LT+IxN@SozF7*FkN#u1OEJ)wDavQ%41j6*HEb03DA{0x46cN zaq%DXc;TO2Ng|4q6RA7cYwwtKm@5kbQ#Td@SeJAcCtq6KV)w)>3VhyRIs{1O0{C4X z4Pr0Qw1G2S))dBJkLXQ;lGiE490K~%P?^Pij2|V(!&I@!+4k0&48wQWlMN6m>!z4ICCv; zCgIPeWmE}Yx3WNR1)tI&DM(lxWSLXl{oCeJ?@SMoq+f?zyIY>A$LrB~Li}jhiEx?h z%zl;f!td+GWkv0+ZB@$5UK|gY#^aC?`&iQFIkTTP2n}hx-3*sPO6O1*a2zi8kU+Mt7@inVv;3Mhj<-o zAm|rJ+x(u-I8P-`ZZ-eWSiHodmJtWP1=3`<8p)1%-E*Ug{98sjvE6$4zb5fF-C3T4 zR*SNEOiWuSL-StFCQR%5(!MQD4(y$IHq6Yd9~PDO=Mn92P6Hw#nO%S5uJC=A)3Dq3 zeyJ90CTy=nz}qd@vr&gScxVTf~6^!p#Rkuo2a|JjPUfDP@wT^SN;%9o8X*% zUv=BIUei*qSU%-^U;LRF9R8%tHnkzj*3fg1T_enw z^wOtyRnVr_#yVeMx^L`0q5d~|i_)uoH=QNw9D0NX+fNC!`&hiW8j>K(OxGRdIk-() za<^^nuU?r)%ALQ78;%?0>B_NVZ0B<+yyPbqLcC>{SE&bk22B=a?xygM-g~cSw67z_ z(B#X^*d9ehZCZEALQAOJBrK_eGv2JXqNi9fmhf}wc}+vm6rHNZi41`)3XcJC(}vA2 zEocoIeE-$M!tKIyKM=Jfe#1c}Zovzg6?pfn zNAPvg`ijt(jPl`}{O%v0KI@3hpMGwS7hxqF8AF|C+V_-eJo2CIcUp}hamBS=B z5AQ;s`yu>uOP8tT3A6m{>1s4+&hyWubhigXUHi=MY^4%aP`(xav@sBNXa8i`g@q4! zFI=3=4t^+q)0rK1nEKe_v99BjFEq=M(RV2(?i7@np6SoWX1CU3PYm;JE!*}JIhrB! zCc01OU7AC+s+84X^z|Ad?O89$^8uS3=ZAMKw&3_QHtZfH=DD3hKVo*S4n#bgA2IeB z4aqSu%k3~!sMyaGKHmH`xcNs)*@?UH?&_qx?Ox&U^^qvu~%4>)J1mj|&O zrgb54=`K`s^Mx@p9M8bRu9pV6#t9so4JjUTNbtjW!t^+XW1!V4zfZjt?&p~*c0q+x zda7JCa^^$t8+J-sH8(4C;-u%8%u?OkfzajloZDMp z--IlWm{%t9i3Zu2+6!0I6<{fSMhS7lgRw!2yYnwhnvER4>~y z;a7&y&yIzr%ysd5vZw=nAG-5tPzW_7&ljP3Fg9IT^TK{NJsN~;fw8at4s+<$EhiCE zi#ffG_9#gA1Jdr%vlL8L*R&2NX?6e|#N0{z9&M;F-#x#0&+UU=#QetHlsH7rmI~>< zV~_Uo59{c8!rOJJ4AXn33oIA)71<>favgz{qp*vUCMwJmz1fBAm68r09S@i`vc6H0 zlgHz2WaMy)Pj$yU2C}KA9oN@|vCpbLy_b^a!hsu#nMzwv{4%cM)OQK8`n6A%m~nzW zpsGIc-LS)n#d*V6peWVDa=+#maE7kiJ{tC=gdSLqZYS_;FYIBpe9#@`R5#7&)o z+gSNwvQ>9j=7F{{BZv5|erLWlFlk0Hf~N;F%~TdZ1|62{{Pg-K4IKNnj6Z|Cdd112 z8LL+g-G9zcU*BET27g61O+8_%pYml`Tf*x1qhcyS*S5ApEuc3fGczd7hckv+oa~=| z#p^S(pMH}wYel0zU?1xez+WVoX=t@)M15TSy8e;iubSbogPm}r^zQ4U+FJWe&#T{i zrPKDVt2zCXW%)^O<~4fYg_q|3f$!n?=N-@VZpbY-a;VERsIsSzdABtQr>eqE*SW|k z+&qs6A1~g~t#^t0DUNKFlBb?-o~7EgxY(+dv5gdarHge^Q>Q%xdGD(9&V~0YQB^Jl zUxw=A44~}vhWY+p$GKkmUtt!khFDc%Q-}@fF=@2K4lT>If{$?uIAv$1SQ2D7|a zjF^**uj}nCqtozPs%|>PJE{z;vsw?ibcfoZo+{l7>gXPKo;ZJW;J{Sfuut)+XUFdy zkLiXOF@uOMp@_Ge9J9&HX-3+W6@lj(76PfOpbwII1Ses54ktK;hjy(8MquR^YuzrGh3^?Xm?F6ou=mzCi2qht~vdFKM~JYNDP(osjn!@JN+)qEpt|H5;q-o>$c=%!Lvq z7TAT==%~tRH`cd@_`iq#kd*ea#aE+!RMq6>Z(b-se_I2qua-o0US~_kEA9auKH<7c zRjc`stD8vUMJYazp(8lwfu4Sfr(UE<&S*^)tkX1YO?K1N94>I?o3mxRDYIVK*8q1+pL#v|34H06K+v&2+c7Obd? 
zA=7QOmeo4B;QQ5gsdEb&F(OwOe?t%GM-0+wcG~q$vG>@O9WfWa0PuMBQ zb=KR-@}4)DS#IrROZnhGfl(8+#p5zh>(V<|xDmUm{1iLuFI?&$*jg}21jX#lAx&N0 zVkffR6>>5frr-2)q4G=5VW$OBT&gxxf8s6IWM;gV-5LzbLu1F4#}0{cPH9DVcsD*a zd)y+u5tQq9g2 zwHcBvyTE=je~}}fuo+53m+STeBr6Sv(lPwhRmdp9iA|=RnR5#anXjp-i5e3wzZ93w z)15Kyu+9f4o68A64^9LL0cA5E6Y?|EsDyRvC2L+?9)8icqC2UQS+i{fK zD7O5q=bmEFPx@x=gq|stPF`Mw_4aCnHNfXdM|aGm(W|$U2eSEJZO}xKDs)CW0$bNM z-8kKot{hKy%-U?YA7D&9+C;OS;>umqsG^{Hb^!6z5rEQ95Kz&!eOR^)Sb?3Nkzu3WZlJZ`~aIhmt_* zYQU+%2K8R~=+>bZf+v^^o(sP2YP|+@Qj1(^w!!07gAmLknX^Qf<*E!CmFi&$^k`=` zRW=m~6%{c>B2ihpvNlbIrdLC}9+lhoiwPMO#Y~|5UJfjTr*GEU_TYVK;{yYmA1|x8 z36Y!5duDCgu6qaQjB1a`RJ&sQeH7%h6FOhiR2H#g{>=|~RtO&nJBE^k*`XliBOXG6?D?BM?T1hh;`sFl>h=0oo)8gPtQHd?vU z7y928q5YZOU_GbtjpJ_>rr$3T)SrV~4u)_4@ho15hQcPzhTQP1W;23rEO4|n`IDw} zlGd;1Y;WsQjY7cq;RhTmArp^91zM9?v;cqN$P}+X1k0vZT^y!rt$S${1GtB9uU`_y zg(^4Thfboob=ni0tRRm1FTiw*U(59O?Y~2o0{_W+$Y#6A5qR}e4Mphzfv}E+252g+ zZvZ4)bTDAzlLVJanLedP^5V4rYqh44_@ZuUr@oI_Wxgcp#>~UZt9VpggK)rzdbOvusH#ge%pf0tERd;7fbLWhVk`rOaH8D3L!(4 zW&W@m7=Nf;TqT?>CZ_U52kPIJ!v^^$by;l3oD<$N%pM_kb(rn_0jfk>Gfk3ztEShh zQhu}+eOM1;a@RgUgBGq*)nO~sUX%Mz7X#K4$*P+R^A%*yB^Gh%6hz?cz^dEK3CKx@ zG&f}P<2~u5sW<-^vTNZM&$4EGq~9`c6ndJv@coWuwz-@+$hrkqgS7_J+i4oT!YONW z(yg%Z{;0J8s$S_?LNrWUuUV?HnAaomr~5u!;Oz2AP{O3tc_@$Qsio>>GyPB=ozp!% z?3#1zJsG_A(uAFu#~uMPIqzJzpl$oiSKJ2&clwO&Gi?#z%qej9*zg)`S#3ktwWJUAz$z8nhWZXMWeo)72*2r}>xYD^Jhowx%jsQ>!C`zn&J~ar|AGaP7nG zFLxDR|9|(v<{)&SIFUW6Pxgz5Kt%rkp()I9(*HKr^nduJFf^5AlDNAlb1pa&gMLeG^{DHUX0Ta3om|s z&ie13xZ$i~MflUF(G+hlXiVl63guc2uvFt!(FyHPL}G=lu)k&H{_RJyE<6`jmBl*|7fk z#^3b@KjeC4jADjx9<}RDr4*&t!DXgWt3bYf$#<#|=f~K$c2gp6z?_j7RTV*Bf31jKpQ^h_}1Q zdLFC`L$&HG1bG=1gxAd%YBF_eBEKj5x+PosIgq+-J9K7bCpG)iHJRHTt=aB1(>YZC z5BIK~+}x093;jIpQz*lNGBX#NWJ`7jJ|l}PW3JvSU(co zo}AgtD`h}_hR{}&XeWqj0j+gjMz+M>$f|iIx3NSw$0C$5`(*mvi|o25jgh*4b;kZH zVn?+HPxo3D@mgf3QmLWCACC+fK^3*HydGo!YpQ6yBvlKaIVSVWWj;gWKu<6t!%giE z?#M{^`?8#~Eu`1%8WIq-wwcLH=ef7a1x>ff_NsfTv}YjRP-ow-98a<47M9X`IE&1h zX-Up&P1G8*+19}}xMn?0dnPkIT%9>JJTz)84R$y2-ogH~6H~-_SFen!e_J*kJU~mL zM>e$9uRTj%*i9$DgoAt-+b5Q4^z^r!k}cm%dd8#sIk%aq&J7LIOL4lZx*gn;GcPS% z=6fF$3GUR$b+128@Hcqn`SNI6CSty#&H={lY1P}}4HeXNq-NkRl^XREX}g)7|I`ki zCcjWepCq<`b(4SpY?0#|qI1r$hM3Rr31g`l$|#++;0)abR4cbST6pSMeRRdODC=5X za@dX)_8dQN%;^6v_J?_kuDP5<-z{W2{lAh7i=qLowZMP2LZ*M^y_Dn{U-loIUNz;t$s^6|H5o<_9rU@;dlsdMMp^WCw7g*61bZ7zdv~=x2$6aRg z!B33WzTbLNT2O&M#isHaGuLw$7RDCpmDL6M4*01S{&`5ieEhoJ>|plsPhOZ1F1mvB znqyRchNGuRrPL0T4yeP>jXzK@)bF5q@U+E1IaoJ!@2f~eKj#+5kCfmOI>o-aoSF^u zgOw)KCxn`se^pdR1&c5Rf^)-P31K07<>*ZyORl&R^n z234Z$^J&B=TURqovYuJ`5%B?-Y)^5MU{joyKKoFTS}P?IoSyfSNOA)ojSRcyPsvuSH- zw9v|tiSXicgLEOsH;-HtCYmO%h~*+2FqOMcq8cSo9x{jd&EJUz+CHc zNd{R^9u9*14ayz!Q7`>?l)qIcSIZvZgxUxN&DKZz2*sSB_sG$^#JTuiLS%KZv9V*< zov!O9EVh}Xo>Ol^uric{^Zme2&O_3GOXzBW(G!8iXdy3?bGcM-*OV`?{O&qL{9mF{ ze||Hk1{ol-v$J37noGNt(Rd-;^_qlfb~xe*Vk`7_=pWAS^!^)0G=wZp&?wX;$#bHz zP~v2;+|bHYUBE!AwIA{QCe?)>OkeoRvuVg-bfm+c2>Gq16S=!N18WkPr00U`L4~LJ zjSct@?2Z~pFv4UhRTs9v*WJBg<`-qJ-exy&Y-?hs?fF|W3}tG_tzj$A(Esu-b8RYe zd@zU=%p7gJJosEXCUJu~ZWe>+=WR@O40G1c2<(3!!hO5hn!}?Cwv3{w!^cm7FZDI7EDL7j}uw3jm zS`e#lJ43SLJ_<1@mUl69FFw7Z+(ByoxrdYY71erjqhn(^hw91yU5DAKv)gzJAq8MI z#sxm*FYu9tZN~!d|6pb85qNVQvt%i?zijW!*u4}CKxDP{hT~}`qy_w07&Tq5KX&53 z|6mGycv_Ap__3H7lAPecz^0og97W!Ltq&9q`zE5%!r+h^#p zRSh3o@j8N96rq;1`x5B=Sl1T;#JL;o2obtEk=zU@eW z2}Wb36&k08dKApExi5`4!ojOR7exNHsZqAB4_v8}eY1bXLI>QVq2cD~a)abjNE9{o z8+}g_SUH;(t?ydp;2j%*AyOpeb8d_k<-dUE!6J6M2$u5R*fjtg@(PO8A~QvTFO45A zgOJ5G-Wj7_?1-1P8l}da$gYdQd++w zq#yVm?4?Xz6+O#4l9I`7$TxffG{FV>Yj9K6#3E8hz?B`(nYJEV3k3OdLjectmViL zsQ3FABRU})oLaO;Ak!jlP?Oxi- 
ztlhBMLS_Lsbg)zrI;a)c3A`;iDBazNWJP@jIa(4youJz?W0IYcjBO!Jhs<_--H9;= zi~j_yiT(mE3d@&i80dO!BL9lkW9tc_*o!H0#;RAD0ZadJguEr|6@=vgR za1G>DpfBR_o#d;>*6h-uxm0W$a1s&&x_=5*a1@q%&Q-YtcJa4^UCFZPj_zi>tMY#( z%9Jg>hLot*sltpCJ~xOUj0dXEQP;)lDI;mO3>_~V`H!@vf23ssLmIF>z%iN5>6?E6 zA@XQkKWO+^v*eht-BIBJn7Y25d;WUu#IYV1qaTllcst}#)k9UXvp=Zxw^K{4)haZIG zHV}dUp@Z4jBKBg```ow<;yPAY#uc_VNS}k#eb>-bd#>xfD)+1$<0o(lm<6%69SpS> zwr@c^{`z(dMi#Q+jr(?vAgLuB{xk_zB&$Qk_14vW@#`2u(OCnjwHpW#k&|8jvll*P z`+vCY{ofYtF^Hn;CPv&?l->5n&;UvM98-|zVEM?s&Mm;^@X;M% zry8~mtEo4Lyi=4iew*xcQsM~UunwMc)@AF@`%9y0w@aSp4GskYx3u6@eelZ9Qo#j!0h=ujvJo@2l+pV0O2){YqJ#k zx1qRp!_%jUbPp8BAr*3Mmia`o-Mk!fAH$5jTCB%E=ZsCocN`4bL~RO4WV>lxH%&mS z70gZv>bVWvsR6-B7Dibh7&%ftDg*wpUGU46ubzZ20?(K@@6thf=ea$v-dY~$5n#?b zjS_)^+C`jJCjRuH*f5Y%QpM@Ra@D_!o$1^88s7ydy5Rn9L9mm<&<{hhzOO6=`QF%n zn~pfUMo;inPY|?e4iIcNM#FEyP$E7eu~DE~euDtN2;gpGxQ=-7F5!<23-|^H{xlu) zH3M^bx2(?K=)yzrZvrm!RC_+D_t#tsmRI1hHZJ5O7fv*U5&`^d;U)&iwrz}Z@43)D427rSC~l!7!sXVVzL z$%s8+P{uxPxS?toy1UHW4?jDO_({XhGfBj)pM%#Z;2s%Es46AW5# zr;eAsGd2J+qJa;U)~kW(Uh+N|*;e8og5EsVtc}>1DkzjZg#%G;;rIkp9jS-Jm`3?uf&%D*-ow8HIcFb8Cek!>lApy8ogvkZ}b!^7uH5|3)}DO z><8v~#!~TO8!dpsn5#fg-dhKVYnbMs0bB|fp$0nS07(QxKP%r!U=E01VI4uCYk&vq z+(yQ2^f1f6NWocdq#pb#*YQC(#%&Ghwv4rWhWus#Pi%q4T>7Ux=@QT5d)lGMONU|z zxL#f_t~LzOBWLF;5^Qg-2`UO$N?owy|0;Iif4WGbJl6Y3z@HtfO5#3RV<15g-?J(} zsa<=fu19DO?p4ETVlAacjgQ~e^2!B_9c%KEx6mr70&le7rWz-4KH8L~mk(ak`HJ&7 z6?L(nHtzxjEPE0-N$!AD!wm8a!}IiJd9Gw$##R0>PUR2sv+;f7%QRdsZmSuG7Q*s< zsg0%W0cTF#dOJ)Z_|*wx8-g#URd+_Got;KFK^bNh44EYGfnVOB-rCVjyyY2(;qQCF z2I?Tp{y+j$+*483^@VW6q=Fe>ERgdThbXHmBIB*#UjCna?NNnRPw0q>KVYr8Lb*A& z+Va2*v!Bd_C2^K)1h4?K^Qbf0N@yOxMw%)B}wT=f+hu*D0d zrAXkGH`w(-J}?4!avtd;?40#(^MzHPGK{puY?-8T{1o z%iK^+0y?4h-O?$$iImdfBV279T45R6Hg*ec@2V^cQqrPeuRd1+P-MFdsCcK<8(xwZhTOtd_B6`vbD|Ec)UDOaLPak#8p=Nx! 
z@pTspz=t3LT2opY0xSaNv6W{xs&*%Ve;dR(sX>}Ey5O>%lf~Zlc9$8RSXrS(`dV61EjE520Hx%+;bI*-mbOXY@2s=T&TJT&qg}lc-j&UC0vbp=ls`h{l zU$pwNPQXStROIh*e;&Ry}l z=NuY8lXH0DwGkY9K>OkLP699*_~4B{nBR;WZN`be20rXNfuSHr0pma~dB+^#UxKf_ zOxYcH;!CixIaEZj2Fn899j_4Tb1CFa32Tw}t%#S=geW?^5dg%%rAGEm*2!lzHKxSK z4Qa}%JhpfbyyYcdRuPjAd|(fG`qCH#CIF@Z@lAGGB}Q5zd>2cy2vsB+Xmtq!jxJoPb08F9BG z%}_EoiDw!FaykX3y#~`fPHi63ZiPz8%FF)48?yL)2K#p0h4$29$j{DkW1)RrB;d{R z(M-G-;RzPB)t?5)^5O;TeF%0Hf>m;V##7_F6Y#IS)2i|yqK}x}YuwbyO zcAV>=*ibVL3Qv?Cf5ViK#YUV5J($^Y;ab^4UV*oLgN{ucaKT;46x7)4B8$u>Je2{L zfZ;$;zY2(uGnJ~@(6$R~KV6Q0!+1%oW4Ub?Y}k?$T3N3ia)*HJ9%+D>ci+5_#Pp7W zNYEh)Rst+8S~Yz?N!n9NP}hHP_7D2Tp@y(oY3S0YFDH!A{W??O<_`1qAAP zt{E!sua#J8v?Ri6%G#9)e%g{QB9z_8eQ|PwGr4w-Fy^xSBqX2aCN7vx#>2);gs=*> zzXsNxO9Q?@g<5T3-42lrMoJ!{qmXWE?1f0Fm=sD;7I-cL(Ou0emF}5(v3q6L`Hh z{PT@5F)as({bLCK8d?8UnavfGi07?errp7dW@)ben?vqQ)tGj)}SB619 zz9-he_dP8J+>BEh6?1RG@9Gz2h;YoEy-ehyfluWK;XZX4WTioIl#u7QGf3U#rgRvU zdm6OSOPc!aJVxBfkL5Mxe1L7RPq>B1Th59SNOlpL_OHMti;=+nS&O16mD>JI9$q54 zQevQ^+S)BpajN_XxXxZ%s7?+=Uqwee3dH%LJGfU?gkSdosBLj5PHyaPGhptzS3K1= z=*=VuVGt6cF+v25{~GIZRG&F_^Mt8t!SWjB=FB9 zZu?71!sa)q*H6HV<;w@FmM;qB%T~H3r%ZPu!CEHC(@UZh{x7p4xpx2Sz=UU4N(Qsc;h;>XP==`FXqkKY} z6jl!D%bx_6zY#m&RDyjQ|izXhx9cIxIp*d?|tuo(LY3nD`cq6i$VoK6K-p zn{l#nz+FZDWYBs#L6bSoeEZtTPHp4Ui}&2$yF0xomu7ClMP&9^M0v@0&u)Jxx3g8} z0G0q))3m` z1NIgB+oQz5Prvlhca>84lm4j#-hrESWKR=WAm~cC;1mOPIvEZH@4Ou4pdj81Mb1PK za7|>0jKlW010XC0v+dj>CG4CO2PvQK3mB8&h=UR{jsJD`)qXZ&Ha|eRRO>!S3yR{OF;DiCO^~K)%3s5)@eKxchF2m{YYb36$1~6a=FT#P2 zyQ>p*4sk?!j2STf8m}W7fLPdeP*`Ar@N$2Hlv<2;JflfOW@oRM0R-mD8w8?&5aBx>NM37-|7 zl62L{EK3}6l7L(BvM(^+6Ji`88Jn*nNxsY#-!>)HVU2Vcf}Hm{x_!zX`hrQ3hhrB( z%{cy|Xd7@%NH0L{x4~BIIdM7w^d59%F$|cM1>RQmjR2rUXuk(e^el(JK=<=Uomy&+ zo6YWLsfPg4Uil~hWRPEt` z`6d+xEN)v9?g$vtTMO_K5e_T;yYegm20myNDAiw^3vveNi2sSx925iRN``5BfmB&d zINicFy%H;}k~`0Glf3M$fT28>nO@fx%ZNVoG;jGCVI<@$PO!5~Hv62n>mg|3x%W44 zzp3;|#8>&2-}{AHrrpArW;DvBBGe!<~cJ1?hm``-8IJ2yq9 zJ0UIrnf6+$@f~k=z;2T-tJWSlIF)!oqOQpRt^xqdulz&cq_icX*V+`Fc54#&ZTFNt zQsdva69)^e4@gF0{z(U_NNy1Th&7}c5ll+~)9z*MN(a6KyCO$FdsMSfk|2w{{MW*i zN6atV`I3p7WCSGL#J}1e@dBC=Plyg*jK1r8S<%|K<}ez2+)Su}CcvAn&f~NgX$oN* zBN|dVJ1FKz7O_@o76WSOBcQq}d%-*X0~4jYphy`5;ae$HIFzhe zAU(d5PhMX5UA#m&2h!>L^mh|x6)|ZfW+Zcz?G2+nW)pBh;vw+k)+Mr;rkfy_Fu_hI zEvS8Z6e0S39fh!W`Lai|Q=o{;d*Mkt6_Ux9XD+}A)@gC$BCwg0`Jj_W zq~C+`OQlsHSFng$q?(ZaFkqTQfn+ssF-!%qjJ$sk^h+CRuO=^zwpZK6a3i&EP*EoQ zhRN}N0fJpzn4#Re=2^%QNHY#J2r?NGV;?i=40YNKMS_(L$SoAHF=;vuWc)Y8Ne-I! 
zycY*z90}U*+;SbRA`8BLS>}3MXrPQJ#DR=^28AMFCFceoo^%Z(W6LGIkBZ&{uPMzU zw?p9ybjTc)+a)(*)@HYa9R}#Oq6u$FV!Hp-FIYRiFFQokGD_!4B;y@<uf?%fc3_a(x;gef;C(Pgp-U_ivT@i$nTeHB>-o)!BzHy zUo_ix*4iIPhuUDLghon`T{N|P1YH#8yLWy4V=7h(>~f|_@^R$izaotwmNyG|5cFeZ z)rC7C;9h`Q;B*WP6;h9x=wfs%Cn6I%ew zfC;f~?Of)W1tcWtF#Dey%EWYyn}6QueSSA2SbbI0I#3QLjX;;ac|345r+YCbsCc;_B)gd~XetmTW!!)--^cVxu-Okf^pbsuOi9}EiC+%#X* z@0-P(X6{nFfw?zN_o6)VZ8q^D8#<}$1j;oqpgKq+71)h}_su!??>rvG+?2CBMm30Q zJgVqq`8*^x1lRH*c_5ailoLCq_u=Ci!s(UY2n+3KK|mtG~?4d)0L}4K8=^nCyWgP;wo17|84LxT%?Y2fAqU5}zEW z^I4O7xw1HKk95(v-NljOH-h2Fa>1qPnbUfllL0f_boIpuVxoDeLj84fZ^XyylGH_I z%w~`2Ck#@%_5~(emU%?u;Y;JmoqEzr?R8|rwrSw~SqsH$Syv)Y-}$T(l-!NGWl!%0 ztYP2fOiBPV!Jn6R!8h%Q0HK%m=D#pZz0vol&)h52Ot4q7=}K+l$`Knl8@%cXXDGuN z7e!(H^%63+pL%L?ZaTE~yq-Y>TWD;xa3%aB9=*!(Rc5t_dD)+1U8cp9j#LW;A6L|Z zA5Iq&MoW&j;Yt-jQL&a%EzV8IF47;Y=^xR;YNND(S+FpWQNb`DerF^7i^tocB=+_^ zX-MoKOrCycrG}jtKnY_|u1&jm%n0l5NO7g4O;4OvJ-)wrrL^-3&em-2j6_UWQyb zk=`pdDv>kznp@#f!#^l!cKqgb#X3rDCVvt*FESya48`o_!rspu^O9fEQ`D+Q!m*Y& zUo}J1{-%kE?StF<2=8+tdxs=It6nENN38#GwsJtQwH~raqXaAlOav=-@po8CLK+Rc zvdateTaDS)0z-xSDY{FiX#lfkTt}kO=Z)4ocSE=mihB3^ahLaLsx&vs4)qQ2Kp#9*22w=~bjo)f_ zS4Y%*DXlyH5FEY8@4m?z--Ak(=OU+oalrGJJ3qqht4|B3;f{Q+Ah=^`&1c4H&4{mY zgD*ag2VmH1g3X*!`|Y{Fmj!uD1-sngJIX_mIXU@~aX^J^r3J##KLnjWgbT|^+%H^p zQZvq?4W~Z`6(@sZ)Np4&(7N1Dn}M$oJ<*w(D*{$IKv>*?`H5J zikiFtMG9C40?Pg%sgQF80+)qj3}_LLP`UL$$3VzlFHjT??@PEj4G`?uV6UyFrfZ`` z@CrAdd_BZpFyl#s>-9gt&EI6BpI;lOzwzOdT_jMs1t$XqTPK1uUBK^a{Bz}FHC1*C zw>dgK_Q&_Nf)(D*V3Raekiu>J6zpp?P}u!hE=TP9Cla$U@40h{Mqw%!|KWJR3W9m= zb++(qsB=ApFSs%t1!Pdx+`P#9J^>Z1;WwROX;%%qdU18{fwK|7XK}#GO_-QWpchiP zf7@m~a4)FnuW(_t0vA*k9=_EyE*LHzU7y>)-+s);*z;3ot_Pi$?Ba@pLquezaX>+< zQegPC{(>ZMK@{*C^dEBQEw)GMH?l4+f7$^6!C+`%bDxPPwOL2G@7QmlWJcJ z(kN3anpC*JM6(xH;YId=%h!=J(?BQwc(&k(r7{i*_TWsXg|JTqm`h9kS4%I~BhWOn zNHFMQn83+Nbfa9Y$#um>|3$Ejm%j*Jc^m)DXWLf?hr|0LeLuapNnaoEwh%z_n&TDu zhfkgU>dozVxp-u=c#f+~vF-{cYcOTfl*4*abE`}|9 z07CLXat0u!-C(E_xHk$o@0j2ywIxqKA(p@5jDVUGCWXGqhYSG?%OTT%?Aw$kq2Js8&-tk7&m0duS>h-| z_K}a$lrr%5MP_@5%o&K?+>UBj`mXUPa`Q;Tspqf^C;L*D5#M-$7Mc}aTQpgSwz9T<(DBOxBbA7bk6S z?yhWS&qeNRUH9IIi1;M3yi_LZGNnZTjQdfND<75~Hu2k}0>7_=Y@V?{wC6z-Df-HH z|%hOVr~t zQck#&N%N#rzlgnkgWwd74-e}sw=X(rcgMD(&Q8>RpI*+ShN}arT zkHOS->*b+_x~3|?U#SfYhj9uSl^Mpt_THHRfpAa!@;;HX{_S3=;YwkR?#k_bNf#nc z#6$qH6$|D+5BD`$lJ7kLJSD(0N;DQ2q@(u*Tf4sV&$Cnr-tj^RR&!Hz8YUAsN5;*p z+~ceRm7i92Xsjm3c!qxnE7JP&e#6uYCi3NdrUW~4+mhoD$rhHQIo5NVNMxYgJbq@*cLYRntyu=J-o5J1RvB2dRfUnu{p+4umeNnd!-oZ14D&{bQhWWV+OW4BAQw6*LcrW?3axvMasH50=@fCm4bhMe zs%`((d97PVofF-RibIg_rS_50&bY8RDN^!Y}_7->4lbD_Yr*|5qG7d?WmY z{Yt`as8B!ZYEFJsYf?1O$_n;-*nP3$T)cR)G*WjNE8CrL>S}PiH39x^XV;8?MUY7y z45H`0Orvohag+eChZQ+n698n76z7FDzSDF9RL5+oU;~>(WT)r;S)wu7(UkY8 z@*nS_EF`rgn^SLJ*luEebC40ARTg;Qy1~+hv30bq#82g(H9@KBR%Ja zg=CQ|@nPK){I$a=W%u3=cjnEk_|4X7vebyB-%>s|$am5mFEm=lT(!MDm$I~i>Y%e_ zLf4*D6e{xo`Yz^Kk+AfO4BvSsPdgkhd@1;isZTB@4OzP}O+Rh`g!VVdx`R{LhBr!N z0gcws0nsS~jzd?Bax=QoR`Z36Wi?sNfU2u#&~4x(E){g>RQYSn9XHFItJJm(F0lYh z-a2CAzKbkil1v#hWDtY7xu_aWg6^I8>pN;i2AqYfcN5{EuLuea!1UIJY|TRS}2Y zR$U@}o>N=!4uk?q%g|L;{M{9v3*elCz*Q^&Qq-%7JA%caHhJO$qX8GPG=%dNi#+Lu zb=m-Zz>M}_lmV46PTCF5_0LNAuf&XljzCrU zfKXZBsSxJ^xjGW_o+8ad4R2)KowzH?iZx`Uxgq<|z)N{xUPKKA{{phK9!zny3A#Ur z;b9S-An8}Auj@Nux(0cJ)IFAY5uDBRX!94K#BVgK$k$K8T zw>-*PxRT)B^4@`?%JAj`hE!M#$JFmj_#cyWG1(|2D@c#9gj`_F_yfp|yDZE+Fr&g! 
z5v8ad0P>HMEpsA7kzdj*%!L}4x@~W zOnsZA!Re6&{@&tTA8ueNwc;%(7*9I#5JznwMg*4&pbr2YSirOw_&eWN{%Il#+48^F zhb+*n$l*9}G@BpV?*gU$z@k3^&?P$xagugu(FxbCqz%`RI3Bf#SO>9980}fK~;m7(sUdIu=@bWlBLb5<0*;wgTgHEl6OlGqw>;b zNDc;AUBq;Sv-$|$$6!`@U+539z(F^_cW+A#f7l`$!qI9*eh-pw5CBZ|n8l1xG~lEG z3rZ<^y$$?aqf^$Ar`}6i*D{e&0JDMlcMJ0lxRvZj6>$f|=d0&W0yP>0coYD8O~B)N zF!r}~yn=}JAK{^#q;S0L@_u#r<+E>I2Jmwit1ZHXxbvn{~*v4>^OwN3Vq(6{#TS1-%EPy1&n|6qxQ^LHZp(U zB7Su@I>ulYXxRT1ollTZUv7nAF%&ThGWsDsaC16K)eYH>2b@?)tx7x(^Hbd`&sW3f zu334*7h&&zF0%NEoF!E^j3>=p8N+l8Af>VEFaH0Z+WzE%yk+}La;6)T=&e$b6C|CD z$x(H!oWSJlAUUYD(c%CK+Kze4AZ`BwJBJ|$JXfD(Uxj_5g{w0sNX>e$;2RuY=OvP< zph_TBj74c+Jtd?1FI~nDk%A?syU|P8I{tr|NHN_Z*A0$W4*Fg(&TFE(Ff2Ni_T8-Acy^Hs5DMCXKptbu_%;I15>%$=T}dJs-L9 zy-JT+`jRD=yO0AXam4ePBJ%p~$dYvSAI*An3&srtY$gPEzrrYepTTtjqfB;ewPQ}a zf45jVdxzPiJu(}3m!?Duf769OaF-Opna7J17EiDMD*aME-gooi*jaohX1y7xfA|5J zxplYh*}kr5xQ%(B6qSXE$AO3Gpn<(*fyU0{z+pOM5zudN*ouS+FXV~Mzp&BwFzgE+Q zg!*l@atrSV{4ly0v`KQ!(82(G7Pn7Hx6y- z)gl{`1mw1txHu?dVi=`QK(Asf@~oGNr=X{fr|9SGmG&F|j+DDMm96ai{nJ#O+B@e| z|A$+syW-VYHXwozV;BM`vUv-%jQ$_-l>HvAmBye|;n0 zOVVoH^{)LRp>Nke=IM<>bw04)iN6Wq*E#t1E1#M5KZHWVxbzXVU9-K*dv99QLh|d( znu#;H1bf>~_!#fV;T?b^*a?&vb2fNBZ-+Ohjzbki_6cy>mA#95ue)ntR!7GK6`#b{ z<U#zdc$?hu>m@0AT%zhfC)Tq-aS=%0FU>Pg+XwOr+^ zLW3a1mf47PaHin5&L-IifltEFw!?P9ePLFYDhT<>yu>Ql$`Kl8Q2fT%&}d_ZO8HSv zJa_;Mwm0JH-n;|sy%v=Fbw094C<|%W@7MtjVYx!n>AB)CpV`Sb85X}qt9bphJhVh9 zQG{6K`~VxO#ZL5y$2y}c*qtNLP%oNkx{obgR$J1c5BFNhUWxXUjpC;Wsgozts`?3pKQ{dSrKzI>nj0hia z+(u-zgFir@yfhO2YlICOa3CWLo55x09n^bKv3&=0fbUa=d26FWpWq>p9`)_eHb~Kz zOJ9nlH!ijD>vL+_!646DtraoR1W7#ojqN92U+_P?VK-gAvDHY1iE=ePyvqNCU{OWrOCoH=b1l+u3$zx^&j-`Pk6Q2< z-8^xKMcOcZEQo1L=#G!@$(Jq?^ZRqwIsf^~Qp1Ys{Bnos>5Gko-ihlJ>F;Nq_C8cM zv~Z?MF}!2>7yLMe9@3pgWFZQoB`B5G*NFeg6;0?Ra~PrV^fCezl6$5L%4KId3mh$) z>va4A2J=F9mjX~lg_B?n3258PdDbuaf;H}1Y_#Hc5TjS`p^*MYGCTN=DlrBr&u_Nf zgmWTfZuFMwu2~}dbF`=7UNmL&MLUN3Q`YmLFz-Jr=})r9pf|O9qt28-E?bwtjys&> z5j`A1xF#9P>X2b_Ykd)13ke!q9mRKOK`*Tbqx$$>npg!B0-C|kr86KNNw0`9Ydm8S zYY^gQqrz@WsqrT*3iu<~c&^jba(cFq?6_gU`En8{$+k+>6k%9iwTRLDz#IQ@0OIWr zeMf|0mE%sG3(XZ~q_s05DLa=I;V?$h66|C#74cy)viU9K7&BTWCsf8^0rUVOTWpM_ zV@?O5icWow5c++Y8WN6e4bI)4y?!Bf#`vAV>+pS3x!tIbp75n!R&G#>sgW-eb$Q2; zfP8v^h80NVy{^oEhw!cE+Bs? 
z(xDgf%K#QPO2JBIVdg=9A!Ws32xF&COff7-T*CZfd8|C_(4|zpv>D@;fRn~3EwK^P z<(R2U%v$Oo()}=J@Jnvw4xV?Z*uNGs`?)e#`>CSmk)8v^u(rp>agxy+8pL6_XiB%u zR%G=cSfm!x?hCzE<#Z4SIt$%(X5EFYxUEF`oyouyQ0=E763}u1k2eqj&D^1v?ENCBe_Vc?#aV3idtGUY;ZKaEAt z*Zi`|FyPHv)x!F^N>rfFgkoQR!HvGc!-sl zfD!p}Pmt5%0CfPxx-L}yz$c2-^Q%!`3{DQ0T;tvGN+5e*;=Jlc9;F~A!|tB=B!|1S zt$Z!(rho1k*CRHry`~(k6wG2|v7bt^akAn_)L@DHwG&Lx(a@HgsdIAMw2T{4!_)5_z-p{_I%2(s>j!iVWWj;JC1-ytg8E zdJ?Qj8qx(f?9{)wg7$daOy`eJO6qAX=PjQBCI}7ts-ryfk*f;JAj56M_YBAzCF94( zmJuz8#j>E>+Z&MIusAoELg9QBijMe;PH;ypg1XTw32s4%Sok0ai09dpKI7ejrpu(W zdlwu!jZJAG>4{3+l_!9=Nlljnj=rY7k>&6Rt5Qa113}Dq)Fo$ zjsCZ7v5SDehlH#Jp%{oIP_evCaBgE0q(5{XL=i}C>X1i=9XQ7MC64u#$T&j^Yk|LJ z%EX15?tm3(vXs~7=-h+~?c&l1Ed%mXc@8FHS1xR@0Bkg|U-7 z?3#$bQx*X<1WhEsto9>sG)m>NWx7CPwYga#xmrYTLL|S+3g{H)Qnn%Mm_$!&?oai& zQB)?&iY4A2j%`d9>qYWe!>u?vqXbgt70&990kBds;P>x#Z`8R8NVjSqcxr@}yWh_2 zM4s0x1Na!24&!nJ8LRy7<|x!j#R--!y9=HM55cS}LR5=hZX@7x48z zLGYymKr9(WWeS*RisVw2`%n_ST=70OC-(iSQnO_1))EKpfc8*&9Z<{^FG%)MELEzmi(BfaLzkp-YA6@Pw1rB~0veNjia z8TRFnqOOUk4c)vr$f&94He%Fs3v?TrY7MnH3UwhrNX}bCMUNeL1icpeHy`rhjp?^; zaKnjY2gFij4%gzv5Iw-73p|-q-beuDfQVllA-he(g_z+ZTLqi3%x|ZElWZws>j?E3 z>GFd>)Yo0*WEFqO9eLRJO=G7HqoUfXQw;CuwP%Ci^%P+8Dk0Lk0-{(9nZ3<*MwXgvKackVsvnJU>wXp6Dl*(3pN|kN35f` z#<~1I2lvXEMa0!Ah^h}X%lpPD9~|EMS}2eFParHFS#uuHnVkoow;&T*fMth83^ypH zn)b~^cfydfCydLZgJrJnVCCLJ3MQN~=0Td$J5oL;LlEO<^Q)Y+DLfOB*Cc*Y5m76V z90S-&-ZnyP&%UVc7THJKX}7V7x`xYZQaI_!Z-sq|NWaE9A4#(BQm!NT!xqvQk0+;Jr23DgOjHq?U zrDB8rg1f$sbCNeAPaD9jLN1CyF9qJwMC@QM{}Li=E&1^Ut%3YGI5y}N{K*jmo#0cCvmBYgV^Tj0&VbD(!*BfK~0wFO~S^A{m2PV zXo1C1BlXB7MECvjZ||1J4jXMF9IGHt==L5TK<|)DIX9F(fB}4YNgzv&WmQ=DAz-D? z2csZ7fqrekx>-ewvMQ{Bs`L8=pICfrC-V=yi_me4Y=WpmG<9IfKP9ijg#8IS+Rb3^ z9<4}~81(rQ#>w>5b}-u&Y7-X<)-a9{5H`+rL1er{&N2RTMNI@UW8-KmwZh??xlsas z|9lOsUKI8zB>)+<;|@EtY?5r`lf{%lP!TG{kZPJgbc5@;Egq{18xaaBPj6!nh(K>@ z(h&3Nwoq~g_wYlJj<&~%=Vl)^G`8VO53haCcR|?+sxw~%zL4S{ARC6L4tz%HVfb7EIC(7jIh+@0lRwG#R7caC zUVl>LfK^b`x-2^&+=Md`%8Cf^!yAOW35woO`Vi*x1g5vt7#R}^qzRxAr1wcp~@jv06dsC(^T z%o~TqfXB_|OuNpr z!guVWowe2C#J;~MS!l$Dg=~(GfbXP3Zl8l*3vkl@!haW*yoNT($5TmqwHno|3DmXt^uy=Zo6&ymdpVkA0^f+oC4Iy3agF?7~3PCTgGGQorpPEo7>3!sHnt2n zxyQGv3nWOP7PR2je~l=M5q@&5)$afDbZ;R~0@iqz@PD5R zTDha#S;W2-^L6+HcMLx2D@V9=HW-;^=qgrbd+{gJzy0z8CfS8*3>irYDdPH2IAzzEJm_u6rEaY>+#L_EzES z@uv3$xXbLdoH#jz*>q+~`4{d*RY*bx15ay1JUbh$7a7-F*a=#uliGiDKPm1;yU1Nj z4FO#c84o&Dgf*}-T}Qx2`P;Obu8mcXJ^a3TZ_UkdE0UPIf8!+V+6{D zksGT`vf2h8qQ{xf7ftd2NxL{kW1ab0+Gp|Q(>iVb{V z+FdKC=YO!0IFUkJiVNUzzi3zrzTfKr|NuvAHJ8$h3~q3u?yD{(6v}_XmbLYD-)xfbb{> zbmy}J$_=ZRC*;H@t2lOOQ6SF0kEd0_r}+_II?^2+f{6-Ov7>ZiuJvIe`wz?#0k%8( z%`3g(;4jrV_f#cbugf3zyvDbgr+1Kh zdGs;SU^M3Q-oHDR7q9$tqkrF(b;DYlKzsB2^RnHr0y&@tA^R4C1!BF8zdvq?f26o?JeV`WY0B7j zp>AY?%x-ET9)a!Yk}e1e=<7stl3i7M=Ib5q;%So4?L*0? zfXQw&(Lv06-v&Q;2aud*D&`!3oYL}l1|R}HeRS`Kuf*l;&H4#N1 zKWW5I{sPZ^tjj&H;u4IY(vNR|C{S`C&yI55&n^MI!=CPc1C(O;*+v0GDyZM?7i;I81c;UA!M(DmDb>Z_pL z5X6i)DjULdCjetfKF{^f2+$#e{*jB~G5-HicP8CgvVNyS)YG&18-BgnL%kI8aRjeO zr5mNvc+P%?$wb7tvjW_CEE(t#G8}ww3W(pIl3=}P9pPqN5BhUb%ICsK;qCBZdf8=8 zj~$=O$_P(9#s)0QaXyKhGXbT+WZIwc`DBAE7&Hxn;}aE5nhE~(gv1rE#$GOcHB$*C z7SMlvlGkea4f5{6u;WH!1}rVx3W1tG0*nM>tbHU#9rEAJ0kZThMkQ?EGo}vmuNRfG zlg5O5QE=4P7_9>l(kA#bEZqwC?g0TAR+>8JjWG`iv;;$Bg5HGafm!_Iz%-x}SWy2# z#$TfGeE(T?JcOqhvN#s4>O}*|eP^t1fiJj1bBkGL-_d}QZVa}YH8voD2t~d7#%)ZL zBUJnY-x}FPIcpo|zT(>`%>Z5Ic(eQ|I3SaZsm<5|4TL-0g(_@;S2n=n{X7PtJUap& zZRioMNpm{W%kkN)NJD2{{5wJ_eObKbG-q=Drf-0aUD z^cJY7|3G{wq8nEJkn=0)EvAk0&YKO_1}yeN5ib~)2u_O*s-q1v+kuH;cxz99f5VU! 
z9LaDN;L6b5r*n(iv3)>?Eg+c!gi8Y_mPrOcZ4R%u8!&d8jr4xSp+v-cv#S-5mPBM4 z1J?np_rb^Onee_^&cXr^NB1u0aLSSZX9E7%7?N+@0r=s;ZD;2Wy5BLBZ42zfiD(B7x(zQ;^3aJ-om^pOAKDkAdgAtDlD{DFdDhlcmjk&m{3eAbwRBU5LbsbK<; z4g>$80qF}xr@{f~1mL9{Pw2hOIcmb`oA>quVt0*ddMs*zg~4e+%kw@2HHr7*%t>(E zOa9`cnMjpZ$0h)mOT&DJGbaK_z)(F<7E6}GF5`hhGBlF!e>8-7u)@}M2Q0P#Z9H>u zhkIvJ9|oXpYIqNYa@qDA?RU@=wViEm1LPbB$tCvN+G6|oQ*G0blq~>#;TIDwiZnL? zn#r@4eH7`%3CmDYEL9ve*@$1Jpy6MoU8k(1N``@OHXTrP$+=AT9+;pFhI05Zv?&*= z)-2N(Ln9>u6})B#IfajC$hW1fK%^n(UI<4doHKTWBY-`(UxnXgSYE*ao|BEpExPv^ z+(@H$8+_;(3mrCpxHE1dk%C@kcr$*o%;4#BIvjM?XebBWpCLU`={Rl7Z1tdeiF25L zEEZX2%00lBqTiH<_Gmz!Vw(d5zof~QZ38{A2`fz0HzM-hV3qgohT4KVLK4or%4Av6 zDlvwfLZQA~8dZ~%;z>O*XL+2qYA{AbdNRhYwB%SgH&Vryh)8oBav6sVU~a(YCcH0f z*E!}hXC2uah5x|;+JevSGxpU~BCy)g!?O{BLkfxJ08)p&(ggM<-HibGB?j;eA8QOu zq;MqXY8~e*;g7s9J%%#&Iqb0t9Kh+2|KU3m<`K+sx9kNK4TW;*vC`{6NtZEK^hcA2 zJ$S(H5Ti^uhh0Ju5@uv3)lYKgVEJ*P>@W_nY%CYVjA?DKs#6`q+AJRaanh3GBW)e14FKqMuaz?r&ql03#r;QM#!(ghiQNl zDR6aKE@(d9JB0h)U3q+TMK61Uom5ThCPnyqG9%J9#Aey1WsIj@7;0^q3XM~i^9U(O zkja{;#81pR7Mx>FD*ApTLjaR=KRrp?r<#*_n+?^iLDfh8C|mc9W;x$))o*U|6FZG#E4e- zx!m`gG;6Wey>Y*mkNN^^_3D@>CDAm1dV#uy6Ue4D=n#p;c94su<=WeCwOl50O3VXtSY`u9*oYzKeYc`$xhUp?l>iKHgx%*QIyYCV3&72`{CZ-XyUO1U@L_|Ud#_p>Jf zNSK|}p@8558tp2TvRLJvhG+iFW)v7p|Ditjqn>3RtlS8+WzJA&l0&fEt?IBX{-qMj zN(x@}$W_iP68QSLDxgXlaaUoT-5Cn3IN*#da7kWp3-?WVb1%FC^IH}E&S&-#Y=mR` zi2jkC(qqlp?bUt6ETDdzKy&`xDN4e`KL}sHUL%eXY&yei3QQ_DmJTkn{LbZIk0{947-E6-xX2NOD)NPa}F@3ji z6OkLlB0QGi+%Cla|D$^`6xUn4e?WcjB-EakwP>%Q)?vz>W+vV3zTs z6A<0EV^Tf9cVB-s{y_DAXE@h0n|e6!}mL;x6p%pSYXhg`g5Pg+`rx*n0_bJ6X-qY))9N6UT)A!!qq^p6f`hlOt&(R%(Z zR0U1BBLjnwqO%*gkYEmT9`|FgzHoI9@Y=2?{B;Z=N|Y6d@hALq$ zeU%$x{%FUA>!MsaY?oB#g{NhXZfexugGHA(+jZ(9%Q7{r zVS_JzKI*od7wbnZygm97v+%2U~)*@S!c@(+B2tvx)FW zIVJ3qzMEW5`h1hn#*IRjYkz%)OZ$gcewzcG=p|5wm~Z3fd4bfA6=UzIh+BUCzr`xH z3l(;N%Q>`vRwvs=!)Hy3a7$+#WH7O?8b_&{p{g!z_pY1TWGiisIL46IR z3aX)!7aoN%r=pRfz|Du?cFtSB_&@&7{qKS@K&@n^U`Lhk(=!``&yUixb!_K6@4i0E zbaIjE4I*w`*_@jP!bs{IrpP~tOW6@AI&cS>n!~Hwt9Qs6X~%o?s(VZ7-3WE>E8gjP z-$mX@PaJJe3IflH4~w6`Z9+qJlVU8-h+VwLdR3D|QMHK~_GeYmP6ql}BxMl`dxwdg zq^-N-mV=LR%*IcH5>zvt4JM^Zt)FT9a;>S&F{#muDzbGBT^-57u(SMUf3^nw$WA*+KC9(IFjtojo}47S;|G>((W}zq z4JQQx8jlaM?=<&h!4^eh(3dyZ8JtGO_$*qa;Y@B0g+G$DpN1`q%EP5NDzV-Ws=HMz z0?HEbJ|VK06Rfwx?^z!-4ze7_)bwV8_(@{tn~CR}-IW@ZQ49B@oF9Z}FBM}0p(v8G!Z`!O_t!M|&@)(@I{>h{+;!Kb=`B>PC&uG68Sv8Rw*xB)f z^ylOI$+tFED4_IWA4Gr$*CLG&o0BH~IMQK_hpfktSPP`tnO`1*h7h20( zK1q(*4a%p#58^e^+VMNMIxdC(H-2Rd=k;?TF(;mfI6_@&cAC0#gdASqH7s#=ftf5M zI%r+!5<{@Nb$C{CL7#UeFTu0*VzrAaU(wI<_#iQB$t=%mQ$0*WOStBzj(UOdaJXp0 zaqEl!ew7$FhzFMT&Y5NVJu5p@{Kvdwy0ZTc_M>UlW|vcW!1*9YIV;SSoPwwF16#-& zG7@XaMyJ;Ej~IKrey;G+*q!Vg8H_wOaJTyHgwSXW<;;=94wxcJ+8O)TTdwX z3RxRs4{iRnbO`sn#eeE{%(*Cez0xEtz6UT7<$J>T>Kib5@n<5xGZALbKb*TF*&~qq zK2g5v!uQiX`q^@p>z_RMuIE_a&m%v3J#ppyEgiw$vI^FE1$NqvJ!pYT?J2 z-|`ZdUgJG(`lp^(c5RBqQiP>?PLEW+>ASV>qvd zwBXuOx^|y8tW~PVu6N+n52G{p4;l$H@%DX)jk+=1;)ED_XXH_MIdX=U-0qan-FhV9 zOp&UKMhmmP{8+M~@vCHLOV>!dv*-l-!Li?ofy5?%yc1`mWAt|N) zukSiFq;W@wXl7F6Z05Ge!r>x@`*o>_tMx;IdkwGry&DeNo97%77w8}_{Mwe%zh_@9lf;v7@l3~C_x9+h z7^&ru$QH0^Rs5X^E$JGxhRo9stZSyVWp<{a{ouJxC#?!Zxq9N{l^-5oKYO;mdG=S# z{La&T5AzNlXASvjb9lRY$F^`-j$Xn^heR_R|JjZ_qO|z5w7?O?&}9%UM&MdzGJ{<8 zHZ05p`ED`dy{U)nJP7BQ<=2`EMI@J-#{M{7fgK(kbM=?{@>ANT z;X!)^AN_gz0!$xvQ0*w~axP`Y|DeAapKPcs zA(i(af53wUf!_m{aKXt4Dy2N8AUICTsfF6Z*B1uAl;k{?^?d*H!fHfxjbp!2bnSuN zh@_Ur(_qEWWPOP>e@b;vK2JQFJNe8{dzmV<0 zs+nbCZ5mQnJGYJS2ljKsj_xldE===Jw-#;GJyf3}z}U>0%J=u&l;W7wqUj&%S6f5- ztt6l3SoWTNy1n`|au9+>_A)Pavdd+rbt&OGo2g7!?b8&U-`meZ+rSximxta>cRZb6Sx-7LbLa2G 
zTlrsX%~-=6OCRZ=E6ac5ru(acMms9D?(a!4UX$z}2HRIL9{18&HiKJSAQ2Z!2h{kr|jPfZ_RGb-zcpZAQN zUP$kK?B!E+I76h_2ydRNSLIhtP`%ar%PH+hUDKnvKgm0Z4-e(WW*={eJrZ`Rg1=S; zG}!hczVvXFU_VdTy(6U=#;R_Hg0cR&P;gl1Q?unVH`o z)mwSlVR7PxT%1HPmUh}O<)KukgYu(v3SU>YkLt0Khn@f zYiy}EHDx;ogh|dZQ}qZg!VRaFp2G#Yr&1(U#`g&}vWDk$4Xr}PFV79fIyY{&IFl}J zIfl(q#0`GDna;yvlEM@nN~cel8l)~TPQ-jsnLapswtLFW_Ya=Hvp@d0Fuy{iHiaMu z9&A(XQsHZQUy#?C_>8P!cP_W1*lT0lUasB-K zLk8cNWOmU?sg2F6-eW51tbwS>7!Blx`n3^@p-h)oPY4sX0b=sY0|{Ra>iQLJOJ3A( zigY?bDbW(C$r;4#Js=qPc$aS)eB(W`$_a@nihuiN11t9RWA*_rtxIqhvqYV)XG(|j z4cD{QV_Zu+lu1u7edAHMMi*)4TGMoAsZ#bu_@*dLpv-I9=EJPyg9q z^7FTA=6JB+`0^up-4!e6v96mRKzi-(Hs!`ZW1hRZ4re9e6h8L)BoD;q=2twrOx3vK z?P}=#O#mIEb?0lohB?|_6Q`@alx(L#b{E*{jcncKp*;0#Yz{D(62+Re3A{ftfcMcU zwB`-_a=%`EfBZ@w)u(hevMT1#RKq-YkD~Z%uWKB)L5>M5t_7ie-LAAve>-gw;r#5Y zRik5T(V>0GpZ}g-`dLvaT5ABJ*G@g_9f;GZn8E&`rsEPGKTLbJ{x6_SB~qg>Sikap zpUrp{j_~Yj%@v=3YkvtIt)ke^2Ja2nau3hh>IhWGkoeE*{ccCP(9be1?ZiT+R&Uk0)$J1dkL6ct zYa}Kc4E%nZMM``rzP>7z*oD0poM2yU9q$`+z#!*}(*=>9nibw{(*Qt!Nou z@sa*BzA69GypdF5O@{+)e&NEKzZ4$2c5(7eF?*PeL^IK?8}lvtjUM~>dmH`>pK71{ z!0MyzO)>wePaDrStS)tZQDXZ9!FbSeqDs^Ci!00KylIinl2^n86;t_Igera$JX}tm zU-(7cTUtDVyoR@H=$NsUXvQy_AJ2_exe%u+Ve?<4KmYsTPbYSgpQxUn40k@dl|WW~ z6> zaw!b8Q{pQg^v7Rz8D9tnPHdbix+$`?$B-?s?wB=wUQ`fJv$?Z-f7E2S%Y|wh2U^$?Cag?_4(& zRJ=U?H|4!pjNrIoO+mTPGnMff`ML3G1B25V4oS~t-#6dXmNPY^?QCswj|k+aaFC(f^!^^(a(>cIIX+3Ot^NB?FBmUlTv{5247_FG*|I^Jg($7mwb4r zB&l|?SS85*EJ^$txvN$?<;Iz`wyeq8`;bK6Ud(9ozt=*l-LuVZEyyQId%lh@8h&4C zVmHVeip~1h5jpbwo-?Kw`J1dLS0kSjOnwL9FTdZLZ|Gpl>kw+?f&y~?Se2QuPI~mN zRj2k>23fwsvAoltXMKHs|u`!S1Js0!Dyxi?<2hF58{*lZmz=3jL?Zu~0T% zbTIeWk33`VPAS#HEgH|*m))NAJ8DMr)=vHd#+}&MnlaWZvvA#YEdH^cpXE;bSy#wr z_VO|e;$UAsRPfdvE;Bhq)*Eg2F3l1n8c$%nA?F{Ko1|97C}-b{(E;Wq^nS*8_w469 z{#mHUnz(f3yHG{{VUuUy14Qn$24mcs_n;Hz7MRT{nHL!s=iv=Qg`LhZcFQHw)D7a_4RQ}(BLVqGo&=%2j{U9CSE zK{`Pb&k~z^G*lTPK$kWPEDy0CsHfJO;^7$Ssk8I|_4CWKZ9igcEZWX5Xh@!))!Of0 zp6LMeZ-8Gyu8}ggP4OLKE^F8&c%aDPaKhdUa>+LbS3hM;!K_IQqJBjCZg~B%eJwPU zrFALE@@>%_k*m6+&V7L;#J$Xg>!hwNGj2jfC||!hpg;Fd?yRj1zr*w6b<3f8s+~wh zqJG0w&Q<=30Bpbb=w+h7E28S#O;EG*$Ek;>!rc8FP_zMn`Bne_qwKxHn);q_;luz@ zsaB8{EI&|@rqWAjiUk2HDo7AikRm1Y06TOP1gumEiVBK=2uLTPNRbjy>5$MvGXVl5 zG0EOv{@=~HIXCBda+TfInmv1F)|#33-Jx*g@2+O7$hhhQI)34xg)~3AAm!%J4@2^N zL-J5Fp3bt0f-WC0l(7hsdUI-WG_4X&R{x_jxtyQ0Qs($TIs@i4algU*WIEfAl|nk#IlR&7ZMxXROTTAJl$J{xc$9m?5#- zL8c`)ri$xa8nQ~5u6BlQ<+DaKb%J1;BEji&vHE=I1&(08$&){u`CHfCxlz9AH^ zf-Qjo)1OG!y(9urcilATIHlsQ#@_5}c+#@9eOZO%w8rh{Y&(7M@gQ#s>mBS=;n`Ai z;Vk2M2aqK(j*X_6$1BR@zm4hldNf=mv9+DxNHWli1@HExFMAsEhTIfO6S$pPwG{mr zcYLs2YmSu9!3fC-wo!(hZz>*lhHgUB)In5>hoe(K2F2s8;mtShdB%F>*TAdvURV zF7*+ukzxV)L|lAm>eDn~c11K4r>81_Wnm;>(Euw98Pc`2Jup*pJvaM)09~uS&KbP^ z?fc{kX)wA08ScbDx{?|K&QDPhbh)(KAh^-SU)eg*Vs!0zeXXE!L>`0Ffl=oAKpk9df})xyZYuQu>gzhE|nt>UK=oU6Ck_l{zPl5 zy!UR>(P+hy9)0a;Ss!*dKXUdFoAHPB&hE7jTbh|ZxzbU(w$cI5CR;Hh(;9iENDeebo!*916*cbC3h44KK! 
zu~rBv*;S@!Gc!SAGaS%SDEo{e^ON80>|41nhhD78mQ%rE8R|a{?}(1B*3f6J_U>$` zY!Z~aj1aDJej_JS@XcmqSRf3|-Qx+?QbV8Bojuh-T;t*+1=_k8C}8&{JnJcXFs+HY{Q zHm)#1A|#Sk=k}Gq7yV$*S$y5Jp))R)9BF5bTyV5|?dnv*w4nttxaiqfFv9ShM^itd zwtbD70Od-j?knLxPdyqxc7H!>+g44ZoP0Dlbuuvaw8GKGK;_5FTR#m5UmM5jv28+s z0evls=`%HV;~X&}R^CuK3nzU~x@90?rlHy-W`UnLemFzHOEdFaD#!N?D!sb2%8*9+ zZvo@-msBNeCCSdBT+jbiV;fWcLVhntu?$gd%-cas=_B6#hTl?W47Kz0mTT%4sdL8b zK=fp*yjnFaeoNOTS{n-RCDSakWKU}yN(2TQ17|p+N+l|1urTd;Vpn@vL2wS{MUE#3 zv4m1S#MCp*xfo(o@1}20JC<+4<88x~qOpI1Bn<7a%GhZX1%!c2m|&d5V$VTsp$Xy}e_2(=fME%E_};i#S+Wnx9j^ zRPX1@Da<~ElV}?y2HC~=^@00RXUrA4x+C@`{EY~W_&Dq*g969NRKqLk~141 zwv8`re%*wlYZ#E?S$m`y+hXVz&xt4)?%L$wa*TH{^zf&wVRtqMbRUM4ItKwmWC`SJh!RJSN*ZPqFhD&miGMj zJ-zK)jkvSb`}2a&FHnC)4Mq#ZK-_549ie^a8zMdc2#UsSZy3ugtwUxBzqrV&%^?vY`sWjBWRUL$WPiu5eaLL2;R|NpCfmw z?mOFNG3Ri7W7|W{jQxSXtL3#+gI9vups?|g=s^V3C_T7;KKD&bN@u^Rzx3mX*}uwj z>Uxaaxp9nUYo0mbe96Au8qHkYswHgYOa|c`OtF8PnD>W(meNFCqHZJA9>r>>( zeNyOWwDBeAMFw|M2V_f<^c}hE-WkV3^RN+oJ@IxxjW_ah#_Ao;yPK6LAWbHRWUg=_X-0G^r+aGpLMAuQ&RV?MmMvp;r-Nxz-}|#9IFBpT?o<*HAw4y|(W|FigXrH@ zvd^ohJrq-0PIWu1adAcWxOHtAlc`wFOekXgk%h>)kqAR>P)Vn`}>*^e7m;xzq zEas@YV&AVsdAZyRjA9x)skG0}X;%b1Do#14O|%1SWy@_jfgJ$*1!euH#Rv zJ&IG;q(vk=i&HyZN_x(vDVF}0gwKsoF_MyT>TehP*4xilcGaJ7Zh1F{X~URd|XA^XCzL#(hJeR zzzDm78Y*7{ZE|On^)GE?x0R(cUb-4}a*B37K=@)*>(e6`gw7IX^+dtS!K-^BpT75Y zLxa#qiovU%IklgGc0S+OJyQmC*redR#Q}|tW^WCn{l7M-7`}=k0R4k^03f780^5d|C3z2X8C{37XCl}`9IK((17b_zPDW$Nf^B0K=2+j$$rFr zsBt4kX?rvtFLp9<+sPt)$C1P*$%mN9O0FG79fhT1^AAhuo4Lymxk7u6be}qMx1%*@ zah`HSX{D-YNi1;bkA85+C3;N=$R0ee8DGb~{+IvvPT=RspIc{1e{S|)eUe3brEux3 zd;9O4EVF*ep3CIC+vb-I`pXM%6b{-|F!irFTr)fhmKlFWyyOk<8rX3mC@|`|^&fgk z;iE#u!d$y=wEvDyFi-xbQ0!_M*RP?D8f!`!wX2wi4xTtl&b>6(lbi2e@-B3;=W>8Y z<2&KPxaj>_ZUNxZtPJx}o$GEh`~Q(jUBZgcrJ{rU&li1~fN{yAqk9 zKXcar_(<>r9{(Gos{(+%q$#!6>+!mm-+ohi{rQPUgJaxhm%tHz{T6p+uFE0ycEXHX zNprX|+duI^nM%s1oxIGSyqndx&+SckVxL=x1@|&Aw9x-Wwd!vJ&<0sQr9)Y_-~xsUxNwS@V-Y>tar1&%JKJG z89in@kOiyUUuhFMn)H=5C9^RVzIA0Do8fE3_Me}o+D2l;_z|x-mRe``4WCySTmE~! 
za3`?8eJ@V-z6@)_&5jmu{aIQ3Ou^3ftM?D4MDZ#E#AG-QKO&22pfC66Rrd3ZvaHT= zl@&~do9#3Rf&m~d1u&&eY`zPTIrT-&XpLgNq2C725zXFebD3DtH%>#|O<+=7k zTL?SE+H&NizSiUQgkMJ%M&BMB_wQ2M)&F;M*__h!VfD9g9JCnsQvbKh(w^}XduOg` zoR4+V&`B944)9qNVNQY!@Z2qYLHd)fWodgwj^J>-52&Lx?S<(7{&WUbFZT>R9~B+= znuo~dk{3hK7CJ0WwJn=35bDp%6idI;7=5jmzjFGgC|B~ zXfdqzKp)C6!iA%;xk|bJ;`6Y8077qIJpD(+&&Jf|MpP2_I2TcBHWJk&4_SujH}gcV zfvvh%=2bH_QJReN>;g-1NH?JZUO4`q-NII4U-W1cnif3#_;WYL8z#ew!)rZEWOE4mZOki|PE=K@Z zVwEvB#$xd|)?fCYTESX-n3nynVwQ{h1j?I-!fn7;bDo;nTKt1OZkmhK+L{?J;E7|2 zFZ}5FG}ZsGm}D+@qZ$*GwhAD$b4-j?3JhPMr{18F4n=$lQT~YzTStF-_$^pNOf@#m3I%;)1hvchCIR5Dag8=` zE7FBXtU+57xK?KmLeC}xIYWj2wmeEU@n0H9^>kkkM zDSzMsVrc872eomZ;f}-{F?)?x4Ii(^~km^kXb+T?l{_}<~1T%`9K-Qi(${0}gnVan-W zQ-^N%%?8)P9Va}>=)aR^$T;)|I`!DPJuCRJF`-eX(YBanwTOje)~%j3yy>0RNP^B^ z?je#d_Q1lkz1X@hLVzFKW4w!EydUFK2U_V5aHwXuwEO=ba_S28%%wg~xvl^XzTl(~Pq$6FI6REwGdcz1ZJh6oW2tTt=p*W?1+oAg%Xi7YKq#$c2)hkII zw1ob7d?pb;WfJH4PbuVX%x2`baSe84(XcIrr4Pc_F|bCXgjMv>b=%s9?1CT3lm5V- zDB?|S7h5#~6_{G^iRPKkwaRuyf73vlbl&21@`#0kb3d`kx$EDAfihMLW79ifd`ynI zT+WO{R*wUTWPbi;7AA@RB9GU4?_Y0jFLW#&5MO)>FRo#e*C4Yt;?qp@T4bgX%l@mCF^iK&59HJhf`EZeAqMe1dU3G6PI;*Fp|p|70kux3G37!{%Mx28xGn58Ko0{v!&%c(78ac>)+Bwaw_jW zh5VIL5K8>C6FQ&{w66k#Xvn&eb$BrHIMB$@J=-jJf2ip6RTajFVJ0xu^Aj`9df0+4 zFDDVxV;Z&m{5HP`G=L;zXbayR`KKvV}>{~Ztd8->ernlQLKO>KP* zl)WVWXe375XrMlWuM3d-ZC6`}TSGvW_CuH?xI|A})1cfh+bUQ?b+6fbK!@}H;?N?X z%`J$lX9~XX*eu^%;O#vfzWz*hn%ay8y#J=>Fj&uD63bW`GZ1{4U(JgdOp7eeyO@Bs ziUrErpfjf+zCWn=97;`Wz~n;7N2dw$e&At@4>CXgd#OIScnAu50A@BfQX`H7epHMm zk5M4@|Dt3WZJTK%$8WNE0>L6w)Z z7ox2Pi$L%Ohd!K@XI1lBN+LCMk=1L2zS^e57nw-!sXLK!z=glL0h1%yZ<-AfS>usxE_${*W;!W_rIwU)T2|W0WGNToK zZcK{fumWw5gG^3AC)0tRwNg8_t9Z5h0}OJtY82{x!&zFFL_rd0xHm%bkaw0%T*8_h%Obcf3spd)tTWUZr)FEd}3UtVixE_9p7qA1}Rb$CcZ3q6Y zLz$~69k%``_di~ddYY{7;rhOkyY9TIglFhrI*|<)bfLp#4bIVQh3wHW&koj0%IpX0 ziaMd#F5;-5d(=^<3b@B%Y_?E8gF_T3h?CHb5^x*+YO_ithHO<0@KE0KsC_iFB^>C6 zGO4{K#^44IWeD;(=y7AJ2-xonUVOQ#aUmw!wP-S(SYJNR8G8fmm4XDmSd&Q%IuPYB8HNz`zal0+wLHOB^BP zGYu{HExHkxY3N#B%$%*I2dV6^!HpM!>;s_#3Y8O#fx7gD%;OM2w&5m^?EXIlRt#7E z#t9jj-2js#I70P;h&jX+x~?jizZab4xqE+voB>;8-&7WB>4XKXVy&5{+2I z8;fg$l-jAjpdrAZ2HY3#ct`^bGk_02(XZNIN5{6~Ac3N-4l%&{yuWnt8Yd|j`V#L)!&R3 zE^)H{=)s=#@k|rM@;=ji^g0*QE$CbBn?vmcbVPQ;>$BhDXl+f*ArHJAN0fnI*~3v~ zLFaL%;o_4OT;jVpAnTjNhJO92&sTkUL;-m(FD@9rwe{i)e3i$Mwfh1$3E(8OLRY;N zLI$P}PSMB>h(~11AY#_Tw3)gtp>$ti;pK0(A%l;(2;M|QR&F+sH~^VRV*ugtGt}FH zdQ7p9SH!o-yb#jiaB(FEY>CfMhe#Y1(*e%jIP@T1fI96mcspbgB+=tFnZRfVH9+K?fPnEl zXIa}cGYI)5MDow+AgHbjeFU71_)1VOFwc)qy+I&uh{>pe-waJThR$(;rgMs#sZr2g z1CbmudYy&tV3(U{hQX-&Lq0kDUR}5K$^QeJmNk_=3=|`k#*kmA@KT&BipxWr` zW1?)zaLHR{`UL#7`XU@Wx&;$Rs~-7;iH{}D8TY+-2AL! 
zaQAHbRtakcOx9EV6rpXZV5OjOM*&*4zcT6HYY=92kHphLYJ!&#vpyy8CzqD-yCr>n zt0N@zbavJ2+e{O?qk(%2sB)g{K@wc^_F+C%_y=_yG0hnpz;`3|j)j?KBBih-?lnqI zsKgVfw;mdMLcD>v;T2CY&KB;5zFn70TleW?7bI~>8xgP{nTLP@#7`r9F2Xw4+*#h2 zW1m2DLfFVcE~Y$VmJ>*p*1fuFrP|szcj$ zK)<%HqTGLAis4n}D^K3SSAFg^5lZ3rc$>2Labd^$BGnZ5tj{X`9a2+3vwb*!L*CQ{ zc07!&Vvt7sCb2LkgaM|ADt;U;T8<-Ulov~EMeKK#$a7UH&+f(4ONJ0Tfa>pxg*4bk z)X{Z`!Y-l#@0NJTO8XhRf?jXI(~e9*6}_0-2%}P}aM4P`jWULZY zeRPIk?MZs~TWfXIf^{Q?7$}|>Msn$f|L~BoGsfRl%fN_AEyLy!#`Tg$>pBt0?tY$V z4jEdCJ5qf)XJ=0~Xd)m0@#ZkK@pSmG%%lX;ml3`RDKQY9myinAllWQwKwu;0 ziBXZZV9tu<5^MbweBI~76t(-KhA4O&g50R+gfHO^{%f?`k2rhvGFTG}yz4?M zft{n{1qOtl0xzPsIiUz&ty;z3&^C?lAk5->^^66*6B5JfGrct)~_Ir=` zUvMFBUjtn69q1lSk)r~)wAzz?b=xc|7Fn?YyC9b537@EuUwyLX<$e|IF5?fp_@?3( zC&xj8>K0F#WEDRpCwsH#WvJNg5_kqYb6kTTA}4t9tYX z&*?ID7LZ4F!SB41?`wG{t=acSa#j9H8?0@c|9+V{%=8gZ>!-|3TWnNG$0`cCVHGCe z)FUMFF5lD8J0MCM2~8W)_|KvG zfa%3U2VX%3g&ht_x7&{?yYSkdPO_!jEM9%aTJE^uq@<;^@3szPffwkEv!D~4Z^|R#Jjfez zHU4H}KV_h!xE(5TxZBp6D+mg4usZp?R=hP@%QOp_C@mAs1dfAa2FZeD5OriO>v-V} zJB-vC+ctw%YF_-{_{|nDc?+*&;nAH4V>^TjF79jd5$oHEXD&y7mQ33e`l=JK7m7I! zX-^uKzg%278NNG0{uX#W88`#)w77So#1ee6^TtE%$zXsWD+CBMH|>k?x)vU%@uylb zuN~&?^K~lMogymRU^)^NwV{B2Qoq?dX*&(OE50auhjg{|fRYoQTw^V6ONEZKq)oQ#?l0vZSnz=h&ouztq z2xR*TNbPNU<0fr0!AvsL1B;rhwmv*>=q$Ld_KOWMsLF#&ZgPh8#xt06VN!%iT1#0* z)wKi2t~BV%lBi&R4sQGCgwgtH#4jqA4n#_(H+m+{;9L1^@DKrVh?8lp48lOh6(3}i zb5dk$>zg8itxYD{rV4r&oZciZyhSqlIkep%)(#suCF9JyIynYqT`MAZHy7F%06EM1 zYD6z5?Ny7v5WX8_N{%~mK4!}sJi<$<-3ax+8qvYw3Hl4$^8@7f{&mJuC*j|j`lize zPGsIekx7}36|Z{L8@J?+zvcbV>LaY7b>QKlWrf;lbw0atIx-L{LA_BgEDy};>%w|O zttf%>;DKfi2$?L*#kgcPUd8vc;yGm7G!m=lRnPflSodOv3sxtlDD zPs%7U;w;t*a(*qmSXKE+@t3~m(^l$t;+{`~oIkPIZ`Ir4=vjuXCFdebINR0q*?xpw z-8r#IhUk17=~1NEYM>crQ1oZ6@La*`yw~QOhsqlJn3S_>wA{}(WX{ZgwZ7Tquyff+ zqba^zp9e=jU*U!+T#YKZV74z*^#JSlZ1zl@lr%Jy( zo}V}Wp4ms8A16nm$Lg$iSD+mUM`YqNS+%*`P|EqmfBP~WBeN1Eg0bR zwvVa4q>m`<*J{nl2U-sE57O*X3iAM+{;*4aMt;}*-pyG>-ag#-U94HraG~bENFg#c zZ^Ytv>JzGqtiIa;>h@ll@K5X^X`NpH%_)AM$wVPPDlV5VRwQhNY*f@sLMY!Cv)aRX z`*Yfwd!u?k`fKUk=sOM<`ZiO4pF0*AkrLymJL29b&Y+60KZZBuH9|ve>)rHDVCzc$lLRH$-dAf1sU~%y=Nu%(ZU!}%j_6m-A}64tv2=CbA9q_m3HQmer}PAOq0UL zX*$ktJWUbNJl``~>9z2ZE+B!vR-_}@y{QxK(%gL3qb4rS(yXRlc|h_pyMbMbZ$d*NZ6I zioXd;Y93-2h>O|pD>#)smBm-ekrXZivgQsA}+HJ&pT_FUtB7_ljxz!1TJ`cU+wT z(((>VHJpqQ0HxtXc(zQc&z{Z*OK8P7dfi36rcsZwPVL|NcRh@%;*U@b6p>;etR@g8 z)c~6r+2p~|$Y*WCMi{S>zQiPpt|4Dxo>qoI~} zD?aLtl`9xI3O_0tJg4$hHX{h60!#>19#vocHRcGWZt^`4qBQv z50b>1^GiQ0fEQz{S`~HkqHg>U`Z(tyBw%XM(*mTOH+hUR^cFfa&886{UAx|nT2nAV z^HuU)5zZUCr+3ZvowjqxS*&f)Gb2Y1UGaZtseEXI8waqBD))1c&qchq4Mfwav6Y&+ z)wi`x4JX4@K-O73D1js0+jaUhYnZsnrF?`gOb{0-tAvCDU-bf{s>s_r#Bbkc!*msFEdYe0zZKB#CGr5%r)20WFu02RLxsE64 zN) zM&hGO0bdF(?QW>|mB7i~f;U0&gL5%I#I5 zjQd=Bfv)?fvbdP{FSSIh`wmDpg{$sA<|1jj99j58t2+&)HvAN3|eFZw<2 z1(y$hQ;e8>ukc=IS#OwVCfD*8<7QMQcr95a{qwLbTm2S0Q2!Z^;VMOwnQlx!IMgr& z?E0O$3+%t;lNbFgFuz`IJ5AXPyo|Uf((T3)46+qU(SgiF|=u8*dcj3-NbEA~1>{U*@NY zJgBW&JK!60iMwfVc|7~Q%!@w6e>J%^KFq#aiE&JUdreBPuUZK4Mos(a;yCQ&?LRQz z*I(mf_^O3~b0ZdosAHVl%ZRnNR18&LXm~d*+SMD@xH2&t04!I(2RS?4-!X zuKtpayN(SZ(U_W-6 zjBRgx!BU+^+aR=GF7}Q+c@dE7 z^T!}Dbz$nP!no;ktxs_AiU}CFtGLg+Yx6r`eX1{3*1g__W3>Qwo^U=2#RsoY7KzrO z?Kj1*vH4bBrYw{%K=V|3kMNhcslOxw*iloQeksrL&%!;OPu~BB9M$ZzP<4$q zH8|&3%Jpemz17zt%f4Ygk58ckL`w&*@kf9@-a3W8jgWwHx%Krsg5>XuSb8~spCc<9v5B~$}(W(!DT za3xqDx?z9or6--f07dWw`=_!$63?3=@+*G``=htJnYBGY@`(4XbuM^8BA=d4MjZBm zoqMF|%R$~2jCESXK(cL5q41PbUx$93jkfPulB_0m#ub!2&X6gEP7;CeRG*1u)r zjD=Vw6lUyN^0Cm0;{bQcJgXI&HR()AVv)`JhdIEYH zu9zm#TPV`VSvS};L4`=$K_w#Zx?wkIq5%!a(@GQei()rMz>6Lv_k;e3${%Njbw3(D znzE@{xEDR#eOG)|^P!DLe*B>6ZtRN~L7Sd1PgkpPpEFBl5yH=#^bw2T 
zOAY2>ZZhR425U4I)eq&h0J9>~JwkrybAzRG==U)>KOU^A55Ir0+f?*?oLAmoNQdmR#<*I}S zhd-&aB*>4>!$?|f8w8ywi7OeG3_33fXk1*uN|D>U)J$Ds?&b7_=TDd(7O6BfAb~Bi zd7cf9QyzKL9QZC-8BIRQMMiv9n8GQ|*?&afiZsv4#P!EdhXHGt>3934# zM*>QmeK7B&qcq$BW4stfyXExNM_~`=cMg%nC&93vx_u;Fw?Q0_SxJd;iwHf3cm#ce z_u%%?q!2l@|E41`dZpi+JVO#)zj)6(gex>PnB9G^NJlc(Pfs4iivBe_EUs{_?}RB` zWykrL?>&WZ8`Wh~CV{jnk@d(@Aln7}KWZuub)uIg^-~^fd4MOQZ$Z~PY7R-#kKL+h z)Bb#*L?qtBtD8grFAIum@}Ff5CW7An+s4hGXq_KvmOXcuB~b`H)kNs2nNBM|@yg5m z-D>E5E&QUaltRwkbBv7VvUZJHNzFue(lBGLHuxWU?<7@Abo%5Kx$PrJ^-cQw^6<}m z9c0zKQr>YTq?Z&98^1aSdjFevzCnhR zc!ig_6f1H5sk;asm+U@yBL6hBPwXZ$Y4j}B-ebb=Pc4;(#bg~3tk-+i`UkqbU&;;uiclZ zS9kCa6kfWDQE$+2iN%HY>aP8hrZsx}$UAR`sMP85B2AbX^95c~1w*Sc?7Y9-0a1O) zN9n#xb?`w0_y5Gb%G}J%wJ7!HwUrFq_u`^FNbA%VIE!gK=?DX*D>olW4Z{qU^G~7k z4V)0FBk(&jIV5EXX&RUHyK;qD`mFyg_`6E~t6_%q?nhJ3ERkZ}-xA6Wc`>hhM2CP@X))gY3102ChiO-u7j^aYOn=@4E6gT1m9vC#W@cX!& z>ypxqPw}1%btnv0SuB3(n5FydiuIL8_m5l_{fJv6XdRS2=Ox10_FE8xn9W>z=>rd> zw!1~>TAh~a1pcE!MLf$jpHOK5s0N6-o13s_GrZ5vvEwy%Xsp?<)@}Cm_Y2DR5vJ=p z4e(KT!N5l9Ij^@0)~8=Uz;O1ae&5mIb9X_>Lsa!}@oTa|)w*slRHcGa~(8 zdK)Zyhlw7Gc`v~PRP$eHnenl^g-hy%^G@eMKRyK>)|G%=a6a+fvi7h*ZNzhCl~V8 z9&jV6qf7;QrAQjon05*sRLs96ZV`g2=%+=rO33ex*IKLV!;7c~GR(zg?tMS}lI1vR zv1C(>h|rLt>leq49oy3f`*6Q#-U%7(D@s7<8uo_oGw_r3EoLbt-%69t;MNC|H5w3g zan;6u_NAX)S|k^J9A#|b9h_%>jq$-d3P%`yeS9ATW0H90>%O;j>1m;T0ZEp0?t}#E zE;t7Qh1GrJ#3i&XsrQbaT%iTdti!}-R}~Vy#5ldz4W}4=;OoSe;EN*K21xUrE`-%h zSp2jMfK^PGWxmV+71WM=|16TVZ>B!R9aNR?Ey@9w z|Gs2woNp66-qk?e^NTd$zrQe)bbd#71D=ai7H$QW75>SS)Q$)hM$M0<;o|41V=zC) zddh;vhbY9i{YrjF4}6n>68!-4H7*~%@`l$<9)X20if*`5%Anh# z-h_5~`A23LT6My#lP~{~;cXp5d+4j4@pE`K5htD>vv*!SK-n1-_SmnfoF}|Q5eimf z!RM(Huxk1zmvgLU=-^q@J8I>YXWvTI#4>p2RGxYk)TdbDxF>Gvupn?wCF2$3Pb1$0 zVws4L{Yu>utzt@Cv+Um34GogOxQ*#mUG;+QP1s+~fG$*VoqqEVPUtI>fK_?lK&f)! zV$qc)O}BjyU`FkqQr>i+n{$L8M!5@Ovbb{;HCFWbo$J)&)h_~-WY6&|Yiwn3+qS58 zJ~6oR5h9bJseA#NpQpHt!aY}=v>$Z2am)39n_4T*M|aMkl$DFDcuY-{OZ-zH^JF% zK*bhLfzoq|P|4s*QI=1iV2tEtHeM0`viD~MJS&577>yEJwUv`1u|5Rn@nOBfds~2Y zr&SwG!%4LY%6}~Qc$1|$NmjE?;pCh+krXgp-%WXA1$Fu?d53@I2w(xGbPe&&M>ED_>67)+_DkX2KZN zmD_H@iqn>Ah+$v^>(>)vY1}5H9!vFPAHL0I{!Oi}Z(v$|LVsJvODU*6>UAB7AJqEh zbKHZrXxfU;zhIq7!3NY|Xm0s#ad!o0T4O`_^`md${^;Qb!oTYZMzCcJY2Hjiw&O$a zD7{hwKN}IwFVe?5WO(hv{uwhdy~^Tq-vzEEo~9nIrOa z^%U`MiPVedqrN6pEgN4h_D26!n5;w8Ay018Mt*ch8g43HQGS4AeZB=-2F&~X(`{S~ z6SOByxY;0w9HIu@1G)9CAqf7zTedLD9(K7GU-W?D`Im1l1+JAR{%y0a;pKq{k{Qd&f?WnmVg7@kIwv8YF1sJ z&kBjf?&$(zP`|xnH*3RFZek}!eZs*xEZQ+=al9+M3SAguc+NEgl0O)yWjNe4?wKcY zoBj+`*^|73w3%D#OB{BsuHp?XxHEI#@>3Fq0OJr!q_RmgrTltl+j&d7XDr-u)i4}y>L3zQB(U8Sf)2E>I#>q#q#t8q1$Unpy zNf`w^lw*V(R>Ri{`;??YaDqxWM@#{!9dQZekoEaq3;}+6DUC zSf}XJpKW5_*d6pI21Uzjj#CX=Ybg?&K(uw4*t~oa&~m&=D&V_sy@Wqn z56K#+5nBV(Oc1_^hhhN&I)&*B!S_aCxbh{4)tY8jjT#nZ!F@4DqZCi(oPLn% zad0#52vW^jd6##e*%*KOFZjE-uZVdl!gp&=@1kS|e|DVw zYOkBSEWTVJ)p7Va?^WACEPRa!8*;04CEk1TO)}>&TfXD`mgcXFiR8VWg3faM>AnlR z7Cz2a?X+DHd5dCjmSDCLm@=drYyulPqMA6Z$OT$O?qKKtT~6Zd`|pdO>t{X3ZL0o_ zrwT<1ik!6Fr|I?2KKk+fiL3{e)kD_=!FJX%f^3%{k zsIgDP4e_&NS_ro6Zxq>3#5Lv)=oPpZ}*;sJ%`|4LwrLh)g<(PN7ggWf~fc6 z=b(nbUqlZW&O{!z7V+}X9S5Us-x98sRO;ye$J$#)wG}=6!q5Q0rL?%yBE_M&1Zjb` z6sY1}q{ZC{F2$h~x6+ojxVua6Kq&-I(BKIW2%22}_r1@(U!E_|TJOqyI_KofoH={X zp4n$+etsQ+*OVl*yR}3lW<*Lc<*MzPhIq6)oyvsQyB_8uwqK!AT!bnyBm&H1YUIv5 z!Kyx7CfWzK517-3V+j5dpo;Ea1LovQ!q$(3<^OQ4Tyefn>QL(Sf2(GJw~5}Pjk&jv zVCvghZR2Z?cuIaAgDQ+^k)JjqeEZq~SPxXo-YsTtNZ~_Mzy56c+%?Q+a!}D>8GtRM zitX#Bqw@SI7H%H~ucBDJme8E-ZqrPKtLoGA@j>J&LGjw(k(Qv+rz18!lpfl^$!;fK zXZN2G=T&P62DGU_=85H_h_e_Bp*+3Em*DnWb?8k*vem23$`7C`J>O3F?uLY}TC8tT 
z`ubBFN_{=yR~N5R#MiCz%Rvk>x#{sICSP81kw`!gA+%>@e@bF^T*P;?<2<-WYqO)nF=h#x>8OHLWDVw+)+1Tvlb)gViy*WeJ zq<22A_KON1W@APbp@jy3BL|C#Gc2}IPKQ(yA44!V=8xg3WTG7yGqZy zip4O}QFi(ru-km{DGsYmJd>wESpis4CrPbDd>N(o2;U%dOnh_6>{PKhaVi~>Am8Z6 z7R<3`U~SG%ythvjc>Gw7#xm^XMC~Xl zjlNmLD4BL;ANDPQ0atTz+o22!pX8;}exH4!MsdFxuqVs`3aZQd;^ZE}lie;u)cHrr z%=+{Mzm;2jZb?En1TnCY7F=<37mm8U9TP3Et1=KDGGqxNnF!arSNaT456}UiiYB&I zC1%Vc6bSylBX!_@H{8ML$(zp!8zwJ!8@hO16mS?)FvTn>uKcE8y}9xs-c?r&TE!*y zYB~9y*fWmk zs^s1lG-*k?#PYL4W>Px}Z$#(U662vCW$Y8Twa0Hh3x~J8tCWE5QVNo5K;@OR+BrOw zTdwm3MCJWoNwD;jO$_UkhvgMPAKvbR$5>#z92>hUXH4-&Yd;Dga>;kfqnQ7yGfw*7 zQwxxz#KuRGkU4+LCs6)S6s41Rm~;FXm}keHRUloUp#%+s#uSuBR>D>psdTl!lJfF9 z^E3-3W+p(x=yJo;)9F-S8dfbH^3phNzecSi*thwV__B9GzD&eqS85qjd?|5|Vf$G< zPf#!0eOU$E&2k*A89nh-So&Sa$b?qV2}+{q;}?8!yNIP`en~fMcS2lB^|u&&llmjz z(9YKT;!{%A$sH_>1vH{8yTE3$n?BE`oj7`h(5CgoN)wpJu8_n<9RWU8^hNj(1ldv5 zOAGsjowR!>e&A9rMC8>^!U)Y}_j8C8Qe*dS-sF0|a`R8P^d&A`0;>c(e_eEaZ*|+c zKhh%EDxx@j;6<=r51U-&wtP~6U-(1}vjY8`Y1ZN06b5~o_PC%ke_-!Pyu#lTGWHmo z{7gBq7vz0|j$dIK73@_O0nSHA6?0ooD4F@wLdfPjkj?fw)1|Ta9|8RYQB585)uKHB z%#ln*G&D2Y{BJ1YnI<$Hk=t&8CPFeT2`N#pNF;}0wHl~}Arbud{?@Ff1y$TI%dsCF zkwcCYw1V?Ix+|Ao3p#L^+vI5^SeYZUEP!qkM-3#GO`7+KN6JV__f`6H5N2c4N;IAb zW=*k($7(y=subdkq|?e9HvorX3JnEIk$c^qR_F)FhpY;L+Tbej=<|gEux9mj!Cwib zh4!DG6TsHWS4)2Rv@2+Io$XqnX~e6Zy*yYu)#x4&=0V3cc9up$x?!sA_q7TAxFiOB zFY45C47hDcVyAAk{cy```#$R~gPrh^{QgONS4ak+3Q*D^dUU!+;z|@zwaBz;&y7Y2 z4Iky+ex`#;i#oP5Q(R=#GHMlWUuNyus07x^En=;YUV~PNwJ21fQf#xmI@t6C(O&oG zR-{W*`RcT=(g!ctmaB#xmoD}axT;dwvS1xV6IUJ2e354Dwo+Sb2c*AH7HLIDcU`W4 zH?Sh{Psh=k9FH>cZXmJm1?0v118ifXPyP_@+s4F=T9T?E_L7CbNUJyLkf^9 zlKQY8T>jIatT|@)_Pbvo@ZUFH!^xha;(tXU{ehA8#bNdWQN^&96IiOAX3v^^o~++RP6&fRNs+n!i~B+wK-J&i0-6u_DK&H4J^b5 zQQ9rUCdY{BqVxbk6wv8bdfn7dm>UAMTl!Bku4LfqZMZ5r;)x95KHX7c;bDqmCH!xK2T6u`t{0=HI zolS|={EKd8%5?aKGr*6!#IB_DU6)Ol*Se8SW2pdak3D{Lbj@tQAPf88h4~5+0fqa5 z#gsZZ>zJDvZ1@5gqddc>2QGRF?;~rg09Ed~uN)|DA`Rrk>KLBTckpNHJyBa{WIOkT zUYuu4%vE{w_+v~NZGyASnNYm9=`t(vJy$ZAnh^Stv}B4%_^tyy!dPC+FL{02Iza=3 zgk@Nu?#LOk$(!Y|2GMh|xA&M3kQ7CQ8N_?uxV`_LXRVB(GcOjVYs$M5ts-+F8!|55 zk<_scS-c30#V!{)YAEMn&qEzq#$0(2mw}ve=WYXr;Sigwmswe6Zi6eE?IjmkoY5rF zArlHicgGmaWBk!9TBlq$1%VLFljpec+onZj$-0?8*Nb46CL^}PKm}cXwI`S(>8%pl z5G2BerV{nk6po(&re|mmotoq`9)6w` zffJrgj)yOrZUo(s1ts*rm>}z~RWzZj%GfFu$!^&5Q`O3=zy4FijyCR*kI@tZh>s4T zXqoOnrl2=_#o{DjgFDHJgB&JDgh&ncD!uC^Fyk2UIt!74M3p0L0zsh{214m}q7*Y|Ou z@78~^>oa8tARtF$DUbu}0-}XLFcOU9o7_ZoT<0E>LEQ86udSFZ!J^GtM>m)~D5Z+8 z$Hf}w`ib zWvYQs}2$DfKbi?MEVl`jacdbYfwny zanZ|3TNu@M^f;}{o`VcrOwlm4N3~cH1Z2@GQ+B*)xt_5nJ)R@^hvcNfZEV6V zczwTgF%>G(!WQH4#*WQwuXHj+3&Fl<8X%sY@H9A68rO5tmQMt9*E1D-{6H76E(HAUiAOBk zbvl|RqN^=#LyxxaNEQ$6!D9*V2NthXE6}j-;|cNL6H70(sxI^?zdSGbWKVC4UML*E zDWz%UI+=5h#uHp};*mtnpMMe%kT?9lTt9=>|2BE=e|YdeoevUI7v(55-)OTu;}(37 z_&w!bn&xY5my}%JlS2yaVWTScML(&6ncbN0q<6m&q+DKjGDTbcU-Em_3C80VQIKR# zJbN4eyQIL()%gqbYM*OYl||QWf$Bo1{f&YAAIiR~uy*L$8Tu=(rL8e(#+4sjsbxSF zs1kBLH#_qjcZhcaUt)G(a94Qe&Gqr-DERW*8Dm4$vEU=IiA^SCs_uJ!*EWF7{`f+peFi- zJDaR$`x_RL$LCes8&HW2b#Mb^s%OJ}tkDMKcAKkHAzFoC#70?Se|{KWj?=sFMNw}1 zZCxC!AFE>@?`ePkYnV$R->i+A6L82JiW zPm_LT-^9K^iB&JiW;j0FpU*ZQfqP?qt%p_O0p1QBh)!ovQgEPI!lY=(Q1GOW1s4BY zanBFwI%fa;Y!72(mbOy22Ucy|0f_FG31+Xlx`Q>oUU#K~g5fHvkSoK&?g@C-F~7() z!N&A3UZBGu6$CxI%&o0ykH-uwG9oWmc1SRBi)5_>txP_+!7p9Vy$!v*k!;LsTpsMA z59hOQ@&aU{7W@Mwa^rh;u-Wj>d1@y0CRj;p$Y%3JWJZ@naBO?2_vVq`mQKLRPY#ZE zxIS^p|B$|o$$sv&l1i7osq$GUvrQLgWAIVk%g%U;j{D%g>ppvX`{~_l#4Oo1H6#+b z)Qu5QH{UyPw6F(_9T!z;g7Ql0%DC_u%P=4DprE;=N!B^AGNe?yOMmci(H`l!-?T0U zap0UwJ$gn3@w!hhk7-;Ry`b%`|j$xW#{> z`u2?29xfUNs~MX&t9fKwp1m(+-7=U(y{Izv@(B(sSkPDpD36A 
z54wC_Xiu!b__k+_9idv`RTMMB-3^Y_o+Y3RmzTr(e_ovP<1-Wn&HKqrfpbF zEN}D{&Mqchf;(Wyk4tt0`@_domt7LdWYBmKWsu^3*Q`s`zp$|B|sGS`?~EW|9YD7-vG~4)zrav zFovpSS>pQM?8!#Yk(NCSe-J42HyO~B28MAsUHK{z-Y5raEqClMXb%2LF~1b#Jk1Yl z+z)uh?zW-Tm2pi`wf>Z$$@KkVeFjHgkW*&>-1=9CmpfPp9XAA`2tcL$aW1pb$VaXT9i|J?)cd@S^c-@h5 z*RB%X=vYW<(K24Sas90zbdirqVb)ujzq;~Ej$t_Iq7LUMt&R*3v&ij2DDjV+K@o#YTi8|@$ zv=CCIS`hNClFPT`sVWz4e6NZ!7P+i++%5wFD_*Qa$}pbS*lpa>|6Q&E5q_fy*`@>F zmCwspyKd}$$VQMjEWVwQ7cX4^N1O+2L8g+kBW$|Ne%93%`%#-;?mERBw-;s5OhN?c zVRjcac!dzo!k?ql^CjklPDLMS23(NREb>)#zZnY*AkdH|Q^k9WARnAdQszP188S`A zcFf}keCnp_KmWJWZ_q5|>&i0+V4e=)U z<>$Dlo0O0VPto=8%r-n&?~fXj~9mc ztOid>@LG)k{Kqk}HR<`@eiK8h+?S*TewDQ}L)Epf?bH*m%CBDCuvnKM7ThsO`NzI~ zaTNR3Si$tJy~AdUkrzQ3$*DdQj9e>A63C>Pm+dmNj-{T-2rXMWKA=2|MV8 zdD7F??;&yW$IzNrsG3TnN|bja%QGzWTOD<-5 zZC4&E?B=Xv*t$XQKX~U$hC{7>xq9V&` zLLu^*<@5o=o&g43U#_$(^G^0p-p5$^N#O~K!Y{cHT@P<@fT(Bxp92eK{U7?O|Lx`f zabU^3-p(*X)S?J#$~>)J5L*|7nQgArzd9RehuTE04Fi2F8%_ z_5By}W;WR;68&Eua@|+I8_vokO-@SsjH}qG3J%AD$Hm9QQRBHEB!^{@*Saek%U8EQ zyl#a@|&u$viq~&_9`W=Mp8f} zTWihP=QT4aGHZfG*RN!#cR7Exe?Hz(-#jboaOutHO*Z1g#20JE68E>=2WFy!ZDLr| z@dBTc8#RKw>=smd39M4Y4Vic-PpwH&+~caNn%u9NlWZTN6;iPzB>$TONwqndQ!Xd- zW^jWD+}B+nue$l}%dI9l%$i?n-*sWQqzE=zk;YJ|@C-_bJ}&kl2rl}eK_y{e+3qkA zPvf9)uXBCcC6yrm4{JnnOoO_`^QSpHk>?=EMdaIst$-!!suA=1-OpVYY zu{TPsFx_buufF;2`z@p36dw*+&FM3)2Jo?;EmpI&WLEnQ$v$yQ&j>h*ek3d?GSh+l~e5+}f80b?{-$>Os% z394P^*|%IZpTM=2io~d_J5k2<4(@Ua3YXet?<+np)PH=##>#5)x6#?e0>SZ@&3U=? z6e}Sip-4i-f{C5ee%Krme6~N|9P>DAe_qhW5s$lpu-f4@0*myl_u04;?wjkpcu{A9 zce~gaURmvn0q5oAwVMoaD*8AL3RSfqzkmOJmg3>Vhfw{pd0|$Y{*Gw*0=UB$=nL4< zjs6*pB8&An?fBeIpGs6#PcN6@BnQ|^7+`9j%%QxH_|jDu_WKQgqO(=v6!vvs{VSi> zuU`v{mQfap64hUdOG|rybay{mXQLk!U!ems0bQ-Sx-9(Wen$J5e;RNn+KarBGjFcg zdtq(O+;hsmKA1@1ohxiQJ+#H@o7IGOEIyUP z2IrK^V!a&O;OU!mW5}(_7U``p{B?r@_uc*VJmt?SKh z_1=rFEgv}^CfnPZ$XA32Sa4c8V*~q4la-v`Xef!Twd@!t@fz}~Y`(_VCb274GSa)! 
znKJ4YD9QiBfw??4?$+EeA#M5(c&wsEFJq?pOFxVig205qg@R*}W2fsU)EI~-=PZo| zlLm!pBb@AG8I|>#S@>SCprBy!+0FFPIz)_dF+TCQ;Q$^fd&_`fxAxI!siohW**K`= z*%SkmtZLyOEi~kS>cq%e0QonHdlH*4bk7wDCfn- zcagY^)i>gnm3?`baJ1g4Mei-*H~3==HM=>|u<4&T%+?_$*?v6dT{;GczIge2+K~w# znZQy6G8Af*AQ48kC(E4I-kG(1XNIqP_3Bmj>r(8q_f%7$h@;LN&y_%RcXf63m(!e1 zVj?2r*2|4B+8bXdcqhB9Wfib1J`K>SZH!P{%YGCj# zBVPn#vE%*@L0poZ!{POH!<#)d&Z#o}x`0-X=Yz5j8q_Gd}h@H_>V!)knbF1<^vDi_~@k!(;XR4h&Md8CxPyL;+4eCanl zL*I3|=sl>Y^gDcdJfoOshUAF8YbI1bV`myNKR@s1(u4H&3t29|1P)}^*Vm`N9u&+x zt#m5%iN~$TyaS7U`}Xb5&$#4dP+|>Sx8tGnT=46g0~;-|xYWAWgY(O6zCfMD+Z~na ztM8YKkVE|3YD2*3Zs?BPL>{wG;al+4+|1tXu1n+Jg*dvIuH9`{d!44giEQ$cAI{$5 zhyHQ~!zYZ3%gcrLRes|pFij<8`JFW3n3tzCB{-k@272CeC+*W@rJD%Tx~l}b8PG8X zIyOcbhx7aT_J&sv&js5@hPgla&lP5~{P!B)f@duL^H(+8@)ldULG!Jw`ForjaOb!9 zPv5#0?NymUA?K{cyPVmVzzg48JnwqU0iI6(rmO%bT1dEzOHSG4w7b34&c>g2Ww%iW z|66?QUrzk=y4^~nz$o+Tj+g68HDy;?D8l{f8OcPOomD>)*GNZT9(?16n2x9 zUn?P9&bR+XBRjQQ-f4sT&r~i?6WzQ9KHqmat#Y{n?K9p~WJT-oS9ak7N@c3WC}-#W zI>~(!-@Up51&jHVcTg-H0^YM@!95|Oh3HV!|UWF>b zOLKQ--0}STsdc%YvRmlj(M&O4V@6Eye&UW@a+LNQzDD5?AKhRgdL^>uHnypI%(*vH z#&{Xhx_T%zt3BCud$u_<@PYFxTfus@3;gHz?}!s=NBCLdj3Is>0Hl7~bYr26)eg^3 zW}B_BoXC?u4`N4pm#-?(x)eSN-)+U$oyw+BmR!3&&^^Nr;JUZ5bNn2E5mi+`FH!m8 zR_fwcT?VHoAfGrKQy1(nL*;=vW=@l9);xr4ae=B?(Bn;l+k#rtSiK# zGQRjvA^wE!3gt-Tj-L7cEPa1VE1=A|{DiRg;LWdL(`L8TyJse$d+%g!%6E}q=eb5_ zRwpGr1nlhLi?4IT@t&`WZX;YiDDzf}^Xi%47X+m^#vt!1vJUNb2D8D;=;Ho_;NUs8 zb13hiHWrb`n-~9)Uu>eybVMcWHpjd`l5gya z?{2Q4jFM7PbWo|pV^5!hO1%>j8jfqClb@3Kb+t=|!xNC6%%^|bmQ60pjXH%YH_TJ) zNZiXGTUe&8?72y5d(|X~u{zp=xl>;pD&29m+~4Kq@>I-uu6p%!JXf}Axz={{%B0EV zl=zT)d^eF*R+O=W;r_jQeuWu|Rlk0b-s0pHc6i^pyei9;aSlI({wyuE5$?^7ULSg} z5TtpiEcVfyA&`k?<$hrGMf9Axptt$g)YPNxsS+*Dvu##6F(y=@d6NtKp~X;>{>28- zIlcg$H0N&;WNc(;=rC>;KUZr1%C~+%jOrx&pfuY5P^>x1J+$r{vcjy*=OY1C*~kUF zxd4K@Af48=4C%*+_bI`yL}w4wKE-t=9$8gtDeEKeeT~1^*WKz$N2UA23td;`@QYXb zsCmtcJe3UF#1fPPAkRBE-KyXsp@DTGsB1yIaNSjMKR+yKFFxu8l_zDlCn6)78Q?RK z)HG4f33)s#KQ-`&ju&WC7n%j zN|ti7OU1w|V{nriM3rbUdYEho%jH}|MGLiIK(<;-}&@(SWgLPs%ydoto)Hk_|H;2m}7T3Fc!k=O{VPOP+9s2qdM@Vde&&KVvR=7k0| zO#{z8DfWCLwB{WsFeh@w5mrg_llzi{tgOW}5GFe|ubN`+%3}P}L%fdH-o z=h^&5gs-01T|X#GL~C`3!K9{^Ipk>NUC6$XPo-7P#2Co}UH8gv<3_4Z3_X^(nIzK;_~ue^r;Hi$9oVyK_WgNZ0(PHd~#YxbW=SxC*6(tR9*H=tj2 zHL4t!xIX@o_%jG4ROM@?zM4)ijJGODG-w%DULb_-QT>{EsP4RON;Q>y@M&&>x9YSv zfZ*tDp$|(yec*qETcM}FUhE$EhVA<={)S$EEo|^Vs0!)awQ87tdd+YH^>EbYgy|a_3sg}cR&7Z^LnaQUG$CTmt_ybsPyi;Z-ZG&xw(Pq4%XaW zYCEsL3-+FoIHA7Sj*d<2Q)lPD!`L*ym&SI_s7d+^mpfC-L-2<03ndGrzw6pj z?Y!xEJewksPAq9(i|)DDzv{z}2b?-UbQKTkzqi=@O-zsu>CPDeLvlt|Y*LTmQtz>h zGR)BVOq=Hvcp0q1)5#X8fK6|tf_MvRTYcu+aewV7M$Bk%HHMk3ArL z-voZl#spz`%~u;;Z<2~XuvB2nnotLFI2KQu1WiGkp##x6=2t zs>W?~wEXu5)miX(;XHkELjAQ9qh9;@Kfkl!K6uKNiQA6%1ojkc7bx2`7EN=wJ4Bkn zy=-W;&^A{JD)DJgR5_Lj!B+``PR*9!iEy!P(N|)MC*xmXb2LNO{Zk;f5Ymu-aw&N7 zRp1uV&9XY&2|~3BYEQaa_OE48n=)|>cn!CBZtj+=_k zsOWs_S6wPwcGa4!L8Z!h?!$Cksv;f@z0(T#z0v)cVJLe%T%M+8a7q3bnWxcum{!kv z`4QsItlu^ng97HB;gI?TG}OMwbLhhVA&Sw^1~=WDZzv$($8d^xh>6IQe>qSUtMmf5 zod23EMAY>9j-R>Hi|ym8^WfB4m_DO<=8%&RE`olueRvg{xr!XMkS@9^V+ezXHqFO< zT4P6xD!LnPHj!u%P4%IQlG}+Hm?xne7wmo(tJpM9R_q@(T*pSR+9|3#aIO?PM@D^H z#BlZQNXC4W=O_nlT(VikjhAWx5Em9CDxm3|7P})n4R>}&=Ta+{5o(-c;G0Bw1na+0 z*O=-{*BDmn?+I_V!UP9(R7F2bSca9ib$1A71#`Ch5h#($a;)8`pG)Ye7bubF4P{)o zeF)niTlY{AMS_NV^GXeyL;YBSd%50{c9Cx|M8@$f8PreCF@Pctq|n<&=)4X8X7%Ymgf>#*!QWL-*}A;JW645GA|A5G4Qj$<^Q>ecYcO}*6-{?Z+v zX5at5&Gfju_95bhAfd$w;yLp*qC4(5^8xx>FGUsUqgCC_GCM7~s>R>c>fX2FSH1k1 zXFEIfH?un)VIWN2RN_aqbq(E8E~15iSO1KOGgd`{B4y(`Qnx*)6OJ>H0;e zA>yrCPO*Ejkh0agXh`7A)xFo9uPHLbM^hi**7b|5rS2gO`7u$V|66_z2DU6-x%1c$ 
zq7-y7vHph}P>l+%Egrmon$~K!4Z>aALcg9sR2Z@7HbpO!^SQ8|!1p=g<*@py=1yIn z_Pl2ra{SBU$F+7YHN8h*x;pZsyM9JRvy80GRXTu)jL!bO^i+u>C8#l4IKFpBASl@q z{}lHbpoA|hCdrH67qg5S75gwCBx?6mNYv`7&|+^ynbB#cq0~3de<;=wmNO~?HYO2R zvtPscOZec@6|p}}=zKVU#^ZM7 z2nV>!eLoft;@S$;(&r>SFY$kM{;LS_YF!fhWb*A3rAl#5j_zFR9WAn>FGMK!-x*N~ zFIjB+rS8fR`4T}*{CX&7IOw5A3m-nT>mNV%BUc*!hL$7m;+=Et{n|APz_7Rx)^?Fn zoA!P-FdKRP-Yn_he)-19S#N|gpcz1QRc|i-Tp=_h<+E|hBS$rbaiX_0Kgk&-OKbik zwkXl6M#>dIElP5zC3iZ@Gg<9Wn5=zhI+PW-=B7`(rbIwv_X44h)5sHVN8B5uyUl_S z6?%MMg(sV0i|jLp^pXJI7V8(`$vEvCCnfGw+?Tt0CWZoy5A}q+fhUw zeG$uOOOVoBmAts|S%mviMD*ih%QP=5a^GTZn?J7^f0EV4?%c&BIH`#TMsop^HkKZS zakii}cu^c`hss-fEWksxjuf_b#<4qCGJa}0n(Asz6$z7hV*$Su>HA6#{Fzc1J@1LB zPAMr4B)6io`g=I@iTQe;p4{hv90^M%xpCY_=OM$j9(WoOT?zS-I(A91IjR=t0sN^9 zlUS;+dH&%G`@$NB@E#j9z*#Olv}*MhY$)Eb1t{J2TPz_I_>Tf{3;tb1cv=(|DBk17 z9W1%qQ;`=<8GcEWH}tl}EX5nR+xJ-Q%qt>KQ9e;#ia-=Ofp~mZ-lHr{HX`cSj+}al z`0#5AeGVhGR@^-{>)|UgdTbA1Eu@YYt&UD9La)x<`)JbK7Y2Ijd4CKS8K<7uqVO_c z(DQ-jAl>0336o<2$KDWGD}2QTq@3_?SlYXdZd*R!FX}I$e;e2hH@7(U=R}VQPD7EP zoU2Jyj_+x}^#{FC1ioq!c?8UF?jf|dDz@)8-KzO_+TvB_a zmwvkuPHf!ub>o-w@Atxxs}0e~dFlgJ*zyGFTq0MWGNCFmQb<<7iggdL&klW&DTBf7 z)K8D4?P^Gs?k$zrFGb!+G{;L@SrALHoxs=xzOz(6akp&AB2F|?^+b@%xjkfSW}&V} zQh%x71uE3mp5_}7E6E+d<9y%dODf4Z{2P+T6{KpW{cw={FA?Fw&%Q*Ad!Fj0pV}Z1 z1wCL=M)vcx3W=vFQpD6tkA^y^bfkx-o7o%GRzm|WsL6JRdv@-`vs&e6f98Mx zuhqIj#Mt6*^P;L9%j(hq50H%4M9MdOr+g=L@Ntqdc!1CxFhqz6`+*!`aVvJS{IR8K zB0*m{#n${eBNgEIR7Ai6PUp48@x^tQnkHlz%6+R)#6J|U{w4E6?fazs)h}5~RCT+l zCPcy$+)1+0yl-5j9yGK4%O;?C%-2h3efM(kgBrEO02lT>J$ItgnoQL1q2NDm>!G_* zYcW)rt=0o??dvO=b+R>6ev?44h{wP~)!C-p9u@ikoG!JnCY9lm=6|G{V-w&5PEve6 z^(Sbse5tt8E$vp+;Z+0>f)milbOZEjYJ?Qq4@i8j_i|qWqK~{alp9ZjC|*Z<@;CGT z?$;g`*y$Je7c&*^Wt~7|{=s3%+H|6wc%I&7(68d)5LjtRt?Bl#JiQ>*-^Bkg?PalGVl?k;p%yM}$7`{TB7i?wC)g zK&MeL-TvusDVHosFY8*h%8>`dPx7zzbdwLd*Xmvd7sV>7)^%S5aR|uByTy5_o<%?x zpV3rmvKJYL7F2W8g}tYd@mYK8v_JNz(R#f4im#k`l2rhI6G3RkA*{{JldAij`srD} zxY~jnadT>Akp*YmJ|R3w{jlF5&Wp#9@|csNzJmOE=shz;odq49{z7N+YvrnY_f^T{xg6DYaz;}@ zO6TA5%ZWF7q?aO&Pc0+LflGi@jm@tvhKgr{Ibjq0RN(-CQJ+e8Q^dJ5RSu(Igi)x} zlOn}p|1f73i7~EVxnmd;chJyeW6rI)JNB4$pg=z`u@Q#s6Az^tR_!Ht_82=3<{t7x zo`ih*s#$Qj#KSB!#K3d`akD%!IJS? zO)~;((!y=@G`JN3X35y)H`He`&E}@qI}DdsI(e>cWJ_eU&m1YABzFf3Ml@3d5qkSTGAYU~2y5qivsjCu?_*U>)d_HcHN%??#`t8ugRHhKTNJe9)@*uCI`cr-?2 zc_ST8bs)ibeTSnv>SYeaty#{@pHF31Had_EMi2zgiW5*{WV@Y-E_$AElRTnKGk4II z{i`pFh*X`8^P(E3=u0fp{5eQUk;9q8TaxD*AJ;IUUApnr9y!8va=QO&;sYJ;LteIH zUpLhN%YOuN)%A&Pmf36FpHIAjpACLXg_RQ*rUNqTBnKiq{s9hqTFY(?*{?YQncj$( zu-fOkz}23uq^rgryatXJ97G$#9{NHusawcIeik~6A=Q|IE!u3w@!oQ-A(L9r;RR# zQcIz?vk|jy;_)p6402}|{a1=%)csBipnUE6K1Jm_)swRb%D+I96x|P`;d#a3I*C>a zgwH1L%ev-~y|Viml<&de!aq79o}co-2gg8Df8C%!K%U7&eb0#9@FDv-rLxs2l-N-; zl{!>RG@ZJLp*#Y(hv*5|B|}^G&HN?AM7@j#C|INc&5Xti%!fLYF`>8ERm#6$l_$ni z7HaNlWmJD-2(Sd-AE|tQQO{!K&YPtxO8Hfn^iGmtaGV~&(a{zh2)>6iZtPlF~H>$j44_8^^q05Xe7ip0v(Im)&4xMaGc z5_=;)zx*39PITFuxe;3Q$%L_-IiU8kL)Jq#PHvie0DWFT2VDM42&krjn-Ub`sdJ^{U%h_L4c?+{+9(fO** zhTq@l_U{dQ$7m_qPJ8i!MxWw2%#`Qc^k0PJI6$BJKa)%9)*Jpcr(F*-`?bNrB>L9% zpg-xbo!T*aWvGp&bVa=n*GcZi{j{V@?U4k*e3^c5^|$6!(He6-&M%L>f}(9wcls^q zxmo>|C?g_IWaZtyihx%MHml^SkAwOM3qYrU0m46kcFx;sW-HI4aK)mTJ8ggr6NO{v z`~KA>z*$P!=QNjtK3vhjuqVoHyvh>%0%hk$iuQ+jZOT_0C!pnDgz*Q`Mfz2Q$RU*? 
z1-IgXuorA>&Fx0iZ&QKw%g-GlhvjE`tSx{qO?3c?PipEM4|bxXG&c$zR^mRmh>N#e zvwsR*?bfrwhGl%>Y##axTo_St`@I5m;1g$Se#8y@(>QtYgiBeDnx=lAzLK~ySgl#L zY0vY!=3N(}SDe$rbU!UX_C&JHm5%{F10Kb6 z?GD0{yx}VwDP3AL_ZXdE%EbloOx-L}B>qFq%-KRMP1?B1;G)D8AUXS0=%R4ZcmxOA zoMkq!eDBV9n3y6S=o%C<9>s#e?vZ(Wx zCPUm$&pR%Agjl2QWx`VFus7_JY~rx+k@1?VFmzG8r6c2E&y?A%tqJ|%2Z#y z-DDjMDmZ*HCaTgbPxrmG(8)}%Xi#ypa;o;CkfV(z%P!25ZA&di!P2{1f&LRRc2-^< zQNb{`rSU>}@>95>B8wx*Ud!f_Akyz=$@w3Z?v5{|1@bQO8b1b$)rIaU&Aq1X-vd%N zbkEXT2NqQ^pO??YtRIFiQ+UK6GoEyk*&n7|JsXAZhE59?1#2Sa2^$!HIcl7my0?cQ z-csZ#Puf2*vCy}RtoWwoQ1|V~fBL1nauM6zVHm=We%LPL?+NxX4c~fbKnPcAc6SGZ z+!+z%qT8Ilf1ySSyGk0o@}!RElw^e2BtMu$?a$Tt*s=uvF)Dk{(n8l5w-hF&R{*Hl z>NUJ6YT=M;Eh;eB7wyd5K&XYnf|0`X0|iZxG225Hep&pR?NAx|{(Ydivi?=L41a)| zTF8Xzq<pa=Kw^69Y$8M38aJ-c z{4C4J-ZPe~kZ#v$9ke~GpLN7YmhDi|KKSCEcm3ud4|BcDW|-#3?&i^L0@DAaCdu)u zqh~k#FK=E@5tQ?j54QhEY!{rOf^;um9%h>!wso0A{o6pTsah!k%aO75Pznr`$_5KS z8e7U+AhyG18JA3}XNm1Ow-rBPIwArv1edxjw@6jFnx2+j3=_t`k0hiRTO9Lhuj&5) zT#%oYCy0_f)ufcFXNV3IhZz{}@oxwInaQGPSjsPI;Y#`DpYj2eoZF!6NnIeDNap`>Y))~vW#OY4vujPRe?wA9f!HB#DOA1i@#KeUSGCC9mzJ?K%N}?_dr^UZDDQC*mLLcZ%6ou3}Feh>zDg#7RV6Re<_2>xG~9HN1Z5 zffNcQ;yNm!*RgqixOa|g3^;2AlnNm~cVHq68@SB%VmHGwb1cuoa3UwE&iP~Wg^vR~ z5>_nPpyov@4{%+wNBGW$fJ2vt?s39{wkNuK5l;fbbAh`587^ypW%Gs|miJoo`-g&a zB_3&PDbT0T#n2VRHsK}lRO~2W^EdVrY4VB!U-cUVe}SW#4h@@`8=5Pcg3VM^OYGNx zah2}y1JS^{-J|QP7d!TQ7p=v~+d!{M|1P&s&J#sJROQcoaQ($1TdWl8pds&r!!Slh z%B0|#49;{frx}H`P{x895REea$V~Yhk2FYLQEdU9!TUSpwo()iWT$rJWy?7Ge-VGZ zm^`0oVWe4WQ*B~j@eqM(1j3#7W!!vtCH_G?gH-mlDlkO>o`LVfq(@RK*xmMTBUB&{ zR#c&)z*X|5`;CZO0TtVxTDa&WT70S_{TCcmxups3Y0`N_NA%R5p+6yhQO653Q&nPE*BDSBVoG5e7``5asNvJz?odpyNwDuK$sL0~ zSK>4#SpbG7*^j^)J2N`T!9PY99E-c4Q83f+MPubrhB9Ioj{Y4A(c9&y=;~4RtYT+b zo2i!f4D1c$9b3NkEFBVFwY(ew;|dwGfsYv}Zj8U$%{^l|$}v{%dwy6M*T%yRu{>YdQoQQlAX0L19sIZ&x1nGejD+G8S;+l@&>1oPH*vq$?HPFhQ^K2d&BKG0yEEoemb8`YWn7s#XTBL#H}9ot1G zcUx|@mGsTdnY#|qKg(>-^2aaQri8`D*pQA`7rCvfd%+F^Esg67U%eCBiH{q?-b44k z_#^%?TDPf!;kDKWKX?L-rGEQ)b2IC01b?}is$lm+34ZY&qI)wdiDJqwtE!pt{3JqS z>KN`8hO1;BhkTK$sB&1jyEx&)sH6KBPBDdQs~tH#@GFSgZPC#)YD12%;!88p|BJUb z4~Me<`-hXH60Yi!2vaF4p(qMtDp#pgDj``$Nyswx!5C+fv{#6 z_OXv;7L3`<%z6G?_wV^W_wgLZ{m=c+Gk?q+$NTd+&h!1A&-z|pgnkzPRM5woqURMZ z#fR$5O)Kp>{NOJFbX;op_uQbj@Y;tJLS;hQ=db{t4|r^O7BU09dhU zeD=lj*)RQx$jvsdP6dZIW;%^|LbQ@IwXF*qcKtPWJ20T2r~J< z9kdDI|4U!&|JeOs2W?Ee(y^V9$}ji$9FY0>>g->VQ~xOaa#&b2CfB-LJx?Nm_wY}i zxYWA+wR>AnL}p4#qvOeaJGVys=Vt;TTxT!MQb{-y`Evq;~0%i0D6=N~~?W^$%Eo48r;H(1C)vTv^k zCVx#N(<%H6`@5_8Gh6Hx2@YR63SH=R7-R1z)Vz=-8f?~`5>Q=+!l+w!HXZ=RJF}b zmDfUqF{E6mmGW=eUl?PDfxz#E^Y2yF8%ZQx8=NYft0So0c{_7tTY4*{C-v(qX0)xH zAN-PH2J37Y_LA{uQ~6)t7w$56AEky%+Lg(7VYx1vxD_lJTD~`ARN4GdtgJvuowAjw};nIId zyJk&Jj5qQmv7rHGIoy>|;2`@VO%$^T+`h^Flb!q0v+t?nQSOA;3h*F>^rKzMY*sjAfzsUN#fBE%Bz`M}^#kVjIs$;dq z*)36zYI=Ni_-)8mll3TwS@KP2kn(+Pp+@;X)5PE`H)RayT17R&tDb9|)nuK2U%GBJ+b;ySQ!T$BjzLz&1-*%^7v0C!RV1iEZzDrLh}$gW_lCE~ z)w-y-0c+DA?3#n!Mjf2yEZ6&1QImbg;k&Y>8cKh#4zk+8pY*$XlS}(-zKEA2d>^m6 zfKaO2*$X_SDpPE4s~AsH7Wvf#YuSyK#GQKmSWz- z<+ZTpV2LYvcGpwF{)^#m^WLcJXjj>_Gnr%M#8`z)&RxkI4pD9qHKCY<#)n!wmM;{H zHum9?01e#0!m-lIZbJq6GFsi} zT2+Ll(pBz^HPt66V)<+Vqn}K@nr$qZp^=ika(IB|T$rWa- z+O_0%F0RMc{oHTavL~NL$*A07-`UI8<{8kT`F4`-BdTFro`K87bHDCn#Qa)ghMHQ3 zC8sdRXoWnTyd}fc$00@+TI1p=uU%*~5vcZ5yg^v!OZ5V(p5M>yQYVv3#x}9U3*IrvV%cmDy`*0?&J0leh#pQ$R+>x+2c>3r?HyG zFD7yRp$j~X6t^>A-l!M!G;=bq8|PAUzsn}nh<)m2aC3g>C4JC+{r^hBEkdvh2cQsb z_w-S+@r+R#34+IT$765ex@K$lg?C5tGsP5y#W1Lj?a(U zA*|0fm}Vz*kYO+}($r>~T7gPuzriz6FXZci%)>w?G-Cv2wX=qVKrP z^`}e2D8tyl_(izxQ>88?=e(^Pl1wpI8{4n!& zRYGOG?X(KBo;=vFJ|4%keNy;J?#Tw?3`!Tj*$PWCYg@MA#&nqPw86O5#)~Y+v_Ct{V z|1-!N;WkVA>`9~UDnsYC*R 
z?=ksawNG1~fBIdvl5guVH+VUCTsyQo*5R#v$L;EyJsGEbVoj^J#e6{fqC@Z+8uvg!gE50St)a!96{z%z&FWJ=4`~JTPknDX7`_>f&H!!E z`EfBM7dNlB9CBE(J0RQ|{GFtVtc0GRB*Xp*ww#r1D0IkO&vZNkWoLRv z*GSIp6}VS3qEO`sgU?F+9fhJclUAAXY6g0!O%f;+;yRX$5q)NXq*;Rt6(9?H$)NIA zH!{ylNdxm641k*8G(3yAIO_YcG@a|yl~~_dYHw@|bv*iw-KRm|2}!exq1Wpo*;5nj zlN_k_lo0&)ipq%e5LLc*ep`2kfGo{44Zd?&oLFto0E(+;^uBS~hzNlC1Nvlq1` z#~L$S`SX`Z{U2c|>Tk|1q7i_ZVQ-MZYS8}W8VRF!w9xYvvLjBVKS_i_xu_hz)%ry6 zREwNst<%OPc7Ehttc41?>+D5IrQ}tg!VRDqdqK85JZ?cpa?K;|VxO~~71o)w4=AuzBf*8hV<^@(^aaF~Wpn+CLKwHG#}rKG zXXl3+bFrBFiAG>TM1;t2ySFC$QQP(-a&y536-cK?fYmA|3}_K%fB zT^DTO!}C0?=NDIh=>#A@b7JzLvxE3|vkSNbsmLMoW%h=>t2XN zXvsguYfUmV)xwY1?;{<1$?VEY2KhAekt}mVBPbv8%Mb>qq%*SqVAI8h)9c+|VZ+v= z8O8mfm7nZ3r-TzLwh^*!PqTd>b#FW)cXTWW-J7{KW-y#$JRJObW7M-&WM3YB@@WZ8*t=t=728Z75Cp2hGJbU@<^|STAh3b%Tf2X6%HEVu z+n#NEV1D5Fk%I!7uFaKA-=C7mIrs|pbSuFe*>N+W%^>we;-hzmiCjaXpUYf#*CXe- zOv48tFDzl_%-pB|-ogBU+3R|)c*oi87jC}DI=%gV`<({+iZdrNOFSov(XE_Y?X?e0 zaXuq9X}cTkYh-NVC{c~|3Zrs7^zl2{ov{a2Pfx*j2e#;CKHiKg#!7uXrOveiaf^GIjcaRrTY>f`EmrYRSiCeiUJ*4HO^^`eA|R`3f=)?^n&t2!n%I_ zRfk5(wQ4l>HhJ%P&)I=U-K}uN;44-=;RTNstV4nwc~AbGm7Z+*{klHx-garzag%pu zj$vmH%|@A+^+3DVYK^bF`XH_J4r*UPrQ~EoJ3Ct!be5^@nUm6QhvIX)e=QyyI=Jle zid424mk57F6<1IZrmr!Rhm^zgjo>#-(F;;x1B_qcft|7wof6;r#7`@3O1r+vOX{iQ z$|2(y$pBMN{B$}G%Imw3t{PR?#=54)&kAi^Ot{mzXM?w5T21WL;EDMi!kUZkVE4v~ zb}FlXn)W^{u@t&S8GrbUt)UX-<7E0I%@9T0`XuhlMNGj)?0CMp$Ndw>OLr~aq%~lE3i}?;@Wn;v39*a!GGRrX!+kvzD{p0_Y)q@w zh3VhiUGHGyDfhyIolBoRdZj6J9}2A5TXkc3koIah5hwhoZ>NH<-d)&u`0O=OM{rfu zW>Lh*4uJ=1hdBzlFSOgnyV+U($LH-%6U9+#jZ@^u;SZ>&$vpEOl$#<+D!=|)SC&Cd zbNwE>BO?s~hqkjC6+6XGXdmt&JMPShrdO=sId_kn=4YlEIOi{8n6bO$Wh2v6=ZRLU zl!Gc})1d8%BBEpb^4jIJyh0^0*4+5^YD0&od!Q>n^_tEXaT+6on=DI|6_3I4ztl}s zFz5f4t#MhotWjZIm~eWZ?)t+?F4+(o9C-L)_r)VEnwCRaY_M<6$Vh>6 zZ|}p<%nSUdWQq5(H@zZTreEDL8rmfrf0O^BUsb(+Y#UX^*p>%pcLRECn~QBqm{w8v&}PkqXn?6DA910rfMK z^QY=7b*#6(N%dRsbG-mb*Pt&Py(QP!fx4ba)F)<3_y9kS*X&@d%`l`fSx%7V5{F~mdwY{kFx71 zy&u`7WfU%Rlz4=Rz%b^B79nypuhx)K(y$7r(@f_ z14bDH7H*&%h?Z)I0i~wF)amoElRex%$=0yCz4vNE3zz*l-GDwD7zIqBa9_L>UGRVc zG_ttc&0X+9So11U9uHv{2-~?#fs0?8%I+m}~)_+!zNS#Wxc9{>toj;3F#b^m#3g0@CZ;Ptc)aaOizI-$b(M zoD1&Q3F(Q>N$mMl45#oz*M28Y2-w>?uJjD;*7$s>lbn zs${@ljsMP_qBK)q{I0!|3g}Tj5V&1K;4|+WQ9rO+lOy(V-^a$6DxI4&^_~ud9+s`= zofa$HZ+Lti98+`i0(yw8Ok)35jgxV&1BcPjT$hbUC6k`@_61`C--@G2Q8!q1y`nR5TtJqBuNs^~PKZQvbH08jk8&i~%4b*8%8u#5#-%Nxq zO7ls{!t@Bgqxvl%^Jy^;ILtQv%jNxlyFoH-M0t^*C-|tx z&X)qj$77IMDzK+t%tOYEokjQlz0CUb?#0*MjGp_6NA`a*Q%2 zcw^qsJp9Hg^5bm?Ja^5Y{+--XxXcp zeZ+WyZcRXXClM%A?6N+yW$rsE#PQ;v-Xf3Rs=N~(Gq`ogvUd=#ExVVfXIn+8=clv- z1+PM*J{7Oy_;`+>atrmi)m0F#7vw+i`P-N90y*bdiV5iVr5Nyl;8ySTsW&H$1#`&b zQ2Aq^8sufCnVbvWgG8OfNB2nX%$ zdH73E!94Mv1{m;{f$7lhDd>A^&Sg#M6r`S#;?2*!4u`>&R=sG$G`fN}`~9vT*vw3@ zpx2<>PBAC;E2K+O+5)XVgeRlx_a5CZP%=2mr#w1~ddVSUxcecQQ=kG&o_Iq9A7EIF zFm-;&L+pC*6f8Gx#a-3dBu64NKw`)0F|FqVht`PyCAQ|@_ASNDfajOxyyc)X5 zj0XldPPpt}Rkx}-`hEbI+eKi?xH*f1TxGtghbrZ}KY(4Qc^ zHKMI5=)Tst%FMY8hdfB9tmaZNASG35hciN!EvZ{LmoYn8{~RuadpB%~J|&q;gDn-Q?GLGFuWL;J_Qyzi zLlU>Sz`{#b<5i5>;!)Z28Dd^UO&z9^)&*F8-1izl-nhwN4T=1A-VjU-p+PyfZ%C$7 z1k==|laF;SrkhAgtM|D9jaTdh@AjKE-O&G58FrvTh`0d~#2~$cpn`|?KUPc=!mik` z#J`K#+&h2(EkXcBpIh7H(?c?3a-LxE$2;5Uvf1pV(! 
zJyiV_T^U0J!_qI%wWWE>-{HbX#l*sfpI%3rV%CH0?#f}XCct~aEcw8FacAoAgc~64 zHX5&RwK~(L5YwW-c~-FVmZFJQIKa@PLeUSC)9-^|Qp0mPv|))qv&)j!3`~|l*X)cU zbpfYV3kafrLJYVZ;dItU$JTQoWjczrta}0ZBu}HdjqSaXFrHU?U&~w({|pJ$uFjOdtiAm-Fl*si=_};zeKm08(Y8fPqR*UfL(5Zs zvFi_Z9QIXXe>i#zz4$VsWXT;F;F0u@t7x^ZM}6w8L><&eu@_Yk+F(?qnyI3ocmLuo zNty@|-ciSDIMF4gkI2~i>$9_5(DK=DE(|9J3TCE5_jAj8oD5bbzlf4%^%`YImaRLS z`4JF{Y~APLQ{oHJ2@?gv)!i|8hl2_Wy9d$Zobf9A@K6z{ZiFousk%<$&S zq@yFZ?I2lA%8InGS8-p<*bNFP{MMT8gey_MEz&r+Ha^sOpZ{R@o!6|c%o9D+v=X(rOciZ$D7^WY(~fQG6o%Q3a=4Y;Yd$!obq7g#IaU*6vyIefLu7l zU@`St4l3pVFbV(hnYbTO7Qg)h;73R>D>ZHgo7`DB_^~SBj1!B9y;mb~Q_YQ3l`5_dj3GJ!Jfok&3)kwXzX5e49mWi__^(S`vt#|QLi5wd~Mw2 zGfovZOWlf${{7~6!A7{Oq+kLLz0lkgomY*#GTPXhTQ#fI5JoZH+tvE%tqqOG+xP&= z91q`{m$jsiz0$vaIj>)K+SxYzF|nUL(|aeL2beTfIDF7l4$=syjw_Z88(`BEBfd_$|5cDuudu1z-TPBJWxUr#;_1C|Bd0Rw zK}K&bvFl$HSxy8E_r3}nX7|~?|3ORp$%ulGr$ZX7mMQq;v%_z^aQOK7MP)j3v$VV2 z<#;6Utprpqz7S?6Gh->Z__5+!@NJb- zPLBP|OV0Cjdkwtptnbp4BtIaGiW6?828M3aJ8XNMv+~}{YDI8(Bg00`LQ7a;b2y2ci52bU8?=fwBF(jrH=@c z{;=Tyj2ZE4y!Xn%b&9%`mM3E+nll$phjw8NE0wZ*bg%E0BH|vtwI`qUo=-|i)%k{W z)$I>$^3)KZn9SYDjTT}s~+9Im7I^||29d%Xtm!}U40~* zcFp+gi!TN>!-i%jU+-b<)u?E-z9v?woKKGiSLj|-_YynXo|W(%cCE2AY^@$I#Jc#j z{_YnZjf5R}FJYT+UR#T=u3XDCVuw~*2Ruz z(JG^&zj!iO1B42P~y7p+?1b9mpPl0kP~wwn@&kVJRN5y$YMtC>sfZvGID*ch;{Uy zIJG9H_rdOoDAQpe)?{GHufostw@!AbXA|MMdD@_l=?4SZd;38yBAN z%L*txRA+au|I!Ji)>Nrr0zaBCeLPOs6s_*0$aqxGC!=#A6MiL4$?nAu2qx9)8I5>| zZn#3MNd^3>M(d*wYE<=6eW0J?gwoC4BMbGqii>Mk@7jmva^EkxmEe4qsg%wrPp+>k z;H6L2g|IJgpw*rmnpyhYrGZHUW^QkTp6L7%zc2doiJ30qb*Tdf1f>4!i~IQ;-6X6K z^>@h$$c@KF$?tM22OI80T;aDcmh35UJX?*9ow_`7=W~O_t!(P(M+@0U`x``Q0k=eW zYfX3q1(Sx5D#3{1P;49>t-PdGs~kI#0~g3Lwx!`L%+7nn=bIk%!tz65@BQ!gXXK^? zcI-@e6lS!G9-JrIkAK!P#eDFAzlYGu2hjcMjrkIadhTxY+2(;`w^m;l%Y*i$sdy@z zL(7bb;5K(jEw|Zk!^4jE{}%v}KyJTbyt|Lt^*h6n^tI=E4$DD7+@dXGb*NKBuDleI zEV9?F$6I`eSShBC-7L7_^7CZXA1F?V!Y_@DJL0D#wnKlUFdAPK1e$_OUutCnlOW#C zsz}QMXBosM!7LILDIn@l(Hasfz$b#DzBNI&C3!>l9ldL$DTV(@y ziX*Zq`x&gEe@}_ja*AYPact4XNI19R7u=+RizeAo3dCr~0oI5S%;{A@05n&~@)@#x zimIHqg#+TCc?EkG!^*xKTo-d7&5}4)16&fx0u1N|Ty?4s%dOqGR}AeLTvIUM?6>44 z9W^IOLO-~J!%$g{oolxo_j}}SRp?StxahlxIGg0Wcx)qC4jTc6YdaMHah>WUU+ZP+ zwv+~}&kUd*n-HNG@1v>~KKdJViQGHAN7BhIdQ%Cz(}0@Gc&SmQU?#0$07wqW&v>Px`@s591*<|Se5c@ zNWB`v!zMii&{pMRfh=OO_rke^Y^{k`@#vCV9y)nvU156V28zj!>%5O`)`MIW91-t^ zTa=yJ3thEfl2~|@uw3NrrL~(NW!(r!1yEXJNj%h0R7H-Wrbh=BW}p~dfnLp9i=G3w zskPuqEKl~ccZs+)%Y$V_uicciy zlf7#mI2vhUcx=QW=WzZp6~Lx(|5`ZnqX$y}=I7Rz7a- zloI#L{GEbL#p}S*C6h;MOq&bQ~4xK*4uq2pwqLhV@ zk9IIWTX{z!QFF_!8O*MLia5-&^^2WDibO4mmxUTdO~A%l31$-ufpRXJVdrcL+nBWd z{@v(lvK+EAR&qhhkriXiuHIUEO!deuM+zJiMyQQ3iK#qMt`X1v<(2}5NDfsXliFjO z#%ml&8IcOLz$`I~#Y%{k*EU`^2Rx>EjxASiyIaf}nVKNaCw3va)DGv)kchmqac!(s zkro13d#Cp-GI?bOVMJaE12%6=3Imd)dj(c#eW#v#$OCn%X9khN?AmRRQX)8eK@0Lw z-KvyQMTz)!B)z>MfqpL_3c46J12d<4OmPB-gPV{Uj`OMSN1hx zqfAM_=d~WQtGAJlCY`QTFAFk+K^qnvB5PN=C_?#Y`zo=_+a}kjE#Sc^ zkUXT-(2e61f`#=6-xuix+$JwEZc?X6WW}`hZUX6+6H+d<8ZLBXf(7NYF7L{e-iDdZ z@|q+sA$bl6Bx)6Kl~_i@#T!Ggt}x!&$9QMI7dScrGDMV|Qo*|Of`d~j8lg)S@!*N$ z@+$qwP92(hZg6aMZaM$3} zJ}y>&aP*j2OzoSwiRF;VJZv&Tqd8X|7=yAGnZM=9VicsD#LhWPloY!ucf>q;CM?D+ zUV3AYt|398C}vnLP9mxR!wJwbr9*T(G0m;($-A!Oi4_UF1V$MtVf*>)_1{}65 zeq@z}C`OSQaFWq?WkEz>Fsr&GfnC(R28T94%S;-)Ok^b27ubS;+-yeES*3PcwF~&O z$=GK0k4d^0Nb2IJs~FAp&JB(hK)7r1bH`1h6e(Y~$a=U{t?XNBlkr$-ZG_YjQ=*|l zWcm|Pw_7NEXhK5C3PB`7C%x0&J9-48T{puFWZ4*5zCu~9nyPWp+DF7igbUTUHMS8$ z0|;&xZZ=;vld~RV(%V@Cc!{<7tgK$pAX^yb)tOKjAF4qGCXlM7Y%jqPaKHm4hYmHx zPqY0S$w+x`fTY0@&kRim@~A&Jf<;W|igEaERV>GeNjdih2+|f1RQ(B^B1o(Evxmud zcUleODAK|`(f=xzDbTuPTI*4@3pIQz&j9jb2Ca{6ewEmg4P-Q2LJV7X%k{@%yK7q> 
z!tC>qhPOyz+17N_3!RjPX4kQijN9idj-o!eEEw*!p;EE2zfq1wOZ6JB+JnUZsQ|j6Rj3p#A{j&S4&6$w632g zfHeE$+gb&<1<$S(kR3Ot1S@A@wV+L$L>sH3Y}+6f>w%yJX;-zSYPnjB*;T*z=|TWm z4-QV0riMGa3aS9bVr?y=FwZAK}B0Df82svWvQmB?Ga4W!*aLb~Lo8nf;9J_m}Z3h?WohCsScp3W< z??!SULS9U@odXH%4!u6JtF z@3*leE-u0zPpMrN@i)LIOpPif6)g^O#0>DXb|hL_4l$;>rWZ|Wiw3kmkEG^5+5<_g zXz4)ZGvAxmPBuaT;#$dPBU1yN#F zi1$~O0K9jYHpzF(P(|+zlEqc={=jLM>O4_m4qPrT5QApy+NhXfxm-Bz!#*gid}Q9uzn8XE!pshWf=pF-<1!A}}xiM>=1#}oX;E+gd(|ug_H{-5{ZvxjHbJo?AFCkj{6J-kTo;B z!wRvms~%vg!zcm>x5CMkQ+$nA$I8@+z zQx015nouT?ozl52u9yX!WeU1rROQMnDh*KPIEj@>weGdSk=<6>&il+r&d$^G{WraO;7Od3Lsw0f-uKZ zBqHFDeOi6qK|*1WHP1@8RquG+aXe@f zWqv3XYgtiPw1!)BT$u%D`OtK||V{tLN>gE+zcwcHTbA zq4Y}r%?)Ict{A0S!61@C(j^tAS4(7&q-Z!mPM)M_pfTVxL>||OyMOXyQn8U_UQU9q z_eewWqW|n~aPzIEc-e62P)Q6Mzb2 z)GS>=^wx6o4qFW!ZT7C|;MmlzR9rV*7)`)c0sfE5TO`Y1S-+4Jim{(K=LvI1^T>{;b-vkWHtrV>6Yo z!0`r7wi$V|W{f*I+S$c;XAjy+*o|^Ffayrfyb;$*fWU!6+2|*c9L?2O6wy{+DG@JG zLGcf@hdz4jIabw%NDh~XUVCPm5g|e$lM`^&5 zX9_D`x#t>EEgROEJS!P5X?AQh3Re?l_ud;=4z45l+ynwm@fM*niIln~*dUOq>}UlJ zz9a=k1&ztxRb<83#5iJeRYTQhyG>J z6RH-8>8K$HW4wc+7N)(p-NNFugd3TC$TCfZ;JPiw`UgjQZ>{4F6bb0{07xuAnY?9z z#US5$cssqyYUx*(p6cZi358V&TrCSW3sOJO*Z{*?5@?XpMjEc&E3m2yuebu~$`W`6 z0je?|K{mq#sOvk%vnv=)cEP$7BN}A-35(ymDl(}TQ1TckDB96NiN)%`fCda8Gpblc zTrFW$1d;b*Ctm-gb)f0gJtJ-vqkGmzz%@t=%84#=iWmp#HVw(3C9hGs0;|ZBxaI7{ zJz{NxNG^6_D9WMBW!nOfcG;gjy<9X01wH z*pdTk29Sax2M%~#Masc3WveD)6sD`FBG|Rp1=nJ=?-X?IagpM7av~d-F^d?GL_JBT zC177|Y8CM)zt3%o^h-S=HaRrNwQ{bOhHTzI1Y}vAra>%SFcy-kvm_2k+IRt(-@v-L zCN?ST#)?p5Ibd8ZL(;K2ara+WCLU6|T#8YvZRWNqHNtdn-&pv{)QVWsBARLI;Jc1A z!5~Tig}Wl@VbnL|Aj);rcovb$;n{S>g2XK2RMxj*?x)^y)s4 zXYp`NS8N0wm3_+!^MeNfpG4k2!gzKav^(hlLdu0&BkdlYVR`ldz{=i=e6)}3>h1*y zB`5feS?pQ^x&o>aSgoK}OO&exxLO&)zH)_9tb^=ZEl9ftX${UM7|ou-Y=6g(!Fa9U zIcVG+0Jvn_fAiBQ4}TZTD?lkIR$yH-joK3^O(!JAboYuWep>lmYV2Zf2KbodMG8Xl zO`B7&UzNDVn&fdeASBL0=C#%!14!}>K#i_qG`)=v{gt1_CtkkW&zp!mzUcGcf^Yez zufXzfS^KDJjAFKn@BOR)8-DMjuk~LyAzui3%@Kevc;8#_t>5wiERU*sR}(Hxv3niA z`l~PDZ~S*Z@4YXT^l_fYcl`Tbh0p!GXHg#CuW!U;51)GF5Fh&1|Ng?7eWnOSkzrKi z7*9r+O$+SpO|XAuh8x#*aqH$D?%dwTom*FM?fO12Er8Vm*}(zk^J8=U?4s;8i0Ftq zQ-#^?O?>>*$M`GX`D5rFO`D#D{?51E!FPSz2XMT21=Zp{Kw0f#sXEKI{<#hYj;2?T zkEh_OtRv>0!lj)xVdD)%Tl*w{K#3CQPLfU(iHU@+l@j2{%-b?#it*z=^Q-vS$KSZ{ zjlOqqif{Y24+1nYl3;I?9`FMbg?9RpyfQGt8+kMW~F@k`j|BhM6W-rU2BFWkl#eg0eV zm0$jDeCe0G1A9;1#Pa@qEYFT>zR<*j*ttw~jH5zP6u5Wq9)9d6f91mG@pz&;%e^%zt6&K64}p9J40w`Mj;(Q#`eU z$jQ2)#7mhCC^CTkz3E0bad&5IB*xr)bOtcv%HCw~1@q?JBRsfwgezC3(5tHTUA*$? z`&(J4MUfdvxv+OR%M`Z#C@YQEUO&KVuOHx-e&wV1z90NQ@Zwu;<1haCKZkrc7_C?L$`p4e zUWu;t)I{e^Yam;h$f6;h4qDTuL88+0#^I-s#9S<7&edv^ssV1IqbmiVM)iNU^mya$ zF)Ap|Tl3h1uap=3fC)BXh5Z`3~w$u>KrU1wgs^oC4$7K2$am>np28AoDX( zCN}EK%K;3B1=%2Xs(ouzH6+JG_wVB^G`jjv3$wLCtkjbul)-@fbajle}LJ|s~|N7XjCs= zYQRzW66z*j0Rd3C14aW#WEf3$k&UKcHsGM#fGKk#5vdv^GQ}xOdlG6a*{0np9j7{P zstHjfq8oE6J=mfJ=`*nN;Xl6PyZ;y5 zxqSs+_vin0oVk29x2til zNC}V#U`VmZ%jS^lHOL|-n}wdJeKk?JQijVyE%NUO{@05p}K1-BC8S9accaUWhFx6zdcE57V(`u09 zg11SKWDX>dLF|wgL?-Yii|`0)UG*BZGJGSTk`z>U%BG^2K`wEzHFy+WcQ{UY`3f?qe2jdwV-|a1I7i_uM`KM$S`)`X?C&)%gSWCqlH!B~}?XcH9!M8rh3Iw=mdZXTY?@$r}5u*rxjQ#d$0#cQ8=uzheOn?YRc zaeB4{$~ji|7pU$Y;%olwm*De0_l1qrtfP}TR2J*0yrJ>Pa%8!Yk*!))c)vrsSB9Xg zfUJ|?CvoB~-7Q~mqM(6DgO}xsakgM6*1G;>$*6QtxrfNlq(tZQAPr?u73<;pjd++2S;3L)me3*x309l52yzPk#A7egW;p}t?HLyZSM4;SMe3KHOC=BN) z3|K}s{ipuj@eHYQk%^BuA|DAsP}q4#gfa~0E-PR@XRNB)Jmt;iuPb2A8hNfz6wT-j zik7D$*=m#?Sw7Lm7UYv}LZ2?`G0J%z11ZaDtKdjmlA#V<_ba=$zjNWEEmtMx^A%L4 z;JS@$ZbgXvncz*qQ=Suw@fawSoBz_(aOetHF0ed3#PaMATpokCs%2r*0}R6D3VZ&! 
z8W-i7-x+#QR*cn(QB}1!)ocj4b&<@YEEyFuih?o98I^k5**7}?b^e)FGy3_tik z`~v>rKl<&po70_JSMb62zYE{|?|vD+_)Fe}<-Jd1e)y6BK<@Vt4n{N(>NZDJMOdx? zUDgTxibVfkG3#BC@RFnvXk4t-im_NSmSwXcNuT*4BojR(%rP@Q`SM*;(#sDa2J$ej zYMA@7TGa(qO3`5F&dk7U1f2nY>AQa%U+@+GC4T5f{%J4E{rVe+_@92_m+>Y4#(#}( z|IQx;#y2s&{tlZJO${dzBweWtgbbx7%$J1u5;f;m1Vx!x*Jh2IYs~%5e@b{v&FAR`UTNbL+Fz30uGr`fp z8NTM<{yzNUU;Ot_@CfoFMS3-^FMz(M!U~qdGv8pd1&HU$e_6{6e~-hs%m++ z_K1|3YUKuu+K!B_A3Av?k%%loXV zDAlC51<{OygHycz`oY@Qk4O0cWwOJ%sI-M+m)^~#LBz^m{lCUTVl_uk2DVv`~wy7a`V5l+TD~gJ|erfG#v^EGx(l;a1tEm22 zYXg=N*rqai(Z}BI1Vz5SO!TFf-vkyV-ne^=`v>b|AkRN{3zJE)*>WD-@D8mq#ae!; zOP9D^X^p@6w|@cIbRQ*8z+CrbQbT;Ji`k7u90j%!zEy`Q-JsNv)6HG1Z7*O& zm#_BcAOGU-;q+*UVzgsRYe^{q1Z7-A(^TC9h6~o(6>n$0#wHyjuqiNafnxYxK{x1))#JHILB!_@@@)*ThJFSA%}eJ^+Vjfe~Mx> z4O}V#2t}su$NxuOBXvhByn?A%-v-}cQ?e`2w(s>OkNv$Vo_pqcuh)L^Q+M&{9t2$} z!t>AF+92A#B?}UpM{-HElT0am@co~IulbYj!*fqx-}rc^^CeDC7s#^0s}X6e2#Kiz zGLO(3+hi476G?B(OcRn<)K(%n94s616zQgIg{g10OOM9|p6Fpg0N^8k@H#&A>GfLt z-tH97JaxT}5o{zh1%rex>%&Iphdboo`o4b$-}tQ`0M3>;IXuIE^xZ#>|Mq|Urx!ln zYE@yrD50`RJ+z5Yr|@d!#s$`sK}9;fufnpKlhA-fMb>*OX_JD#`$mL(-)w+;teJsQ z*J&>m*KW)PH%2MK+unM+*K2?0!=LIEMZbAt54Uga2MW&xEXX!UI??d{8wWpZc-ZS# zckpk0)xV0>XHIeS#u7WD3I3CB`znmb7mxaD%~-BVL&l}G@aNHx64|`uC+Ic=-f^v1 z=W$Q!ZS#_(d_S-Wsi%0=iDX=A@CDamRcXBVmVN;Fe}C%}`1mK*w@rQW&Q)B!IvXJM zVhbcaxEGhvF5SL$1>^Au%T?V*==7|_^=mtL>dw`Tj$2mRxKx>+2Bl;hDYLG{)MXde z@IxCapxsf6ky`(96ack`YYwpyl2gbw*bJAe5-&V=v)5~X{Wo3`ZGh5`=bpWZ*=)Qa zSD=Tqf@v_YH9?R~_l3Q^F~*}DF6)*tofNosb!W2!vz%b81Tf4T5=DMD6Ow~yA+kuC zY+g^9Xs^OpR}@N3NZ7_rji%e56Cle9cWzz9&dwMoCyUP4J~%k-b<7u@zg73w-PC2F zZCH?Dfu}VCRh{w5%qU8rtTsc)WL(ri)|GP$@?2qOHro8^?V@j*#~GK38_wbCLRij$ zB(me*tc&H{F!nU9sB7D{DvW5U$RMO}m}8cu#`PP!c=FEvVDNp*^S4n|8{NgNn2vge zP0iaH?zZV}&-OSR@i#86U{O%vt(4kq(}{sZO1XBZtS^j^q9})Ubs+?gnD|0rJTgmx zhIq;Pp7|U^YpA^`o_prTV8?v+*;`mFs|`R~54KsY>VH^ilN7k=wbr}+s$#4v1C~K} z5^p0&-|fv^c;5+0crfmdC_y7A*pGu&h}1D%*)IwuJKTq8cmon?VYm4sFYwmqZx41O zpL^y8O4*I^!Z$L^>K*F4Bd8%gHoKGZU?6GbF%OkUfUr-3?Cpl2&5ysj!`P8Z-8}O8 zH3c8u9UDlX zFbqKM+}X$ejlDVzd_xXGu@Ui``WQrmMsK%rmwCPC zve`L@QF9*-OtQ$1L4gkscHzZoK1(@*%MCVf^)-6~X?kb0C3t;-WJoV&N?Cowo z#;pe0XrPrbtjtSM_DO@?52COw?IrS|qYMYFQjI>zuq3m2*N;lzwEHYXj?4Ne@*L2N zTi5sSD6P^-su4+EeQB;8x|QH;o732)U@4o)_i zG2WZv`2nQfw>*Cvbzj#3Kq1@o484C2R^~3!F9zP=X&W)ppp{juwuo95CVmd86G`VH zT)h2VYopc3z$i1j|7MTpp1TR48sMhVHujX^cX;aKWn4->tJk{f`b4$v98+QlW1FDZ zUix?6tlp%%R%Cek$?I@k1_zRIz>kS2NLxC}HZqU9EGzeVoAOlciVfEdJ9dyo9r6)LgJ9TY*%Ne?YV>*~XUNB%v88@=g6Go#p@;`wJU zF7*zTuF^(554u3)t~_5XH#(Y9k7375)`@kBHL5EI79;@Lay#foHV(YZpfMg!T^tiwiqK-jHh0$p zLG`nv!X$CRa#*kox48~`58S!EeFLfYnWwJkb zbxez1c9J@i-HcGq<_v^X85C?7?)L26$f2Iv!#z2uDDXHc1YRjrr2zgf8Q* zDsF69;rVB8T*x{-apx+oU)zPQ^nj~*dx}Oj%JAUc5kC5{S2sH3Y&tdr4+j(uD(zb( z0$|$q4CP*O!$VS@kNo@uFH!z|@&UoIZHe1xC<3SPfJ>UCx zf$#mhgMIT?_h;DOn}Vxh@9nFv9pLcj40mo%P?Z&mNrr6a9 z;r8o*3UJ!(LVM3rR(p#1V2vqa85q<7AR44@gM)18k!KgGPk+nvw*bu|D{UY)jqL_n zH+ej9=PGvgrggWJ0Usx)3;f7W{4?O%1d|(MXkz@0AO0seUemX%>#?^t!QSo!Rkd^$ zM66Txe&;~7ir<606@Djp{u+fNbQ`yd1UN|79j;k#_ihciX}|3)=Z}Rv_sk7oRoTsi zE-6nR^6}2M-2rA}@M1am$$r;={=eeqf8oQpadQv9`CFgF@BaQ~V#2K(yV&0yV|lW) zZk6X=!BGU}z?vl7-yDLjkVKNd|A+`hSoTQ~N=tbn3(fqy zq=^1gA9yeH@d2Qh9ymB4?Cizu+VZdT4ODwS2!etkSXfBAo=UI<3!+OvF9E%oteWSU z8JtoAyhK%&xOR01&zvKx_xWdU0Fx28IVTKIFvS|oC78=g=U%=2t#|M}ANn)UM`vcC zt<3k=C`6mNU87Mx==px#*M0%s`*|hHa^6bp-c<~N? 
z_NV?s>|fhKIiK5WX7g`|jqpGI)(_%C-}xs8Ip61>xsLDuuf7he!#ALp4~#XaLBd>l zN-N6^o?`)Uf9>aSf0Aoeio_eKiHB$qE1_UfeIkJ2Bm)s?!0fD*(-Jl=zo>66YX#&A zFP=lmdg}|f0bPOh%6wnr4bR2Qz0)gmT?HO>CNu%~#;^ZU{MG;B&*A#bT`V8mHydK; zS!~{0?2rACx8s9<_DgVdvcO;ep`YIf96$Sa{sY{;HO1j89|2%22P&apRdyryLlPSC zbSmL}kjl)P8(BB>i)*q&PzH5W6Cn_YEBDPZtOeOb%Lahcvt@lyZFbhI{`KVaV#lrD zM`jnSYJjuGTb}P3_uw!7 zg)hbDfAQOa<6|uEziCv;K)^9x#3pf(OewtgUC#gyj`028{|$KWyPw5h`ktS}8+VV+ zb)2vM6Ys_kec#vPnWy(~`03vQ_{?}hr3H!OA(i7E4HuQki!4co`#GN<-a|2&Asfxy zN|*5v79eS+eVVvOGcCe-X%#dR-v3A6iD#d_7IYeY-9AEgp7x5>)>~lJ7m}U8R$J?HSK05{LWsTi( zh5fx5zVgf7-EIlqLz}*H`x6T&zCdtZSMy#GD;!5{v4{N&&Jway7&S*GwMU-VXd%Qt>G{@kDb*TBmI9KP}! zfIcr@3he!(|lN!Y+LLvqF{U0qj%L|Nm_A%bQhGIMgTssESW-hCzhEnE! zMh2+*_hwK#)CAey=VIr|b;oWf1WBrAsm!?5I6FQBmnKb{6sWk4>F$2b17hz9sTGOO zDmCUOhd_DH0iR|VO>aR>N0H-&Bej6)bb)ey7l5&1YJzNZ1NqK0cuS;bQf#rDEwNfW z0MHaOjiHKtb39%z4aZhwm<P#n=V%HwzKMKi z(h?ndfm#ELqa&a?GK)0Bcy zI{|(4$EMtv9LfufrdKf8yNY7615jqtOtKmP;c%9Dr0|u7f-<_-(k!T1RjhOfW4# zDuZl{9B(Y#?(A3l!)5Td+-yRzu7H%@s5ud{ViEi`q?D!VD1&L<5gQ2~fomoAxg4bO z;G$xiLYTQm=GZYXX2^>P^4SiM7l2as^#B9q5`21u)02Cs76$;E%}UqzhNb1;<~i+K z6kWCABQ;nI&n7}SKf&39H?UgHK_C6G3EL)Z%}{iep_uMsvUd%m$u6KWQ%&1g4yyla zKm&7A>&4czoX-T1ly7OEOcP`Y6tuuX-t#PWdGbe42Mdy=$YAdoiQ|x+`ArmbrV4H_ zQDTu&aIA1(W2c3SeU|JXOUchXGPPZ1dwcv%&b^l0 zpE=!0Jyey-vJ_x1Yl)p#W`VPfzttjAZPFtVgISpq z1)Zs36mIZAG02vRj>Y{rp^u??%puE`g5f-UF*ve*U5fRwl+8vfFtES#*+pKD`?Ay zL~^XwR;`KyLV&=&uM!qi<`mXkqnw}M^x#cY%QG{_P$81;E=;o-w!T`-Q7&FbHa@_3 z=PHWn9w;9f#j7$?MB#F`Oe}?zan0-nH!ZW3$#AJ%t$NC6u&CskX%-;69ocDZB&Y_( z7Y~bdDnTL^HjCri&5+y2Yc(ohVRb=v!K?*tYn{t$r*?x~?wax&EG|ZxJ9zP}1UL{j zTGI(@6}v`h;R5T=iSv(|#9Coe#LzN&@8wig!h&co($5@mp=|1jb1g{`SZkae-o^ar z0RUaQ%;KE>|s@_2pY~W|%oKSaUsjm$g6b z$NQPwnBH)qn{(lC!3ALHt7NgasGA$>FGgayE4GPugD)S9!o_ONS-2K0jUs5;+9vbl zxh7E{buBJ733eRm-qCWa!oMNmjv?6LBb!KqH?e1RBeyGY4b9I=F?6}I9ZGDV4?Z}! 
zu^P?uHn1JWKpF#}uv(np^ue2`7H73VX!udQqEHaprZ|dqz2g&GKc1@<=12FiI6KC8 zx{J|l4|y?hVSBhH3-v`Q@kJA;2`+jwfMW|k;bM?#0jQTNBMno_4GMNc=_W+hBvA^m zA?O+xtv#WlEkuPpECzlGECxB%-YlxD@pEl}x9!`v1AnpZnL6SANU$=}orM#>%hphF zLS@!XeGX*HD$JtS#$5sn6-xUNb8B%b6W?=|gNVSo!r9S%%#ZGabp=u?>Hp>a=^5XZ zf*KGTK?hhF6-t+wpB!L0KS43x!FaZZd_1WE#g*rD+CZa}k6D468F8GLh}308#NTZR zi`@O~boi9QEP>g44vAwyC@9q4^t}0Y)V53r&nNKPlagEDYIZC-=2&WWpgw6%xL9p) zaICtK(M|&=3DlU|DTujzUUsnOR0aoHsZL4(Rkad%fnlwO1W=d)fk2)hMS+saq~F$*3Lu>h8j1iQWmMrsUTu#0ocwPHet1&p05 z;sYK9|7kafq7KoD+xcd(woHgv4d$qIXcK$?bJzn-Y={9lFm#Fw@dnZyQQnA*uv3+y z8C;NjD6B@aDaWkF;`qQ=4f19 ziS>MeK?rscg#eKEC{oVA9C34WQWcB4g^l9pNO;u!>bxhR#W0&^@YzZcsd4UYR+sNwDABneIfCk2j(YsGCl7PTH-5_RM=EnhNntWtn3OMgwW zRJYi(8k#FC4j*7~`~aG@^?3Z0cLZHQ{f#4!fB%w;X!D&YP#pjqXQ(o*b7kZ@<~1M@pXDEh+SG;7 zcBd?{Ct*Yq10*>C4#N8Q2e=(;4AiIi^skglm2j&%El^+%I1Qpz%j6-01{aT9M1<+icnRR zE5#6X=QY>Rm>(ZtesTcT#%dt!u&5*AL_CKr5A@3xNXV)WY+@QR-l5$$qHEsbCQBuM$VNGORiiC$J5A?6hF7fQGe zND%nQd)LaI9m$$W>mid(IXF#Tlfr?xMUnTvCFh=J;7qfrc3DYI2Rg0t7;_gLKT?^3 zF1b!Xlu>sgl*=>BkM5&fojUo~*CFDCT z#EI1W4i^(?E;q|Ftjx@lns95(j}Ngpt*r){vWym?XUkQhZezsV%G>f~!ji}XSZ+cd z;pQfCENVsAM&y-C$uNnC42{)tj&iwxDhd?iDT>j=0YzF5N~CWzAn~?SLaY?UonItb z(8|e9GH&w*3O4o#Py%3u8RxBZnlA9fMOJI0*MlNvTR5T`^Z^IQKaMEjqQ#>A6foU{ z?AuD44+e57f>L$7ty;}-dUzkza&BjxJBtx%U>zgm9TqZ=Yy1Kvtihx_R8zBUH(Z2O zU<%3QUzsnEHH*wyCDbfaZ?RgSS}n26Yo3yi#!xCV$B%Fb6NjJ_QTGB<IIH=yhG-rr7S_0Rq2{Dc^=Cl*A+)C2PJq1Q>cdNfVYHT z)ILWcLRMrrzV{{;r$;r{P-MDZYQIGl3|M5N^|j!Lageo^q_Gla|6=WP5gQ;nbhy`s0E3g19 zw~kzX6!j$OsYseIaHD0SE*7X(3xI*q zYzM35nGFiE1c&V5#@fqQ>lQA0%Dn9ZHEtCP8=wRRv;~T8+%P7O924oV>YbnxCRvoFOa5D8^%CdEpa!kc-GSJ2wSg*qQuQ&W-9uPAZMO#>v{D(iM|D zBda>)!&A>qznPWSiqeMHL@IvABaHt&@L=s4jtBoIzKmTTe=% zm`t%aKD1=LJ#tO=RP@lrK~itO-a4q)jBvL^tptd`nnWfqyzW9a>>+$o zhH`EgfYtI0%oz@HsiglmJ3wNg^SAx3pN_husS<|u2un` za+Q%TMZP~U)&QgfC<2AdZ1S+$R^H>&*4mieo<$ifH|g|3;j%7|_DFS;d0CZ9oK-7i zS&m{nMs9)P7O~fqzDM5(28(Jq4pb?2RdW<1eM^LQe*Gi_u72p?1{~A_3>~ge=Won? 
zCn85I2dCpM)CG=pt`%(OnAKu#tcFkcZ6KQzM$;Xf9p1BDcWix&T0TtXQuB3&u% zXV}_w5%%0Jm*PVF>9_sw~5OQnei{hZSF9#PR{U2s-OL=mtRXAokP+;V1>LIC& z)#40iCr4PEo!T$iZ*IrwG>4!_h+|o>iTFXRd%pdS`216(G8H6Ab`GOzKgk!h8iRnN zJ8;GvA%wjPQpz*1deE%_gd0UjG&W`3qS_1vBYk*Kv}Mk6ZKZz)BW}0n-pH@AWqw>1X>K=Uilb2yn2l8WmQ{d0h|{kl9wWog+en zd2Y0`ht=ZL1Pq56QITI1A95kXq?8b$1&De|m6UTue=A#a;sDe1{%va6SrTCjpl*<` z;1T6&g|aM>=Q;9Xj4aO$p?5dPlhlBS+vR}b4e{h4DCXxG_aNjH4F)P(BYgqtao))K zI$&%I98#fLEv%X-Jrh56!NH~Z?TnZiSyo^)-NoYQz!b^g5dlQ_>g}XGnZFfg)XFSj z2a+b^w{2BK9F3T0FvV_WVu**Pu5c|g$(jB~kE8-PPv4P8}Eif8V&w9FB(igRF%@pKoVT}Rrr_|mxjS+U`Z4F4G7d6pf#8;xC8SkBKdKRv;6 zF^7&$vEl@g4;BLl&DA(Iha2hRgQ1jKv*SfZ<8dE1in~~QkiZw*qh8?PUf>`j$u1XX zC>M)BIP3|Ih-(#Ijn);4=?=2dc#wHDn_G}3Pb=8XU7&!20HV)F+U}Kbm16FgEt6$m zsU;ST+gQY0xb2pS6nbWj)nb8iRTKKW7$M8DH5MhZV^X4pBCmIl>hx_w{{+Nyu#&zO zW1ZDVf&*)=)rlrm*;oy6|2X2F_KLQL*25{LL<*CgD>!-Z#^rLcoMk{;)oSq(LBtt7 zl&zGz)#w6LHyLX_Q;G(Hm)O3wUY5H_gj+39m34$ZFGf `&`AD;%+Y+s>eDwo~1y%qON2gr`*Ao{&9`UZbir30lmgmUx!YW^>1@42E)e5EGXsn~dO4yW=FwlrVBKGEoKv*A{s*Yav5vOQRElL!3+yuxoSG>i8hwrYc*Qqe&u$XIf zv?OviL=z`5qZMtDek4{RW?{swiAZ>44lD}0_=#r$*ruH@80Bh-#n}nwXQ!yj(t#@) z3#o&F=a2CA*fCocrblw*T8qJv8gkc%su2|D^%4AN@X=rjPgaGy6MEAaX-HJfqDRg zlM2jc2953gW#DZAys;z8AVzkji@GTn#EK z_WLZ3Tc-LQloTn)5)$i8`j@oW$CqX)gjl7|D31W4E{bZUVlNHh#$jvTmAEnXt6 ziA-|{TfIbXjH7z0_2 z!N9=-fdkI<9Zh$!Iyl6WP%@KaE{dJ(%8Q@q6N@*X#F8@%s!^#h8W z%I-JPW^vzq4pN712;<4r@4_B-PpT?Zx`L+Kq9`wTID}iV4iw>Q>g#I9HTqbM6dAuf z;E=NKXy*#f4(_gXvmP!$x;RNRn21*Xl*db2E*B!!1ZlS`2^1kL5Osqk0vKk-WVTl~ z-{v)n3)jIc3lx<>W!VKR%6UMs&T5z{zgAqMkJUgTjU-GTc{Jc))U!p3$rP*k#Iy=h zmk1!|1PNO8ALLb{MP06_HEGb(2PiTg;+dZ|SV9iMjL~F@yeQ65u+zL(qpCD2T_RIi zP3V>CSR`wfexF!L6W3TRQF>K=>GIcpRwD=kir#6D0vsHfPcquM;!P%vl zC#;HAY|^4$VT;Cc~UjYzAtsx$!Yt#&-!fgO?Ia?eVH63-S(nQ@K;Q8>tA!XmuY`5+ZAF(2j z9v~JZb!IMkZj_9J1iU2R(w;T3+yqg#%Lj@ZQfkK{h&(QeC|98y#>|-P>>@9MMgap8 z%({G_M5RmUGIKzo{@vosWu1i#Sq9w&jM#N>M{Rh^Y9PrsTGMU$Aps+>BBR+ZR`XME z)vg)2900Ljpmq^&N~fmSg7$EBc)t}F zxjX>r%S#Yc35NZrStwmCw@XCB<+x#4q)UO@LevIJqg*kdvJ8{i9@bBmy&z_I{SH{I zmUV(;mP08?=uKQ>CE^+(lj?HOn3Fssl|IrQj}QSzR~`Dp1V{6Jtx-&Ou$-TwS}i=; z_woTG%1gpU99fSZs6;t!3@D!KCCGf(LNo~$_t}%3D}Ykac7}WpGz{6n#9BfF6x3Z= zvZAQ#RhEmoP)H0Z-53}fT8%buc{IQXg^Tgd6`TgGf-WaOECGzAtV?SWlY@N{@5fFY zAx{HET9p)74DZ;zf-EnLYZaL%C9*MxaJ2e}R77>k{n_yYROJfAc!I1LTWPaj$HXpR z#9|jXCpdWH&4LHcc4n>3(AXJPbJr^9@&csG8RI4TC$A`5`1wy%)BP&M8CsQ$ri+g|~1`J<~N&*9Mu5nph zs`S^%7|r%jikgwj4-f$osmdm;i6HSYe&Sh_fMc|+iX=9$;K=dC(R76nGqVnnW!J6x&_zM8?n!Erv9 z6(bar9W2j|K%^cMfQTEh)|z;etoi~4tr=<@WfJwmo9^wq;i0KfV)C6d^lm5e0~8sn zae#8QwAG99d>Z$#TCGqnAJk>RlPQYv)V43;W>j+0mkTnP*A3EHN5uJ2t)qUjZ@D;y z*41MKke=40<#wfjg0_O?8I6F;u=(<4SFRz;M|Cie(ojlCqeEH^0CB6)p%m547D_CR z53o8ruKn)GtWIZmyqW zQQP&B0Ek!>0GjR};(_uuJ zJW|iBkr!i3cCWZJlGLqZ9{|*$Y@PcC8?erg4?U~F34-1JuG(%|vxzLvPOzMx+62D5 z7@HJK-7|6?8HI2eEXM`AzYr-*_O3qW0ExO?VL5FID8UV|W7}kak^x=s*wZW5f?c{F zF;B1C^Gv@bV>K3-A041vEdc4mj$uP+L>;cGHpu}7+QG#d} zzH`A?YfSd`QA}pSwMvaz!+PNK1ya+~i)yQJcJu({a$$m3kyQ62Kj;C9h*Zl$x~^7B zoGlkvwoFtq%X*B@G{P+NilfzaotglCxCC-lSV{v+jVmw1J+pR}y zGhetd^j53~4~xprUVy9vN-P3z4Ty%ya?Gw=_xcMr=7=Mx@VWOz6BG^CSTXlU zeWolUBGrFKPAOS(KW23Dj9LUrSks+KK%XX*+xJr P00000NkvXXu0mjf66+?u diff --git a/examples/tfidf_svm_example.py b/examples/tfidf_svm_example.py deleted file mode 100644 index aa69a129..00000000 --- a/examples/tfidf_svm_example.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Example how to use tfidf with svm on simple SST dataset. 
-""" -from sklearn.metrics import accuracy_score - -from podium import Iterator -from podium.datasets import SST -from podium.experimental.models import AbstractSupervisedModel, FeatureTransformer -from podium.experimental.models.impl import ScikitLinearSVCModel -from podium.experimental.models.impl.simple_trainers import SimpleTrainer -from podium.storage import LargeResource -from podium.vectorizers import TfIdfVectorizer - - -def tfidf_svm_example_main(): - """ - Function obtains sst dataset and then trains scikit svc linear model by - using tfidf as input. - """ - train_set, test_set, _ = SST.get_dataset_splits() - - train_iter = Iterator(batch_size=len(train_set)) - - tfidf_vectorizer = TfIdfVectorizer() - tfidf_vectorizer.fit(dataset=train_set, field=train_set.field_dict["text"]) - - def feature_extraction_fn(x_batch): - return tfidf_vectorizer.transform(x_batch.text) - - def label_extraction_fn(y_batch): - return y_batch.label.ravel() - - feature_transformer = FeatureTransformer(feature_extraction_fn) - - model = ScikitLinearSVCModel() - trainer = SimpleTrainer() - - trainer.train( - model=model, - dataset=train_set, - iterator=train_iter, - feature_transformer=feature_transformer, - label_transform_fun=label_extraction_fn, - **{trainer.MAX_EPOCH_KEY: 1}, - ) - - x_batch, y_batch = train_set.batch() - x_train = feature_transformer.transform(x_batch) - y_train = label_extraction_fn(y_batch) - prediction_train = model.predict(X=x_train)[AbstractSupervisedModel.PREDICTION_KEY] - print(x_train.shape, y_train.shape, prediction_train.shape) - print(accuracy_score(y_true=y_train, y_pred=prediction_train)) - - x_batch, y_batch = test_set.batch() - x_test = feature_transformer.transform(x_batch) - y_test = label_extraction_fn(y_batch) - prediction_test = model.predict(X=x_test)[AbstractSupervisedModel.PREDICTION_KEY] - print(x_test.shape, y_test.shape, prediction_test.shape) - print("Accuracy:", accuracy_score(y_true=y_test, y_pred=prediction_test)) - - -LargeResource.BASE_RESOURCE_DIR = "downloaded_datasets" -tfidf_svm_example_main() From 6ff3314dbd776a13d8a2436a1c123c14b66113be Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 20:27:35 +0200 Subject: [PATCH 20/26] Remove examples dir from commands --- Makefile | 14 +++++++------- make.bat | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index d3dfb810..a61593b5 100644 --- a/Makefile +++ b/Makefile @@ -3,19 +3,19 @@ # Check code quality quality: @echo Checking code and doc quality. - black --check --line-length 90 --target-version py36 podium tests examples - isort --check-only podium tests examples - docformatter podium tests examples --check --recursive \ + black --check --line-length 90 --target-version py36 podium tests + isort --check-only podium tests + docformatter podium tests --check --recursive \ --wrap-descriptions 80 --wrap-summaries 80 \ --pre-summary-newline --make-summary-multi-line - flake8 podium tests examples + flake8 podium tests # Enforce code quality in source style: @echo Applying code and doc style changes. 
- black --line-length 90 --target-version py36 podium tests examples - isort podium tests examples - docformatter podium tests examples -i --recursive \ + black --line-length 90 --target-version py36 podium tests + isort podium tests + docformatter podium tests -i --recursive \ --wrap-descriptions 80 --wrap-summaries 80 \ --pre-summary-newline --make-summary-multi-line diff --git a/make.bat b/make.bat index 0ce1f11f..37d63d73 100644 --- a/make.bat +++ b/make.bat @@ -8,24 +8,24 @@ if "%1"!="" goto error :quality echo Checking code quality. - echo black --check --line-length 90 --target-version py36 podium tests examples - black --check --line-length 90 --target-version py36 podium tests examples - echo isort --check-only podium tests examples - isort --check-only podium tests examples - echo docformatter podium tests examples --check --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line - docformatter podium tests examples --check --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line - echo flake8 podium tests examples - flake8 podium tests examples + echo black --check --line-length 90 --target-version py36 podium tests + black --check --line-length 90 --target-version py36 podium tests + echo isort --check-only podium tests + isort --check-only podium tests + echo docformatter podium tests --check --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line + docformatter podium tests --check --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line + echo flake8 podium tests + flake8 podium tests goto :EOF :style echo Applying code style changes. - echo black --line-length 90 --target-version py36 podium tests examples - black --line-length 90 --target-version py36 podium tests examples - echo isort podium tests examples - isort podium tests examples - echo docformatter podium tests examples -i --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line - docformatter podium tests examples -i --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line + echo black --line-length 90 --target-version py36 podium tests + black --line-length 90 --target-version py36 podium tests + echo isort podium tests + isort podium tests + echo docformatter podium tests -i --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line + docformatter podium tests -i --recursive --wrap-descriptions 80 --wrap-summaries 80 --pre-summary-newline --make-summary-multi-line goto :EOF :test From 715859db2a1fb858f1f05f5ce7e5b7d1f755c06a Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 20:30:00 +0200 Subject: [PATCH 21/26] Remove examples dir from action --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a12bc935..8121a045 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,18 +38,18 @@ jobs: pip install .[quality] - name: Check black compliance run: | - black --check --line-length 90 --target-version py36 podium tests examples + black --check --line-length 90 --target-version py36 podium tests - name: Check isort compliance run: | - isort --check-only podium tests examples + isort --check-only podium tests - name: Check docformatter compliance run: | - 
docformatter podium tests examples --check --recursive \ + docformatter podium tests --check --recursive \ --wrap-descriptions 80 --wrap-summaries 80 \ --pre-summary-newline --make-summary-multi-line - name: Check flake8 compliance run: | - flake8 podium tests examples + flake8 podium tests build_and_test: runs-on: ${{ matrix.os }} From cf26d20c334542864c579634ba720cef1f8f3b4d Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Thu, 1 Apr 2021 21:04:04 +0200 Subject: [PATCH 22/26] Update readme outputs --- README.md | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 41019095..c6cfcc22 100644 --- a/README.md +++ b/README.md @@ -56,13 +56,13 @@ SST({ name: text, keep_raw: False, is_target: False, - vocab: Vocab({specials: ('', ''), eager: False, finalized: True, size: 16284}) + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 16284}) }), LabelField({ name: label, keep_raw: False, is_target: True, - vocab: Vocab({specials: (), eager: False, finalized: True, size: 2}) + vocab: Vocab({specials: (), eager: False, is_finalized: True, size: 2}) }) ] }) @@ -94,7 +94,7 @@ HFDatasetConverter({ name: 'text', keep_raw: False, is_target: False, - vocab: Vocab({specials: ('', ''), eager: False, finalized: True, size: 280619}) + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 280619}) }), LabelField({ name: 'label', @@ -105,7 +105,7 @@ HFDatasetConverter({ }) ``` -Load your own dataset from a standardized tabular format (e.g. `csv`, `tsv`, `jsonl`): +Load your own dataset from a standardized tabular format (e.g. `csv`, `tsv`, `jsonl`, ...): ```python >>> from podium.datasets import TabularDataset @@ -121,24 +121,27 @@ TabularDataset({ fields: [ Field({ name: 'premise', + keep_raw: False, is_target: False, - vocab: Vocab({specials: ('', ''), eager: False, finalized: True, size: 19}) + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 15}) }), Field({ name: 'hypothesis', + keep_raw: False, is_target: False, - vocab: Vocab({specials: ('', ''), eager: False, finalized: True, size: 19}) + vocab: Vocab({specials: ('', ''), eager: False, is_finalized: True, size: 6}) }), LabelField({ name: 'label', + keep_raw: False, is_target: True, - vocab: Vocab({specials: (), eager: False, finalized: True, size: 1}) + vocab: Vocab({specials: (), eager: False, is_finalized: True, size: 1}) }) ] }) ``` -Or define your own `Dataset` subclass (tutorial coming soon). +Also check our documentation to see how you can load a dataset from [Pandas](https://pandas.pydata.org/), the CoNLL format, or define your own `Dataset` subclass (tutorial coming soon). ### Define your preprocessing @@ -151,6 +154,7 @@ We wrap dataset pre-processing in customizable `Field` classes. 
Each `Field` has >>> label = LabelField(name='label') >>> fields = {'text': text, 'label': label} >>> sst_train, sst_dev, sst_test = SST.get_dataset_splits(fields=fields) +>>> sst_train.finalize_fields() >>> print(vocab) Vocab({specials: ('', ''), eager: True, finalized: True, size: 5000}) ``` @@ -175,6 +179,7 @@ You could decide to lowercase all the characters and filter out all non-alphanum >>> text.add_posttokenize_hook(filter_alnum) >>> fields = {'text': text, 'label': label} >>> sst_train, sst_dev, sst_test = SST.get_dataset_splits(fields=fields) +>>> sst_train.finalize_fields() >>> print(sst_train[222]) Example({ text: (None, ['a', 'slick', 'engrossing', 'melodrama']), @@ -201,9 +206,11 @@ A common use-case is to incorporate existing components of pretrained language m ... numericalizer=tokenizer.convert_tokens_to_ids) >>> fields = {'text': subword_field, 'label': label} >>> sst_train, sst_dev, sst_test = SST.get_dataset_splits(fields=fields) +>>> # No need to finalize since we're not using a vocab! >>> print(sst_train[222]) Example({ - subword: (None, ['a', 'slick', ',', 'eng', '##ross', '##ing', 'mel', '##od', '##rama', '.']),label: (None, 'positive') + subword: (None, ['a', 'slick', ',', 'eng', '##ross', '##ing', 'mel', '##od', '##rama', '.']), + label: (None, 'positive') }) ``` From d5a37e7d215026861681bc537ca88b34cee9980b Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Fri, 2 Apr 2021 11:29:51 +0200 Subject: [PATCH 23/26] Add roadmap --- README.md | 6 +++--- Roadmap.md | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 Roadmap.md diff --git a/README.md b/README.md index c6cfcc22..a3c85505 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ The main source of inspiration for Podium is an old version of [torchtext](https ### Contents - [Installation](#installation) -- [Usage examples](#usage-examples) +- [Usage examples](#usage) - [Contributing](#contributing) - [Versioning](#versioning) - [Authors](#authors) @@ -216,11 +216,11 @@ Example({ For a more interactive introduction, check out the quickstart on Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/takelab/podium/blob/master/docs/source/notebooks/quickstart.ipynb) -More complex examples can be found in our [examples folder](./examples). +Full usage examples can be found in our [docs](https://takelab.fer.hr/podium/examples). ## Contributing -We welcome contributions! To learn more about making a contribution to Podium, please see our [Contribution page](CONTRIBUTING.md). +We welcome contributions! To learn more about making a contribution to Podium, please see our [Contribution page](CONTRIBUTING.md) and our [Roadmap](ROADMAP.md). ## Versioning diff --git a/Roadmap.md b/Roadmap.md new file mode 100644 index 00000000..27adff82 --- /dev/null +++ b/Roadmap.md @@ -0,0 +1,46 @@ +# Roadmap + +If you are interested in contributing to Podium, some of our +Order does not reflect importance. + +## Major changes + +- Dynamic application of Fields + - Right now, for every change in Fields the dataset needs to be reloaded. The goal of this change would be to allow users to replace or update a Field in a Dataset. The Dataset should be aware of this change (e.g. by keeping a hash of the Field object) and if it happens, recompute all the necessary data for that Field. 
+- Parallelization
+  - For data preprocessing (apply Fields in parallel)
+  - For data loading
+- Conditional processing in Fields
+  - Handle cases where the values computed in one Field are dependent on values computed in another Field
+- Experimental pipeline
+  - `podium.experimental`, wrappers for model framework agnostic training & serving
+  - Low priority
+
+## Minor changes
+
+- Populate hooks & preprocessing utilities
+  - Lowercase, truncate, extract POS, ...
+- Improve Dataset coverage
+  - Data wrappers / abstract loaders for other source libraries and input formats
+- BucketIterator modifications
+  - Simplify setting the sort key (e.g., in the basic case where the batch should be sorted according to the length of a single Field, accept a Field name as the argument)
+- Improve HF/datasets integration
+  - Better automatic Field inference from features
+  - Cover additional feature datatypes (e.g., image data)
+- Centralized and intuitive download script
+  - Low priority as most data loading is delegated to hf/datasets
+- Add a Mask token for MLM (can be handled with posttokenization hooks right now, but not ideal)
+- Populate pretrained vectors
+  - Word2vec
+
+## Documentation
+
+- Examples
+  - Language modeling
+  - Tensorflow model
+  - Various task types
+- Chapters
+  - Handling datasets with missing tokens
+  - Loading data from pandas / porting data to pandas
+  - Loading CoNLL datasets
+  - Implementing your own dataset subclass

From 7436d30da335de7e49d478155a1337fbf41536a9 Mon Sep 17 00:00:00 2001
From: Martin Tutek
Date: Fri, 2 Apr 2021 11:31:02 +0200
Subject: [PATCH 24/26] Add roadmap

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index a3c85505..adf48c20 100644
--- a/README.md
+++ b/README.md
@@ -220,7 +220,7 @@ Full usage examples can be found in our [docs](https://takelab.fer.hr/podium/exa
 
 ## Contributing
 
-We welcome contributions! To learn more about making a contribution to Podium, please see our [Contribution page](CONTRIBUTING.md) and our [Roadmap](ROADMAP.md).
+We welcome contributions! To learn more about making a contribution to Podium, please see our [Contribution page](CONTRIBUTING.md) and our [Roadmap](Roadmap.md).
 
 ## Versioning

From a3cd42eceab46efe67d6443b3d2a55cfa51b42e2 Mon Sep 17 00:00:00 2001
From: Martin Tutek
Date: Fri, 2 Apr 2021 11:40:50 +0200
Subject: [PATCH 25/26] Add roadmap

---
 Roadmap.md | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/Roadmap.md b/Roadmap.md
index 27adff82..5fe9bb4d 100644
--- a/Roadmap.md
+++ b/Roadmap.md
@@ -1,17 +1,34 @@
 # Roadmap
 
-If you are interested in contributing to Podium, some of our
+If you are interested in making a contribution to Podium, this page outlines some changes we are planning to focus on in the near future. Feel free to propose improvements and modifications either via [discussions](https://github.com/TakeLab/podium/discussions) or by raising an [issue](https://github.com/TakeLab/podium/issues).
+
 Order does not reflect importance.
 
 ## Major changes
 
 - Dynamic application of Fields
   - Right now, for every change in Fields the dataset needs to be reloaded. The goal of this change would be to allow users to replace or update a Field in a Dataset. The Dataset should be aware of this change (e.g. by keeping a hash of the Field object) and if it happens, recompute all the necessary data for that Field.
+ + The current pattern is: + ```python + # Load a dataset + fields = {'text':text, 'label':label} + dataset = load_dataset(fields=fields) + + # Decide to change something with one of the Fields + text = Field(..., tokenizer=some_different_tokenizer) + # Potentially expensive dataset loading is required again + dataset = load_dataset(fields=fields) + ``` + Dataset instances should instead detect changes in a Field and recompute values (Vocabs) for the ones that changed. + - Parallelization - For data preprocessing (apply Fields in parallel) - For data loading + - Conditional processing in Fields - - Handle cases where the values computed in one Field are dependent on values computed in another Field + - Handle cases where the values computed in one Field are dependent on values computed in another Field + - Experimental pipeline - `podium.experimental`, wrappers for model framework agnostic training & serving - Low priority @@ -20,6 +37,8 @@ Order does not reflect importance. - Populate hooks & preprocessing utilities - Lowercase, truncate, extract POS, ... +- Populate pretrained vectors + - Word2vec - Improve Dataset coverage - Data wrappers / abstract loaders for other source libraries and input formats - BucketIterator modifications @@ -27,11 +46,10 @@ Order does not reflect importance. - Improve HF/datasets integration - Better automatic Field inference from features - Cover additional feature datatypes (e.g., image data) + - Cleaner API? - Centralized and intuitive download script - Low priority as most data loading is delegated to hf/datasets - Add a Mask token for MLM (can be handled with posttokenization hooks right now, but not ideal) -- Populate pretrained vectors - - Word2vec ## Documentation From 10bf1cfb0968b18b3f13765f28a4834e9efb5efc Mon Sep 17 00:00:00 2001 From: Martin Tutek Date: Fri, 2 Apr 2021 13:01:24 +0200 Subject: [PATCH 26/26] Polish, comments, rename BasicVectorStorage to WordVectors --- Roadmap.md | 1 + podium/datasets/dataset.py | 2 +- podium/datasets/iterator.py | 2 +- podium/field.py | 2 +- podium/vectorizers/__init__.py | 2 +- podium/vectorizers/impl/glove.py | 4 +-- podium/vectorizers/impl/nlpl.py | 4 +-- podium/vectorizers/vectorizer.py | 2 +- tests/vectorizers/test_vectorizer.py | 50 ++++++++++++---------------- 9 files changed, 31 insertions(+), 38 deletions(-) diff --git a/Roadmap.md b/Roadmap.md index 5fe9bb4d..caba2749 100644 --- a/Roadmap.md +++ b/Roadmap.md @@ -39,6 +39,7 @@ Order does not reflect importance. - Lowercase, truncate, extract POS, ... - Populate pretrained vectors - Word2vec + - Interface with e.g. 
gensim - Improve Dataset coverage - Data wrappers / abstract loaders for other source libraries and input formats - BucketIterator modifications diff --git a/podium/datasets/dataset.py b/podium/datasets/dataset.py index f26b3a6e..ab681adf 100644 --- a/podium/datasets/dataset.py +++ b/podium/datasets/dataset.py @@ -278,7 +278,7 @@ def shuffled(self) -> "DatasetBase": return self[shuffled_indices] def __repr__(self): - fields_str = ",\n".join(textwrap.indent(repr(f), " " * 8) for f in self.fields) + fields_str = ",\n".join(textwrap.indent(repr(f), " " * 4) for f in self.fields) fields_str = f"[\n{fields_str}\n \n]" attrs = {"size": len(self), "fields": fields_str} return repr_type_and_attrs(self, attrs, with_newlines=True, repr_values=False) diff --git a/podium/datasets/iterator.py b/podium/datasets/iterator.py index bd8781aa..794df5a0 100644 --- a/podium/datasets/iterator.py +++ b/podium/datasets/iterator.py @@ -494,7 +494,7 @@ def __init__(self, dataset: DatasetBase = None, shuffle=True, add_padding=True): returned as python lists of ``matrix_class`` instances. """ - batch_size = len(dataset) if dataset else 0 + batch_size = len(dataset) if dataset else None super().__init__( dataset=dataset, diff --git a/podium/field.py b/podium/field.py index cd946577..fc554412 100644 --- a/podium/field.py +++ b/podium/field.py @@ -954,7 +954,7 @@ def remove_pretokenize_hooks(self): def __repr__(self): fields_str = ",\n".join( - textwrap.indent(repr(f), " " * 8) for f in self._output_fields + textwrap.indent(repr(f), " " * 4) for f in self._output_fields ) fields_str = f"[\n{fields_str}\n \n]" attrs = {"fields": fields_str} diff --git a/podium/vectorizers/__init__.py b/podium/vectorizers/__init__.py index 3342dc08..60807585 100644 --- a/podium/vectorizers/__init__.py +++ b/podium/vectorizers/__init__.py @@ -5,8 +5,8 @@ from .impl import GloVe, NlplVectorizer from .tfidf import TfIdfVectorizer from .vectorizer import ( - BasicVectorStorage, VectorStorage, + WordVectors, random_normal_default_vector, zeros_default_vector, ) diff --git a/podium/vectorizers/impl/glove.py b/podium/vectorizers/impl/glove.py index ddd3b2cc..dd676d2d 100644 --- a/podium/vectorizers/impl/glove.py +++ b/podium/vectorizers/impl/glove.py @@ -1,10 +1,10 @@ import os from podium.storage import LargeResource -from podium.vectorizers.vectorizer import BasicVectorStorage, random_normal_default_vector +from podium.vectorizers.vectorizer import WordVectors, random_normal_default_vector -class GloVe(BasicVectorStorage): +class GloVe(WordVectors): """ Class represents concrete vector storage for GloVe vectors described in https://nlp.stanford.edu/projects/glove/ . 
Class contains a Large resource diff --git a/podium/vectorizers/impl/nlpl.py b/podium/vectorizers/impl/nlpl.py index 77e96c8a..e8398721 100644 --- a/podium/vectorizers/impl/nlpl.py +++ b/podium/vectorizers/impl/nlpl.py @@ -1,10 +1,10 @@ import os from podium.storage import LargeResource -from podium.vectorizers.vectorizer import BasicVectorStorage, zeros_default_vector +from podium.vectorizers.vectorizer import WordVectors, zeros_default_vector -class NlplVectorizer(BasicVectorStorage): +class NlplVectorizer(WordVectors): NAME = "nlpl_vectors" URL = "http://vectors.nlpl.eu/repository/11/36.zip" ARCHIVE_TYPE = "zip" diff --git a/podium/vectorizers/vectorizer.py b/podium/vectorizers/vectorizer.py index 2f7b1e1a..42946568 100644 --- a/podium/vectorizers/vectorizer.py +++ b/podium/vectorizers/vectorizer.py @@ -227,7 +227,7 @@ def __repr__(self): return repr_type_and_attrs(self, attrs) -class BasicVectorStorage(VectorStorage): +class WordVectors(VectorStorage): """ Basic implementation of VectorStorage that handles loading vectors from system storage. diff --git a/tests/vectorizers/test_vectorizer.py b/tests/vectorizers/test_vectorizer.py index 38454b65..e005d60d 100644 --- a/tests/vectorizers/test_vectorizer.py +++ b/tests/vectorizers/test_vectorizer.py @@ -48,7 +48,7 @@ def test_basic_not_initialized(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) with pytest.raises(RuntimeError): vect["."] with pytest.raises(RuntimeError): @@ -59,7 +59,7 @@ def test_basic_load_all_vectors(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) vect.load_all() assert len(vect._vectors) == 4 assert vect["."].shape == (3,) @@ -74,7 +74,7 @@ def test_get_vector_dimension(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) vect.load_all() assert vect.get_vector_dim() == vect["."].shape[0] assert vect.get_vector_dim() == 3 @@ -84,7 +84,7 @@ def test_get_vector_dim_not_initialized_vector_storage(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) with pytest.raises(RuntimeError): vect.get_vector_dim() @@ -93,7 +93,7 @@ def test_basic_load_with_header(): with create_temp_vect_file( vect_file_name="vect1", file_header=BASIC_VECT_HEADING, file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) vect.load_all() assert len(vect._vectors) == 4 assert vect["."].shape == (3,) @@ -108,9 +108,7 @@ def test_basic_no_token(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage( - path=vect_file_path, default_vector_function=None - ) + vect = vectorizer.WordVectors(path=vect_file_path, default_vector_function=None) vect.load_all() with pytest.raises(KeyError): print(vect["a"]) @@ -122,9 +120,7 @@ def test_basic_token_none(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) 
as vect_file_path: - vect = vectorizer.BasicVectorStorage( - path=vect_file_path, default_vector_function=None - ) + vect = vectorizer.WordVectors(path=vect_file_path, default_vector_function=None) vect.load_all() with pytest.raises(ValueError): vect[None] @@ -136,7 +132,7 @@ def test_basic_token_default(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage( + vect = vectorizer.WordVectors( path=vect_file_path, default_vector_function=vectorizer.zeros_default_vector ) vect.load_all() @@ -151,9 +147,7 @@ def test_basic_load_vocab(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage( - path=vect_file_path, default_vector_function=None - ) + vect = vectorizer.WordVectors(path=vect_file_path, default_vector_function=None) vocab = [".", ":"] vect.load_vocab(vocab=vocab) assert len(vect._vectors) == 2 @@ -174,7 +168,7 @@ def test_basic_load_vocab_none(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) with pytest.raises(ValueError): vect.load_vocab(vocab=None) @@ -201,7 +195,7 @@ def test_get_embedding_matrix(tokens, expected_matrix, expected_shape): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) vect.load_all() embedding_matrix = vect.get_embedding_matrix(vocab=tokens) @@ -213,7 +207,7 @@ def test_basic_diff_dimensions(): with create_temp_vect_file( vect_file_name="vect1", file_data=DIFF_DIM_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path) + vect = vectorizer.WordVectors(path=vect_file_path) with pytest.raises(RuntimeError): vect.load_all() @@ -236,7 +230,7 @@ def test_basic_max_vectors_less_than_num_lines(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path, max_vectors=2) + vect = vectorizer.WordVectors(path=vect_file_path, max_vectors=2) vect.load_all() assert len(vect._vectors) == 2 contained_elements = [".", "'"] @@ -249,7 +243,7 @@ def test_basic_max_vectors_vocab(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path, max_vectors=2) + vect = vectorizer.WordVectors(path=vect_file_path, max_vectors=2) vocab = [".", ":", ","] vect.load_vocab(vocab) assert len(vect._vectors) == 2 @@ -263,7 +257,7 @@ def test_basic_max_vectors_bigger_than_num_lines(): with create_temp_vect_file( vect_file_name="vect1", file_data=BASIC_VECT_DATA ) as vect_file_path: - vect = vectorizer.BasicVectorStorage(path=vect_file_path, max_vectors=20) + vect = vectorizer.WordVectors(path=vect_file_path, max_vectors=20) vect.load_all() assert len(vect._vectors) == 4 contained_elements = [".", "'", ":", ","] @@ -271,7 +265,7 @@ def test_basic_max_vectors_bigger_than_num_lines(): def test_basic_both_paths_none(): - vect = vectorizer.BasicVectorStorage(path=None, cache_path=None) + vect = vectorizer.WordVectors(path=None, cache_path=None) with pytest.raises(ValueError): vect.load_all() @@ -284,7 +278,7 @@ def test_basic_both_paths_doesnt_exist(tmpdir): cache_path = 
os.path.join(base, "cache.t") assert not os.path.exists(cache_path) - vect = vectorizer.BasicVectorStorage(path=file_path, cache_path=cache_path) + vect = vectorizer.WordVectors(path=file_path, cache_path=cache_path) with pytest.raises(ValueError): vect.load_all() @@ -295,7 +289,7 @@ def test_basic_path_none_cache_doesnt_exist(tmpdir): cache_path = os.path.join(base, "cache.t") assert not os.path.exists(cache_path) - vect = vectorizer.BasicVectorStorage(path=None, cache_path=cache_path) + vect = vectorizer.WordVectors(path=None, cache_path=cache_path) with pytest.raises(ValueError): vect.load_all() @@ -307,7 +301,7 @@ def test_basic_cache_max_vectors(tmpdir): assert os.path.exists(vect_file_path) cache_path = os.path.join(tmpdir, "cache.t") assert not os.path.exists(cache_path) - vect = vectorizer.BasicVectorStorage( + vect = vectorizer.WordVectors( path=vect_file_path, max_vectors=2, cache_path=cache_path ) vect.load_all() @@ -329,9 +323,7 @@ def test_basic_cache_vocab(): assert os.path.exists(vect_file_path) cache_path = os.path.join(base, "cache.t") assert not os.path.exists(cache_path) - vect = vectorizer.BasicVectorStorage( - path=vect_file_path, cache_path=cache_path - ) + vect = vectorizer.WordVectors(path=vect_file_path, cache_path=cache_path) vocab = [".", ":", ","] vect.load_vocab(vocab) @@ -351,7 +343,7 @@ def test_load_plain_text(): assert os.path.exists(file_path) file.writelines(BASIC_VECT_DATA_PLAIN) - vec_storage = vectorizer.BasicVectorStorage(file_path, binary=False) + vec_storage = vectorizer.WordVectors(file_path, binary=False) vec_storage.load_all() assert len(vec_storage) == 4
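
For reference, a minimal, self-contained sketch of the renamed `WordVectors` API (formerly `BasicVectorStorage`), assembled from the constructor arguments and methods exercised in the tests above. The vector file path and the example vocabulary are hypothetical; a real file would hold one `token v1 v2 ... vn` entry per line.

```python
from podium.vectorizers import WordVectors, zeros_default_vector

# Hypothetical path to a plain-text vector file ("token v1 v2 ... vn" per line).
VECTORS_PATH = "my_vectors.txt"

# Formerly BasicVectorStorage; tokens missing from the file fall back to zero vectors.
vectors = WordVectors(path=VECTORS_PATH, default_vector_function=zeros_default_vector)

# Load vectors only for the tokens we actually need.
vocab = ["the", "a", "movie"]
vectors.load_vocab(vocab=vocab)

# Stack the per-token vectors into a (len(vocab), dim) matrix for an embedding layer.
embedding_matrix = vectors.get_embedding_matrix(vocab=vocab)
print(embedding_matrix.shape, vectors.get_vector_dim())
```

The diff suggests only the class name changed in this patch, so downstream code should need just the import and name swap.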