From 97c114dc35bd1d4334fff41ec0f0351696971145 Mon Sep 17 00:00:00 2001 From: "Wang, Chang" Date: Mon, 25 Dec 2023 16:52:47 +0800 Subject: [PATCH 001/101] add QuantizeLinearForQbits activation contiguous check (#1072) Signed-off-by: changwangss --- intel_extension_for_transformers/llm/quantization/nn/modules.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/intel_extension_for_transformers/llm/quantization/nn/modules.py b/intel_extension_for_transformers/llm/quantization/nn/modules.py index c3f4c905e16..88223fcf275 100644 --- a/intel_extension_for_transformers/llm/quantization/nn/modules.py +++ b/intel_extension_for_transformers/llm/quantization/nn/modules.py @@ -120,6 +120,8 @@ def forward(self, x: torch.Tensor): m = reduce(mul, shape[0:-1]) out = torch.zeros(m, self.out_features, dtype=x.dtype) bias = None if self.bias is None else self.bias.data + if not x.is_contiguous(): + x = x.contiguous() out = matmul_kbit( x.view(m, shape[-1]), self.weight, bias, out, self.compute_dtype, self.weight_dtype, self.scale_dtype, do_dequant=self.training From 3e14b0517cb267d8fa67bb4b6e6c59df2a379c19 Mon Sep 17 00:00:00 2001 From: zhentaoyu Date: Mon, 25 Dec 2023 20:32:09 +0800 Subject: [PATCH 002/101] [Doc] Add doc of save & load low bit model in CPU (#1071) Add doc of save & load low bit model in CPU Signed-off-by: Yu, Zhentao --- docs/weightonlyquant.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/weightonlyquant.md b/docs/weightonlyquant.md index 5ba72bd5741..65b9d2ec1d9 100644 --- a/docs/weightonlyquant.md +++ b/docs/weightonlyquant.md @@ -115,3 +115,24 @@ gen_ids = woq_model.generate(input_ids, max_new_tokens=32, **generate_kwargs) gen_text = tokenizer.batch_decode(gen_ids, skip_special_tokens=True) print(gen_text) ``` + +You can also save and load your quantized low bit model by the below code. 
+ +```python +from intel_extension_for_transformers.transformers import AutoModelForCausalLM + +model_path = "meta-llama/Llama-2-7b-chat-hf" # your_pytorch_model_path_or_HF_model_name +saved_dir = "4_bit_llama2" # your_saved_model_dir +# quant +model = AutoModelForCausalLM.from_pretrained(model_path, load_in_4bit=True, use_llm_runtime=False) +# save quant model +model.save_pretrained(saved_dir) +# load quant model +loaded_model = AutoModelForCausalLM.from_pretrained(saved_dir) +``` +| Inference Framework | Load GPT-Q model from HuggingFace | Load the saved low-precision model from ITREX | +|:--------------:|:----------:|:----------:| +| LLM Runtime (use_llm_runtime=True) | ✔ | ✔ | +| PyTorch (use_llm_runtime=False) | stay tuned | ✔ | + +> Note: Only supports CPU device for now. For LLM runtime model loading usage, please refer to [graph readme](../intel_extension_for_transformers/llm/runtime/graph/README.md#2-run-llm-with-transformer-based-api) From f29c1ec7a87eed9a06d1a32b9ff37c911f0470d3 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Mon, 25 Dec 2023 20:33:24 +0800 Subject: [PATCH 003/101] [NeuralChat] Support magicoder model (#1067) * Support magicoder model and refine load model Signed-off-by: lvliang-intel --- .../llm/quantization/optimization.py | 7 +- .../neural_chat/chatbot.py | 12 ++- .../neural_chat/models/base_model.py | 18 +++- .../neural_chat/models/model_utils.py | 84 +++++++++++-------- .../neural_chat/prompts/prompt.py | 11 ++- 5 files changed, 89 insertions(+), 43 deletions(-) diff --git a/intel_extension_for_transformers/llm/quantization/optimization.py b/intel_extension_for_transformers/llm/quantization/optimization.py index 30062aa7e58..0fc454212ea 100644 --- a/intel_extension_for_transformers/llm/quantization/optimization.py +++ b/intel_extension_for_transformers/llm/quantization/optimization.py @@ -52,10 +52,11 @@ def optimize(self, model, use_llm_runtime=False): or re.search("bloom", model_name, re.IGNORECASE) or re.search("llama", 
model_name, re.IGNORECASE) or re.search("opt", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v1", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v2", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v3", model_name, re.IGNORECASE) + or re.search("neural-chat", model_name, re.IGNORECASE) or re.search("starcoder", model_name, re.IGNORECASE) + or re.search("codegen", model_name, re.IGNORECASE) + or re.search("mistral", model_name, re.IGNORECASE) + or re.search("magicoder", model_name, re.IGNORECASE) or re.search("solar", model_name, re.IGNORECASE) ): from intel_extension_for_transformers.transformers import AutoModelForCausalLM diff --git a/intel_extension_for_transformers/neural_chat/chatbot.py b/intel_extension_for_transformers/neural_chat/chatbot.py index b4d9613365f..57d7daefcbc 100644 --- a/intel_extension_for_transformers/neural_chat/chatbot.py +++ b/intel_extension_for_transformers/neural_chat/chatbot.py @@ -69,7 +69,7 @@ def build_chatbot(config: PipelineConfig=None): return # create model adapter - if "llama" in config.model_name_or_path.lower() or "magicoder" in config.model_name_or_path.lower(): + if "llama" in config.model_name_or_path.lower(): from .models.llama_model import LlamaModel adapter = LlamaModel() elif "mpt" in config.model_name_or_path.lower(): @@ -95,7 +95,8 @@ def build_chatbot(config: PipelineConfig=None): "flan-t5" in config.model_name_or_path.lower() or \ "bloom" in config.model_name_or_path.lower() or \ "starcoder" in config.model_name_or_path.lower() or \ - "codegen" in config.model_name_or_path.lower(): + "codegen" in config.model_name_or_path.lower() or \ + "magicoder" in config.model_name_or_path.lower(): from .models.base_model import BaseModel adapter = BaseModel() else: @@ -163,6 +164,7 @@ def build_chatbot(config: PipelineConfig=None): try: adapter.load_model(parameters) except RuntimeError as e: + logger.error(f"Exception: {e}") if "out of memory" in str(e): 
set_latest_error(ErrorCodes.ERROR_OUT_OF_MEMORY) elif "devices are busy or unavailable" in str(e): @@ -173,6 +175,7 @@ def build_chatbot(config: PipelineConfig=None): set_latest_error(ErrorCodes.ERROR_GENERIC) return except ValueError as e: + logger.error(f"Exception: {e}") if "load_model: unsupported device" in str(e): set_latest_error(ErrorCodes.ERROR_DEVICE_NOT_SUPPORTED) elif "load_model: unsupported model" in str(e): @@ -187,6 +190,7 @@ def build_chatbot(config: PipelineConfig=None): set_latest_error(ErrorCodes.ERROR_GENERIC) return except Exception as e: + logger.error(f"Exception: {e}") set_latest_error(ErrorCodes.ERROR_GENERIC) return return adapter @@ -204,14 +208,17 @@ def finetune_model(config: BaseFinetuningConfig): try: finetuning.finetune() except FileNotFoundError as e: + logger.error(f"Exception: {e}") if "Couldn't find a dataset script" in str(e): set_latest_error(ErrorCodes.ERROR_DATASET_NOT_FOUND) except ValueError as e: + logger.error(f"Exception: {e}") if "--do_eval requires a validation dataset" in str(e): set_latest_error(ErrorCodes.ERROR_VALIDATION_FILE_NOT_FOUND) elif "--do_train requires a train dataset" in str(e): set_latest_error(ErrorCodes.ERROR_TRAIN_FILE_NOT_FOUND) except Exception as e: + logger.error(f"Exception: {e}") if config.finetune_args.peft == "lora": set_latest_error(ErrorCodes.ERROR_LORA_FINETUNE_FAIL) elif config.finetune_args.peft == "llama_adapter": @@ -237,6 +244,7 @@ def optimize_model(model, config, use_llm_runtime=False): try: model = optimization.optimize(model, use_llm_runtime) except Exception as e: + logger.error(f"Exception: {e}") from intel_extension_for_transformers.transformers import ( MixedPrecisionConfig, WeightOnlyQuantConfig, diff --git a/intel_extension_for_transformers/neural_chat/models/base_model.py b/intel_extension_for_transformers/neural_chat/models/base_model.py index cf94ffe6863..2f29e1aea61 100644 --- a/intel_extension_for_transformers/neural_chat/models/base_model.py +++ 
b/intel_extension_for_transformers/neural_chat/models/base_model.py @@ -24,6 +24,7 @@ from ..utils.common import is_audio_file from .model_utils import load_model, predict, predict_stream, MODELS from ..prompts import PromptTemplate +from ..prompts.prompt import MAGICODER_PROMPT from ..utils.error_utils import set_latest_error from ..errorcode import ErrorCodes import logging @@ -163,7 +164,7 @@ def predict_stream(self, query, origin_query="", config=None): self.get_conv_template(self.model_name, config.task) if (self.conv_template.roles[0] in query and self.conv_template.roles[1] in query) or \ "starcoder" in self.model_name.lower() or "codellama" in self.model_name.lower() or \ - "codegen" in self.model_name.lower(): + "codegen" in self.model_name.lower() or "magicoder" in self.model_name.lower(): query_include_prompt = True # plugin pre actions @@ -207,6 +208,16 @@ def predict_stream(self, query, origin_query="", config=None): if not query_include_prompt and not is_plugin_enabled("retrieval"): query = self.prepare_prompt(query, self.model_name, config.task) + # Phind/Phind-CodeLlama-34B-v2 model accpects Alpaca/Vicuna instruction format. 
+ if "phind" in self.model_name.lower(): + conv_template = PromptTemplate(name="phind") + conv_template.append_message(conv_template.roles[0], query) + conv_template.append_message(conv_template.roles[1], None) + query = conv_template.get_prompt() + + if "magicoder" in self.model_name.lower(): + query = MAGICODER_PROMPT.format(instruction=query) + try: response = predict_stream( **construct_parameters(query, self.model_name, self.device, self.assistant_model, config)) @@ -256,7 +267,7 @@ def predict(self, query, origin_query="", config=None): self.get_conv_template(self.model_name, config.task) if (self.conv_template.roles[0] in query and self.conv_template.roles[1] in query) or \ "starcoder" in self.model_name.lower() or "codellama" in self.model_name.lower() or \ - "codegen" in self.model_name.lower(): + "codegen" in self.model_name.lower() or "magicoder" in self.model_name.lower(): query_include_prompt = True # plugin pre actions @@ -298,6 +309,9 @@ def predict(self, query, origin_query="", config=None): conv_template.append_message(conv_template.roles[1], None) query = conv_template.get_prompt() + if "magicoder" in self.model_name.lower(): + query = MAGICODER_PROMPT.format(instruction=query) + # LLM inference try: response = predict( diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 272ff14485b..014e68e38bf 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -416,30 +416,45 @@ def load_model( else: MODELS[model_name]["assistant_model"] = None + try: + config = AutoConfig.from_pretrained(model_name, use_auth_token=hf_access_token, trust_remote_code=True \ + if (re.search("chatglm", model_name, re.IGNORECASE) or \ + re.search("qwen", model_name, re.IGNORECASE)) else False) + except ValueError as e: + logging.error(f"Exception: {e}") + if "Unrecognized model 
in" in str(e): + raise ValueError(f"load_model: model config is not found, {e}") + else: + raise ValueError(f"load_model: unknown ValueError occurred, {e}") + except EnvironmentError as e: + logging.error(f"Exception: {e}") + if "not a local folder and is not a valid model identifier" in str(e): + raise ValueError(f"load_model: model name or path is not found, {e}") + else: + raise ValueError(f"load_model: unknown EnvironmentError occurred, {e}") + except Exception as e: + logging.error(f"Exception: {e}") + raise ValueError(f"load_model: an unexpected error occurred, {e}") + + MODELS[model_name]["model_type"] = config.model_type + try: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, - use_fast=False if (re.search("llama", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v2", model_name, re.IGNORECASE)) else True, + use_fast=False if config.model_type == "llama" else True, use_auth_token=hf_access_token, trust_remote_code=True if (re.search("qwen", model_name, re.IGNORECASE) or \ re.search("chatglm", model_name, re.IGNORECASE)) else False, ) except EnvironmentError as e: + logging.error(f"Exception: {e}") if "not a local folder and is not a valid model identifier" in str(e): - raise ValueError("load_model: tokenizer is not found") - else: - raise - - try: - config = AutoConfig.from_pretrained(model_name, use_auth_token=hf_access_token, trust_remote_code=True \ - if (re.search("chatglm", model_name, re.IGNORECASE) or \ - re.search("qwen", model_name, re.IGNORECASE)) else False) - except ValueError as e: - if "Unrecognized model in" in str(e): - raise ValueError("load_model: model config is not found") + raise ValueError(f"load_model: tokenizer is not found, {e}") else: - raise + raise ValueError(f"load_model: unknown EnvironmentError occurred, {e}") + except Exception as e: + logging.error(f"Exception: {e}") + raise ValueError(f"load_model: an unexpected error occurred, {e}") load_to_meta = model_on_meta(config) @@ -478,19 +493,13 @@ def 
load_model( trust_remote_code=True) elif (( re.search("gpt", model_name, re.IGNORECASE) - or re.search("mpt", model_name, re.IGNORECASE) - or re.search("bloom", model_name, re.IGNORECASE) - or re.search("llama", model_name, re.IGNORECASE) - or re.search("magicoder", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v1", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v2", model_name, re.IGNORECASE) - or re.search("neural-chat-7b-v3", model_name, re.IGNORECASE) - or re.search("qwen", model_name, re.IGNORECASE) - or re.search("starcoder", model_name, re.IGNORECASE) - or re.search("codellama", model_name, re.IGNORECASE) - or re.search("mistral", model_name, re.IGNORECASE) - or re.search("codegen", model_name, re.IGNORECASE) - ) and not ipex_int8) or re.search("opt", model_name, re.IGNORECASE): + or config.model_type == "bloom" + or config.model_type == "qwen" + or config.model_type == "gpt_bigcode" + or config.model_type == "mpt" + or config.model_type == "llama" + or config.model_type == "mistral" + ) and not ipex_int8) or config.model_type == "opt": with smart_context_manager(use_deepspeed=use_deepspeed): model = AutoModelForCausalLM.from_pretrained( model_name, @@ -498,13 +507,12 @@ def load_model( torch_dtype=torch_dtype, low_cpu_mem_usage=True, quantization_config=bitsandbytes_quant_config, - trust_remote_code=True if (re.search("qwen", model_name, re.IGNORECASE) or \ + trust_remote_code=True if (config.model_type == "qwen" or \ re.search("codegen", model_name, re.IGNORECASE)) else False ) elif ( - (re.search("starcoder", model_name, re.IGNORECASE) - or re.search("codellama", model_name, re.IGNORECASE) - or re.search("codegen", model_name, re.IGNORECASE) + (config.model_type == "gpt_bigcode" + or config.model_type == "llama" ) and ipex_int8 ): with smart_context_manager(use_deepspeed=use_deepspeed): @@ -520,9 +528,9 @@ def load_model( model_name, file_name="best_model.pt", ) - elif( - (re.search("llama", model_name, re.IGNORECASE) - or 
re.search("opt", model_name, re.IGNORECASE) + elif ( + (config.model_type == "llama" + or config.model_type == "opt" or re.search("gpt_neox", model_name, re.IGNORECASE) or re.search("gptj", model_name, re.IGNORECASE) or re.search("falcon", model_name, re.IGNORECASE) @@ -547,10 +555,14 @@ def load_model( raise ValueError(f"unsupported model name or path {model_name}, \ only supports FLAN-T5/LLAMA/MPT/GPT/BLOOM/OPT/QWEN/NEURAL-CHAT/MISTRAL/CODELLAMA/STARCODER/CODEGEN now.") except EnvironmentError as e: + logging.error(f"Exception: {e}") if "not a local folder and is not a valid model identifier" in str(e): raise ValueError("load_model: model name or path is not found") else: - raise + raise ValueError(f"load_model: unknown EnvironmentError occurred, {e}") + except Exception as e: + logging.error(f"Exception: {e}") + raise ValueError(f"load_model: an unexpected error occurred, {e}") if re.search("llama", model.config.architectures[0], re.IGNORECASE): # unwind broken decapoda-research config @@ -1192,6 +1204,8 @@ def predict(**params): output = tokenizer.decode(generation_output.sequences[0], skip_special_tokens=True) if "### Response:" in output: return output.split("### Response:")[1].strip() + if "@@ Response" in output: + return output.split("@@ Response")[1].strip() if "### Assistant" in output: return output.split("### Assistant:")[1].strip() if "\nassistant\n" in output: diff --git a/intel_extension_for_transformers/neural_chat/prompts/prompt.py b/intel_extension_for_transformers/neural_chat/prompts/prompt.py index 25d505401b8..cd03598223e 100644 --- a/intel_extension_for_transformers/neural_chat/prompts/prompt.py +++ b/intel_extension_for_transformers/neural_chat/prompts/prompt.py @@ -220,4 +220,13 @@ def get_prompt(self) -> str: return res def clear_messages(self) -> str: - self.conv.messages = [] \ No newline at end of file + self.conv.messages = [] + +# pylint: disable=C0301 +MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that 
consistently delivers accurate and reliable responses to user instructions. + +@@ Instruction +{instruction} + +@@ Response +""" From 8f75eb1c61f1390c80407900eff040bc222862ef Mon Sep 17 00:00:00 2001 From: Yi DING Date: Mon, 25 Dec 2023 04:35:24 -0800 Subject: [PATCH 004/101] [LLM Runtime] Sync inference scripts (#961) Sync inference scripts --- .github/workflows/cpp-graph-test.yml | 20 +- .../script/models/cpp_graph_inference.sh | 249 ----------- .../workflows/script/models/local_models.json | 17 + .../llm/runtime/graph/requirements.txt | 12 +- .../graph/scripts/ci/calculate_percentiles.py | 36 +- .../graph/scripts/ci/cpp_graph_inference.sh | 403 ++++++++++++++++++ .../graph/scripts/ci/cpp_graph_prompts.json | 57 +++ .../graph/scripts/requirements/baichuan.txt | 3 + .../graph/scripts/requirements/chatglm-6b.txt | 3 + .../graph/scripts/requirements/common.txt | 12 + .../graph/scripts/requirements/mistral.txt | 2 + 11 files changed, 542 insertions(+), 272 deletions(-) delete mode 100644 .github/workflows/script/models/cpp_graph_inference.sh create mode 100644 .github/workflows/script/models/local_models.json rename .github/workflows/script/models/calculate_percentage.py => intel_extension_for_transformers/llm/runtime/graph/scripts/ci/calculate_percentiles.py (74%) mode change 100644 => 100755 create mode 100755 intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/common.txt create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/mistral.txt diff --git a/.github/workflows/cpp-graph-test.yml 
b/.github/workflows/cpp-graph-test.yml index c5024b11029..6bb001b4714 100644 --- a/.github/workflows/cpp-graph-test.yml +++ b/.github/workflows/cpp-graph-test.yml @@ -5,10 +5,9 @@ on: branches: [main] paths: - '.github/workflows/cpp-graph-test.yml' - - '.github/workflows/script/models/cpp_graph_inference.sh' - 'intel_extension_for_transformers/llm/runtime/graph/**' - 'intel_extension_for_transformers/llm/library/jblas/**' - - '!intel_extension_for_transformers/llm/runtime/graph/*.md' + - '!**/*.md' workflow_dispatch: inputs: compiler_version: @@ -70,8 +69,19 @@ jobs: - name: BF16 Benchmark run: | - cd ${{ github.workspace }}/.github/workflows/script/models - bash cpp_graph_inference.sh cpp-graph-test ${{ matrix.modelName }} ${{ env.INPUT_COMPILER_VERSION }} + WORKSPACE=${{ env.WORKING_DIR }} bash -eo pipefail ${{ env.GRAPH_DIR }}/scripts/ci/cpp_graph_inference.sh \ + --local_models="${{ github.workspace }}/.github/workflows/script/models/local_models.json" \ + --cores_list="48," \ + --input_list="32,1024" \ + -- \ + cpp-graph-test \ + ${{ matrix.modelName }} \ + ${{ env.GRAPH_DIR }} \ + ${{ env.WORKING_DIR }} \ + ${{ env.INPUT_COMPILER_VERSION }} + env: + WORKSPACE: ${{ env.WORKING_DIR }} + GRAPH_DIR: ${{ env.WORKING_DIR }}/intel_extension_for_transformers/llm/runtime/graph - name: Rename summary run: | @@ -137,7 +147,7 @@ jobs: /usr/bin/bash generate_report.sh --workflow=deploy sed -n '//,/<\/body>/p' generated/report.html | sed -r '/^$/d' | sed -r 's/^ +//g' >> $GITHUB_STEP_SUMMARY env: - RUN_DISPLAY_URL: https://github.com/VincyZhang/intel-extension-for-transformers/actions/runs/${{ github.run_id }} + RUN_DISPLAY_URL: https://github.com/${{github.repository}}/actions/runs/${{ github.run_id }} BUILD_NUMBER: ${{ github.run_id }} JOB_STATUS: succeed MR_source_branch: ${{ github.head_ref }} diff --git a/.github/workflows/script/models/cpp_graph_inference.sh b/.github/workflows/script/models/cpp_graph_inference.sh deleted file mode 100644 index 
10a69547365..00000000000 --- a/.github/workflows/script/models/cpp_graph_inference.sh +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/bash -set -eo pipefail -set -x - -cores_list=(48) -batch_size_list=(1) -input_list=(32 1024) -output=32 -beam_list=(1) - -function main() { - conda_env="$1" - model="$2" - compiler_version="$3" - working_dir="${WORKING_DIR}/intel_extension_for_transformers/llm/runtime/graph" - # init params - if [[ "${model}" == "llama-7b-hf" ]]; then - convert_script="${working_dir}/scripts/convert_llama.py" - quant_script="./build/bin/quant_llama" - infer_cmd="./build/bin/run_llama" - input_model="/tf_dataset2/models/nlp_toolkit/llama-7b-hf" - precision_list=("q4_j_b128" "q4_j_b32" "q4_0") - elif [[ "${model}" == "llama-2-7b-chat" ]]; then - convert_script="${working_dir}/scripts/convert_llama.py" - quant_script="./build/bin/quant_llama" - infer_cmd="./build/bin/run_llama" - input_model="/tf_dataset2/models/nlp_toolkit/llama-2-7b-chat/Llama-2-7b-chat-hf" - precision_list=("q4_j_b128" "q4_j_b32" "q4_0" "q8e4m3_j_f32_g128_fp8" "q8e5m2_j_f32_g128_fp8" "q8e4m3_j_f32_g128_fp32" "q8e5m2_j_f32_g128_fp32" "q4e2m1_j_f32_g128" "nf4_j_f32_g128") - elif [[ "${model}" == "gpt-neox-20b" ]]; then - convert_script="${working_dir}/scripts/convert_gptneox.py" - quant_script="./build/bin/quant_gptneox" - infer_cmd="./build/bin/run_gptneox" - input_model="/tf_dataset2/models/nlp_toolkit/gpt-neox-20b" - precision_list=("q4_j_b128" "q4_j_b32" "q4_0") - elif [[ "${model}" == "mpt-7b" ]]; then - convert_script="${working_dir}/scripts/convert_mpt.py" - quant_script="./build/bin/quant_mpt" - infer_cmd="./build/bin/run_mpt" - input_model="/tf_dataset2/models/nlp_toolkit/mpt-7b" - precision_list=("q4_j_b128" "q4_j_b32" "q4_0") - elif [[ "${model}" == "falcon-7b" ]]; then - convert_script="${working_dir}/scripts/convert_falcon.py" - quant_script="./build/bin/quant_falcon" - infer_cmd="./build/bin/run_falcon" - input_model="/tf_dataset2/models/nlp_toolkit/falcon-7b" - 
precision_list=("q4_j_b128" "q4_j_b32" "q4_0") - elif [[ "${model}" == "gptj-6b" ]]; then - convert_script="${working_dir}/scripts/convert_gptj.py" - quant_script="./build/bin/quant_gptj" - infer_cmd="./build/bin/run_gptj" - model_name="EleutherAI/gpt-j-6b" - input_model="/tf_dataset2/models/pytorch/gpt-j-6B" - precision_list=("q4_j_b128" "q4_j_b128_asym") - elif [[ "${model}" == "starcoder-3b" ]]; then - convert_script="${working_dir}/scripts/convert_starcoder.py" - quant_script="./build/bin/quant_starcoder" - infer_cmd="./build/bin/run_starcoder" - model_name="bigcode/starcoder" - input_model="/tf_dataset2/models/pytorch/starcode_3b" - precision_list=("q4_j_b128" "q4_j_b32" "q4_0") - fi - - # init conda - # . $(dirname ${CONDA_EXE})/../etc/profile.d/conda.sh - conda activate $conda_env || source activate $conda_env - pip install cmake ninja psutil - if [[ "${compiler_version}" != "12.1.0" ]]; then - conda install --update-deps -c conda-forge gxx==${compiler_version} gcc==${compiler_version} gxx_linux-64==${compiler_version} libstdcxx-ng sysroot_linux-64 -y - fi - - # compile binary - cd ${working_dir} - mkdir build - cd build - cmake .. -G Ninja - ninja - cd .. - - ## prepare example requiement - pip install -r requirements.txt - - ## prepare fp32 bin - python ${convert_script} --outtype f32 --outfile ${working_dir}/${model}-fp32.bin ${input_model} - - # launch benchmark - for cores_per_instance in ${cores_list[@]}; do - for batch_size in ${batch_size_list[@]}; do - for input in ${input_list[@]}; do - for precision in ${precision_list[@]}; do - # [[ "${input}" == "32" ]] && output=32 || - if [[ "${input}" == "32" ]]; then - prompt="Once upon a time, there existed a little girl, who liked to have adventures. She wanted to go to places and meet new people, and have fun." 
- elif [[ "${input}" == "10" ]]; then - prompt="Once upon a time, there existed a " - output=1024 - elif [[ "${input}" == "2016" ]]; then - if [[ "${model}" == "llama"* ]]; then - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. 
I'd say the only real solution was through the use of artificial selection, somehow. So far, I have not seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). 
Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. 
It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. But it's something that fits so well into the design that people never realized it, until they were told about it. 
In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem that I had to face in the end was overscoping. I had too much planned" - elif [[ "${model}" == "gptj-6b" ]]; then - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need control something to reach an objective. 
That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. 
Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. 
I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. 
For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem I had to face in the end was overscoping. I had too much planned, and could not get it all done. Content-wise, I had several kinds of pasta planned - Wikipedia is just amazing in that regard, split into several different groups, from small Pastina to huge Pasta al forno. 
But because of time constraints, I ended up scratching most of them, and ended up with 5 different types of small pasta - barely something to start when talking about the evolution of Pasta. Pastas used in the game. Unfortunately, the macs where never used Which is one of the saddest things about the project, really. It had the framework and the features to allow an endless number of elements in there, but I just did not have time to draw the rest of the assets needed (something I loved to do, by the way)." - else - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on web. Playing on web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. 
Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. 
There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. 
It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. 
In the game, that's indicated by the plate's decoration - its color denotes the team owner. But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem I had to face in the end was overscoping. I had too much planned, and could not get it all done. Content-wise, I had several kinds of pasta planned - Wikipedia is just amazing in that regard, split into several different groups, from small Pastina to huge Pasta al forno. But because of time constraints, I ended up scratching most of them, and ended up with 5 different types of small pasta - barely something to start when talking about the evolution of Pasta. Pastas used in the game. Unfortunately, the macs where never used Which is one of the saddest things about the project, really. It had the framework and the features to allow an endless number of elements in there" - fi - elif [[ "${input}" == "1024" ]]; then - if [[ "${model}" == "llama"* ]]; then - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. 
I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I have not seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level but had some kind of foe trying to stop them from doing so. 
I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. 
Once a pasta is in the vicinity of a plate,", - elif [[ "${model}" == "gptj-6b" ]]; then - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. 
So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. 
Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept" - else - prompt="It is done, and submitted. You can play 'Survival of the Tastiest' on the Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in the space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. 
That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face it. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. 
Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around." - fi - fi - ctx=$(($output + $input + 10)) - logs_file="${model}-${precision}-${cores_per_instance}-${batch_size}-${input}-${output}.log" - ## prepare model.bin - quantized_model="${model}-${precision}.bin" - if [[ ! 
-e ${quantized_model} ]]; then - if [[ ${precision} == "q4_j_vnni_b128" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 128 --scale_dtype fp32 --compute_dtype int8 --alg sym - # deprecated since bfloat16 scale not mature - # elif [[ ${precision} == "q4_j_vnni_bf16_b32" ]]; then - # ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 32 --scale_dtype bf16 --compute_dtype int8 --alg sym - elif [[ ${precision} == "q8e4m3_j_f32_g128_fp8" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype fp8 --group_size 128 --scale_dtype fp8 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q8e5m2_j_f32_g128_fp8" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype fp8_e5m2 --group_size 128 --scale_dtype fp8 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q8e4m3_j_f32_g128_fp32" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype fp8 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q8e5m2_j_f32_g128_fp32" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype fp8_e5m2 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q4e2m1_j_f32_g128" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin 
--nthread $cores_per_instance --weight_dtype fp4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "nf4_j_f32_g128" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype nf4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q4_j_vnni_b32" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 32 --scale_dtype fp32 --compute_dtype int8 --alg sym - elif [[ ${precision} == "q4_j_b32" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 32 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q4_j_b128" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym - elif [[ ${precision} == "q4_j_b128_asym" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg asym - elif [[ ${precision} == "q4_0" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 --group_size 32 --compute_dtype int8 --alg sym --use_ggml - elif [[ ${precision} == "q4_1" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype 
int4 --group_size 32 --compute_dtype int8 --alg asym --use_ggml - elif [[ ${precision} == "q8_0" ]]; then - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int8 --group_size 32 --compute_dtype int8 --alg sym --use_ggml - else - ${quant_script} --model_file ${working_dir}/${model}-fp32.bin --out_file ${working_dir}/${model}-${precision}.bin --nthread $cores_per_instance --weight_dtype int4 - fi - fi - ## run inference - export LANG=en_US.UTF-8 - export LC_ALL=en_US.UTF-8 - OMP_NUM_THREADS=$(($cores_per_instance * 1)) numactl -m 0 -C 0-$(($cores_per_instance * 1 - 1)) \ - $infer_cmd --seed 1234 -t $cores_per_instance -b 2047 -c ${ctx} -n ${output} -m ${model}-${precision}.bin -p "$prompt" 2>&1 | tee ${WORKING_DIR}/${logs_file} || true & - minitor - - python ${WORKING_DIR}/.github/workflows/script/models/calculate_percentage.py ${WORKING_DIR}/${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} - done - done - done - done - conda deactivate >/dev/null 2>&1 -} - -function collect_perf_logs_llm { - # latency - log_dir="${WORKING_DIR}/$1" - eval_time=($(grep -i 'eval time' ${log_dir} | grep -v "prompt" | sed -e 's/.*eval time = .* runs.*(//;s/[^0-9.]//g;s/\.$//' | awk ' - BEGIN { - num = 0; - sum = 0; - }{ - num ++; - sum += $1; - }END { - if(num > 0) { - printf("%.6f", sum / num); - }else { - printf("0"); - } - } - ')) - total_time=($(grep -i 'total time' ${log_dir} | sed -e 's/.*total time = //;s/[^0-9.]//g;s/\.$//' | awk ' - BEGIN { - num = 0; - sum = 0; - }{ - num ++; - sum += $1; - }END { - if(num > 0) { - printf("%.6f", sum / num); - }else { - printf("0"); - } - } - ')) - first_token_time=($(grep -i 'eval time' ${log_dir} | grep "prompt" | sed -e 's/.*prompt eval time = .* tokens.*(//;s/[^0-9.]//g;s/\.$//' | awk ' - BEGIN { - num = 0; - sum = 0; - }{ - num ++; - sum += $1; - }END { - if(num > 0) { - printf("%.6f", sum / 
num); - }else { - printf("0"); - } - } - ')) - input_tokens=$input - max_new_tokens=$output - # memory usage - used_memory=$(grep 'memory used total:' ${log_dir} | tail -n 1 | head -n 1 | awk '{print $(NF-1)}') - # summary - framework="engine" - mode_name="latency" - precision=$2 - link="${WORKING_DIR}/$1" - printf "${framework},${mode_name},${model_name},${precision},${batch_size}," | tee -a ${WORKING_DIR}/cpp_graph_summary.log - printf "${input_tokens},${max_new_tokens},${cores_per_instance},${latency[1]}," | tee -a ${WORKING_DIR}/cpp_graph_summary.log - printf "${first_token_time},${eval_time},${total_time},${used_memory},${link}\n" | tee -a ${WORKING_DIR}/cpp_graph_summary.log - set +x - echo -e "\n\n-------- Summary --------" - sed -n '1p;$p' ${WORKING_DIR}/cpp_graph_summary.log | column -t -s ',' -} - -function minitor() { - sleep 2 - echo "====== Monitor Start =======" - while true; do - if [ $(ps -ef | grep "$infer_cmd" | wc -l) -lt 2 ]; then - #python calculate_percertiles.py ${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} - sleep 3 - - break - fi - echo "$(date +%s), $(numastat -p $(ps -ef | grep "$infer_cmd" | grep -v grep | awk '{printf("%s ", $2)}'))" >>${WORKING_DIR}/memory.txt 2>&1 - done -} -function get_data() { - python calculate_percertiles.py ${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} -} -main $@ 2>&1 | tee ${WORKING_DIR}/launch.log diff --git a/.github/workflows/script/models/local_models.json b/.github/workflows/script/models/local_models.json new file mode 100644 index 00000000000..f1189b633c3 --- /dev/null +++ b/.github/workflows/script/models/local_models.json @@ -0,0 +1,17 @@ +{ + "meta-llama/Llama-2-7b-chat-hf": "/tf_dataset2/models/nlp_toolkit/llama-2-7b-chat/Llama-2-7b-chat-hf", + "EleutherAI/gpt-j-6b": "/tf_dataset2/models/pytorch/gpt-j-6B", + "EleutherAI/gpt-neox-20b": "/tf_dataset2/models/nlp_toolkit/gpt-neox-20b", + "mosaicml/mpt-7b": 
"/tf_dataset2/models/nlp_toolkit/mpt-7b", + "tiiuae/falcon-7b": "/tf_dataset2/models/nlp_toolkit/falcon-7b", + "bigcode/starcoder": "/tf_dataset2/models/pytorch/starcode_3b", + "bigscience/bloom-7b1": "/tf_dataset2/models/pytorch/bloom-7b1", + "facebook/opt-1.3b": "/tf_dataset2/models/pytorch/opt-1.3b", + "databricks/dolly-v2-3b": "/tf_dataset2/models/pytorch/dolly_v2_3b", + "THUDM/chatglm2-6b": "/tf_dataset2/models/pytorch/chatglm2-6b", + "THUDM/chatglm-6b": "/tf_dataset2/models/pytorch/chatglm-6b", + "baichuan-inc/Baichuan2-13B-Chat": "/tf_dataset2/models/pytorch/Baichuan2-13B-Chat", + "baichuan-inc/Baichuan-13B-Chat": "/tf_dataset2/models/pytorch/Baichuan-13B-Chat", + "mistralai/Mistral-7B-v0.1": "/tf_dataset2/models/pytorch/Mistral-7B-v0.1", + "Qwen/Qwen-7B-Chat": "/tf_dataset2/models/nlp_toolkit/Qwen-7B-Chat" +} diff --git a/intel_extension_for_transformers/llm/runtime/graph/requirements.txt b/intel_extension_for_transformers/llm/runtime/graph/requirements.txt index 3d67b759758..6067c750eb7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/requirements.txt +++ b/intel_extension_for_transformers/llm/runtime/graph/requirements.txt @@ -1,11 +1 @@ -torch -transformers -numpy -sentencepiece -protobuf<3.20 -einops -accelerate -peft -datasets -transformers_stream_generator -tiktoken +-r scripts/requirements/common.txt diff --git a/.github/workflows/script/models/calculate_percentage.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/calculate_percentiles.py old mode 100644 new mode 100755 similarity index 74% rename from .github/workflows/script/models/calculate_percentage.py rename to intel_extension_for_transformers/llm/runtime/graph/scripts/ci/calculate_percentiles.py index b79c6c6df25..f3e2e3fc489 --- a/.github/workflows/script/models/calculate_percentage.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/calculate_percentiles.py @@ -1,13 +1,31 @@ +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import numpy as np import re import sys import os + + def calculate_percentile(data, percentile): return np.percentile(data, percentile, method="closest_observation") + def calculate_mean(data): return np.mean(data) + def parse_output_file(file_path): predictions = [] with open(file_path, 'r', encoding='UTF-8', errors='ignore') as file: @@ -17,6 +35,8 @@ def parse_output_file(file_path): prediction_time = float(match.group(1)) # Assuming the prediction time is in the second column predictions.append(prediction_time) return predictions + + def parse_memory_file(memory_file): memory_values = [] if os.path.exists(memory_file): @@ -44,14 +64,15 @@ def parse_memory_file(memory_file): batch_size = sys.argv[5] model_input = sys.argv[6] model_output = sys.argv[7] - memory_file = os.environ.get("WORKING_DIR") + "/memory.txt" + memory_file = os.environ.get("WORKSPACE") + "/memory.txt" predictions = parse_output_file(output_file) + assert len(predictions) > 0, "Model has no ouput tokens!" 
first_token_latency = predictions[0] p90 = calculate_percentile(predictions, 90) p99 = calculate_percentile(predictions, 99) latency_mean = calculate_mean(predictions[1:]) total_latency = np.sum(predictions) - + print("P90: {:.2f} ms".format(p90)) print("P99: {:.2f} ms".format(p99)) print("average_latency: {:.2f} ms".format(latency_mean)) @@ -63,9 +84,10 @@ def parse_memory_file(memory_file): memory_mean = calculate_mean(top_50_percent) print("Memory Mean (Top 50%): {:.2f}".format(memory_mean)) - log_file = os.environ.get("WORKING_DIR") + "/cpp_graph_summary.log" - link = os.environ.get("WORKING_DIR") + os.path.basename(output_file) - with open (log_file, 'a') as f: + log_file = os.environ.get("WORKSPACE") + "/cpp_graph_summary.log" + log_prefix = os.environ.get("log_prefix") + link = str(log_prefix) + os.path.basename(output_file) + with open(log_file, 'a') as f: f.write("engine,") f.write("latency,") f.write(model + ",") @@ -81,8 +103,8 @@ def parse_memory_file(memory_file): f.write(link + ",") f.write("{:.2f},".format(p90)) f.write("{:.2f},".format(p99)) - #f.write(",latency:") - #for latency in predictions: + # f.write(",latency:") + # for latency in predictions: # f.write(",{:.2f}".format(latency)) f.write("\n") f.close() diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh new file mode 100755 index 00000000000..ff4d84f81f5 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh @@ -0,0 +1,403 @@ +#!/bin/bash +#=============================================================================== +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#=============================================================================== + +script_dir=$(dirname "${BASH_SOURCE[0]}") +set -x +quant_nthr=48 +cores_list=(32 48 56) +#cores_list=(48) +batch_size_list=(1) +#input_list=(10 32 1024 2012) +input_list=(32 1024 2012) +output_list=(32) +beam_list=(1) +# input_list=(32 512) +# output_list=(32 128 512) +extra_precision_list=("q4_j_i8_g128" "q4_j_i8_g32" "q4_0") # precisions to be tested for most of supported models + +ppl_dataset_list=("/tf_dataset2/datasets/nlp_toolkit/wikitext-2-raw-v1-data-test") +ppl_nctx_list=() # no ppl test by defalut +# ppl_nctx_list=(256 1024 2048) +drop_caches=false +ppl_fp32_test=false +ppl_mf16_test=false +local_models="" + +# parse named arguments +while [ $# -gt 0 ]; do + case "$1" in + --local_models=*) + # A json file which map HF model name to local path + local_models="${1#*=}" + ;; + --cores_list=*) + IFS=', ' read -r -a cores_list <<<"${1#*=}" + ;; + --batch_size_list=*) + IFS=', ' read -r -a batch_size_list <<<"${1#*=}" + ;; + --input_list=*) + # list of input length + IFS=', ' read -r -a input_list <<<"${1#*=}" + ;; + --output_list=*) + IFS=', ' read -r -a output_list <<<"${1#*=}" + ;; + --beam_list=*) + IFS=', ' read -r -a beam_list <<<"${1#*=}" + ;; + --ppl_dataset_list=*) + IFS=', ' read -r -a ppl_dataset_list <<<"${1#*=}" + ;; + --extra_precision_list=*) + # tested precisions = unique(extra_precision_list + model-specific precision) + IFS=', ' read -r -a extra_precision_list <<<"${1#*=}" + ;; + --drop_caches) + # be careful to turn this on; it requires sudo + 
drop_caches=true + ;; + --ppl_nctx_list=*) + # set to non-empty to enable ppl test + IFS=', ' read -r -a ppl_nctx_list <<<"${1#*=}" + ;; + --ppl_fp32_test) + ppl_fp32_test=true + ;; + --ppl_mf16_test) + # be careful to turn this on; it will double the workload + ppl_mf16_test=true + ;; + --) + shift + break + ;; + *) + break + ;; + esac + shift +done + +declare -p cores_list +declare -p batch_size_list +declare -p input_list +declare -p output_list +declare -p beam_list +declare -p extra_precision_list + +declare -p ppl_dataset_list +declare -p ppl_nctx_list +declare -p ppl_fp32_test +declare -p ppl_mf16_test + +function ppl_eval() { + local task_name="$1" + local n_cores="$2" + local model_path="$3" + local quantized_weight_path="$4" + local memory_dtype_list=('auto') + if [[ "$ppl_mf16_test" = true ]]; then + memory_dtype_list+=('f16') + fi + + echo "======= PPL Evaluation Start =======" + for memory_dtype in ${memory_dtype_list[@]}; do + for ppl_dataset in ${ppl_dataset_list[@]}; do + for ppl_nctx in ${ppl_nctx_list[@]}; do + local ppl_task_name="$task_name-ppl-$(basename -- "$ppl_dataset")-nctx$ppl_nctx-M$memory_dtype" + echo "***** PPL: $ppl_task_name *****" + OMP_NUM_THREADS=$(($n_cores * 1)) numactl -m 0 -C 0-$(($n_cores * 1 - 1)) \ + python scripts/perplexity.py --model_name "$model_path" --dataset_name "$ppl_dataset" --quantized_weight_path "$quantized_weight_path" --ctx_size $ppl_nctx --n_threads $n_cores --memory_dtype $memory_dtype 2>&1 | + tee "$WORKSPACE/$ppl_task_name.log" + mv out/ppl.png "$WORKSPACE/$ppl_task_name.png" + mv out/ppl_data.json "$WORKSPACE/$ppl_task_name.json" + done + done + done + echo "======= PPL Evaluation End =======" +} + +declare -A model_name_map +model_name_map["llama-2-7b-chat"]="meta-llama/Llama-2-7b-chat-hf" +model_name_map["gptj-6b"]="EleutherAI/gpt-j-6b" +model_name_map["gpt-neox-20b"]="EleutherAI/gpt-neox-20b" +model_name_map["mpt-7b"]="mosaicml/mpt-7b" +model_name_map["falcon-7b"]="tiiuae/falcon-7b" 
+model_name_map["starcoder-3b"]="bigcode/starcoder" +model_name_map["bloom-7b"]="bigscience/bloom-7b1" +model_name_map["opt-1.3b"]="facebook/opt-1.3b" +model_name_map["dolly-v2-3b"]="databricks/dolly-v2-3b" +model_name_map["chatglm2"]="THUDM/chatglm2-6b" +model_name_map["chatglm-6b"]="THUDM/chatglm-6b" +model_name_map["baichuan2-13b"]="baichuan-inc/Baichuan2-13B-Chat" +model_name_map["baichuan-13b"]="baichuan-inc/Baichuan-13B-Chat" +model_name_map["mistral-7b"]="mistralai/Mistral-7B-v0.1" +model_name_map["qwen-7b"]="Qwen/Qwen-7B-Chat" + +function main() { + conda_env="$1" + model="$2" + working_dir="$3" # The "graph" directory with CMakeFile.txt + export log_prefix="$4" # The log url prefix + compiler_version="$5" + PROMPTS_PATH="$script_dir/cpp_graph_prompts.json" + + # init params + precision_list=() + requirements_file="requirements.txt" # some models need extra constraints + + model_name="${model_name_map["$model"]}" + + if [[ -n "$local_models" ]]; then + model_path=$(python -c "import sys, json; print(json.load(sys.stdin).get('$model_name', '$model_name'))" <"$local_models") + else + model_path=$model_name + fi + + if [[ "${model}" == "llama-2-7b-chat" ]]; then + quant_script="./build/bin/quant_llama" + infer_cmd="./build/bin/run_llama" + precision_list+=( # Our "main model" + "q4_j1_i8_g128" "q4_j_f32_g128" "q4_1" "q8_0" + "q8e4m3_j_f32_g128" "q8e4m3_j_f32_g128_fp8" "q8e5m2_j_f32_g128" "q8e5m2_j_f32_g128_fp8" + "q4e2m1_j_f32_g128" "nf4_j_f32_g128" + ) + elif [[ "${model}" == "gptj-6b" ]]; then + quant_script="./build/bin/quant_gptj" + infer_cmd="./build/bin/run_gptj" + precision_list+=("q4_j1_i8_g128" "q4_j1_bf16_pc") + elif [[ "${model}" == "gpt-neox-20b" ]]; then + quant_script="./build/bin/quant_gptneox" + infer_cmd="./build/bin/run_gptneox" + elif [[ "${model}" == "mpt-7b" ]]; then + quant_script="./build/bin/quant_mpt" + infer_cmd="./build/bin/run_mpt" + elif [[ "${model}" == "falcon-7b" ]]; then + quant_script="./build/bin/quant_falcon" + 
infer_cmd="./build/bin/run_falcon" + elif [[ "${model}" == "starcoder-3b" ]]; then + quant_script="./build/bin/quant_starcoder" + infer_cmd="./build/bin/run_starcoder" + elif [[ "${model}" == "bloom-7b" ]]; then + quant_script="./build/bin/quant_bloom" + infer_cmd="./build/bin/run_bloom" + elif [[ "${model}" == "opt-1.3b" ]]; then + quant_script="./build/bin/quant_opt" + infer_cmd="./build/bin/run_opt" + elif [[ "${model}" == "dolly-v2-3b" ]]; then + quant_script="./build/bin/quant_dolly" + infer_cmd="./build/bin/run_dolly" + elif [[ "${model}" == "chatglm2" ]]; then + quant_script="./build/bin/quant_chatglm2" + infer_cmd="./build/bin/run_chatglm2" + elif [[ "${model}" == "chatglm-6b" ]]; then + quant_script="./build/bin/quant_chatglm" + infer_cmd="python ./scripts/inference.py" + extension=" --model_name chatglm --tokenizer $model_path" + requirements_file="scripts/requirements/chatglm-6b.txt" + elif [[ "${model}" == "baichuan2-13b" ]]; then + quant_script="./build/bin/quant_baichuan" + infer_cmd="python ./scripts/inference.py" + requirements_file="scripts/requirements/baichuan.txt" + extension=" --model_name baichuan --tokenizer $model_path" + elif [[ "${model}" == "baichuan-13b" ]]; then + quant_script="./build/bin/quant_baichuan" + infer_cmd="python ./scripts/inference.py" + extension=" --model_name baichuan --tokenizer $model_path" + requirements_file="scripts/requirements/baichuan.txt" + elif [[ "${model}" == "mistral-7b" ]]; then + quant_script="./build/bin/quant_mistral" + infer_cmd="./build/bin/run_mistral" + requirements_file="scripts/requirements/mistral.txt" + elif [[ "${model}" == "qwen-7b" ]]; then + quant_script="./build/bin/quant_qwen" + infer_cmd="./build/bin/run_qwen" + else + echo "Error: Unexpedted model: $model" 1>&2 + exit 1 + fi + + if [[ $(lscpu | grep i9-12900 | wc -l) != 0 ]]; then + cores_list=(16) + quant_nthr=12 + precision_list+=("q4_j_f32_g128") + fi + + # add additional precisions + declare -A precisions_seen + for p in 
"${precision_list[@]}"; do + precisions_seen[$p]=x + done + for p in "${extra_precision_list[@]}"; do + [[ ${precisions_seen[$p]} ]] && continue + precision_list+=("$p") + precisions_seen[$p]=x + done + + # init conda + #. $(dirname ${CONDA_EXE})/../etc/profile.d/conda.sh + conda activate $conda_env || source activate $conda_env + pip install cmake ninja psutil + if [[ "${compiler_version}" != "12.1.0" ]]; then + conda install --update-deps -c conda-forge gxx==${compiler_version} gcc==${compiler_version} gxx_linux-64==${compiler_version} libstdcxx-ng sysroot_linux-64 -y + fi + + # setup conda env for LLM + + # get cpu info + # sockets=$(lscpu |grep 'Socket(s):' |sed 's/.*://;s/ //g') + # cores_per_instance=$(lscpu |grep 'Core(s) per socket:' |sed 's/.*://;s/ //g') + + # compile binary + cd ${working_dir} + git submodule update --init --recursive -- ./application/third_party + mkdir build + cd build + cmake .. -G Ninja + ninja + cd .. + + ## prepare example requiement + pip install -r "$requirements_file" + + echo "======= Convert Start =======" + ## prepare fp32 bin + python "$working_dir/scripts/convert.py" --outtype f32 --outfile ${working_dir}/${model}-fp32.bin ${model_path} + echo "======= Convert End =======" + + # launch benchmark + for cores_per_instance_idx in "${!cores_list[@]}"; do + cores_per_instance=${cores_list[cores_per_instance_idx]} + for batch_size_idx in "${!batch_size_list[@]}"; do + batch_size=${batch_size_list[batch_size_idx]} + for input_idx in "${!input_list[@]}"; do + input=${input_list[input_idx]} + for precision_idx in "${!precision_list[@]}"; do + precision=${precision_list[precision_idx]} + # [[ "${input}" == "32" ]] && output=32 || + if [[ "${input}" == "10" ]]; then output=1024; else output=32; fi + if [[ "${model}" == "chatglm2" || "${model}" == "chatglm-6b" || "${model}" == "baichuan-13b" || "${model}" == "baichuan2-13b" ]]; then + output=32 + fi + prompt=$(python -c "import sys, json; i = json.load(sys.stdin)['$input']; 
print(i['prompts'][i['map'].get('$model', 'default')])" <$PROMPTS_PATH) + + if [[ -z $prompt ]]; then + echo "Error: Unexpedted input: $input" 1>&2 + continue + fi + ctx=$(($output + $input + 10)) + if [[ "$drop_caches" = true ]]; then + sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches' + fi + task_name="${model}-${precision}-${cores_per_instance}-${batch_size}-${input}-${output}" + logs_file="${task_name}.log" + ## prepare model.bin + if [[ ! -f ${working_dir}/${model}-${precision}.bin ]]; then + echo "======= Quantization Start =======" + local quant_script_prologue="'$quant_script' --model_file '$working_dir/$model-fp32.bin' --out_file '$working_dir/$model-$precision.bin' --nthread $quant_nthr" # in case there are space in the path + if [[ ${precision} == "q4_j_i8_g128" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size 128 --compute_dtype int8 --scale_dtype fp32 --alg sym" + # deprecated since bfloat16 scale not mature + # elif [[ ${precision} == "q4_j_i8_g32_bf16" ]]; then + # eval "$quant_script_prologue --weight_dtype int4 --group_size 32 --compute_dtype int8 --scale_dtype bf16 --alg sym" + elif [[ ${precision} == "q4_j_i8_g32" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size 32 --compute_dtype int8 --scale_dtype fp32 --alg sym" + elif [[ ${precision} == "q4_j_f32_g128" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size 128 --compute_dtype fp32 --scale_dtype fp32 --alg sym" + elif [[ ${precision} == "q4_j1_i8_g128" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size 128 --compute_dtype int8 --scale_dtype fp32 --alg asym" + elif [[ ${precision} == "q4_j1_bf16_pc" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size -1 --compute_dtype bf16 --scale_dtype fp32 --alg asym" + elif [[ ${precision} == "q4_0" ]]; then + eval "$quant_script_prologue --weight_dtype int4 --group_size 32 --compute_dtype int8 --alg sym --use_ggml" + elif [[ ${precision} == "q4_1" ]]; 
then + eval "$quant_script_prologue --weight_dtype int4 --group_size 32 --compute_dtype int8 --alg asym --use_ggml" + elif [[ ${precision} == "q8_0" ]]; then + eval "$quant_script_prologue --weight_dtype int8 --group_size 32 --compute_dtype int8 --alg sym --use_ggml" + elif [[ ${precision} == "q8e4m3_j_f32_g128" ]]; then + eval "$quant_script_prologue --weight_dtype fp8 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym" + elif [[ ${precision} == "q8e4m3_j_f32_g128_fp8" ]]; then + eval "$quant_script_prologue --weight_dtype fp8 --group_size 128 --scale_dtype fp8 --compute_dtype fp32 --alg sym" + elif [[ ${precision} == "q4e2m1_j_f32_g128" ]]; then + eval "$quant_script_prologue --weight_dtype fp4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym" + elif [[ ${precision} == "q8e5m2_j_f32_g128" ]]; then + eval "$quant_script_prologue --weight_dtype fp8_e5m2 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym" + elif [[ ${precision} == "q8e5m2_j_f32_g128_fp8" ]]; then + eval "$quant_script_prologue --weight_dtype fp8_e5m2 --group_size 128 --scale_dtype fp8 --compute_dtype fp32 --alg sym" + elif [[ ${precision} == "nf4_j_f32_g128" ]]; then + eval "$quant_script_prologue --weight_dtype nf4 --group_size 128 --scale_dtype fp32 --compute_dtype fp32 --alg sym" + else + echo "Error: Unexpedted precision: $precision" 1>&2 + continue + fi + echo "======= Quantization End =======" + fi + ## run inference + export LANG=en_US.UTF-8 + export LC_ALL=en_US.UTF-8 + echo "======= Inference Start =======" + + real_ctx=$ctx # TODO(Zhenzhong): use same ctx for chatglm & baichuan + [[ "${model}" == "chatglm2" || "${model}" == "chatglm-6b" || + "${model}" == "baichuan-13b" || "${model}" == "baichuan2-13b" ]] && real_ctx=2047 + + OMP_NUM_THREADS=$cores_per_instance numactl -m 0 -C 0-$(($cores_per_instance - 1)) \ + $infer_cmd --seed 1234 -t $cores_per_instance -b 2047 -c $real_ctx -n ${output} -m ${model}-${precision}.bin $extension -p 
"$prompt" 2>&1 | tee ${WORKSPACE}/${logs_file} || true & + monitor + + echo "======= Inference End =======" + python $script_dir/calculate_percentiles.py ${WORKSPACE}/${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} + + if [[ "$cores_per_instance" == "${cores_list[@]: -1:1}" ]] && + [[ "$batch_size_idx" == "0" ]] && + [[ "$input_idx" == "0" ]] && + [[ "${#ppl_nctx_list[@]}" != "0" ]]; then + ppl_eval "$task_name" "$cores_per_instance" "$model_path" "$model-$precision.bin" + fi + done + done + done + done + if [[ "${#ppl_nctx_list[@]}" != "0" ]] && [[ "$ppl_fp32_test" = true ]]; then + cores_per_instance="${cores_list[@]: -1:1}" + task_name="${model}-fp32-${cores_per_instance}-${batch_size_list[@]:0:1}-${input_list[@]:0:1}-${output}" + ppl_eval "$task_name" "$cores_per_instance" "$model_path" "$model-fp32.bin" + fi + conda deactivate >/dev/null 2>&1 +} + +function monitor() { + sleep 1 + # try first time + if [ $(ps -ef | grep "$infer_cmd" | wc -l) -lt 2 ]; then + sleep 1 + fi + # keep monitoring + echo "====== Monitor Start =======" + while true; do + if [ $(ps -ef | grep "$infer_cmd" | wc -l) -lt 2 ]; then + sleep 3 + break + fi + echo "$(date +%s), $(numastat -p $(ps -ef | grep "$infer_cmd" | grep -v grep | awk '{printf("%s ", $2)}'))" \ + >>${WORKSPACE}/memory.txt 2>&1 + done + echo "====== Monitor End =======" +} + +main $@ 2>&1 | tee ${WORKSPACE}/launch.log diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json new file mode 100644 index 00000000000..9d234a67752 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json @@ -0,0 +1,57 @@ +{ + "10": { + "map": {}, + "prompts": { + "default": "Once upon a time, there existed a " + } + }, + "32": { + "map": { + "llama-2-7b-chat": "llama", + "chatglm2": "chinese1", + "baichuan2-13b": "chinese1", + 
"chatglm-6b": "chinese2", + "baichuan-13b": "chinese2" + }, + "prompts": { + "default": "Once upon a time, there existed a little girl, who liked to have adventures. She wanted to go to places and meet new people, and have fun.", + "llama": "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun", + "chinese1": "\"和我一起出去打个篮球玩玩吧一起出去打个篮球玩玩吧!\"", + "chinese2": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子.\"" + } + }, + "1024": { + "map": { + "llama-2-7b-chat": "llama", + "gptj-6b": "gptj-6b", + "chatglm2": "chinese1", + "baichuan-13b": "chinese1", + "baichuan2-13b": "chinese1", + "chatglm-6b": "chinese2" + }, + "prompts": { + "default": "It is done, and submitted. You can play 'Survival of the Tastiest' on the Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in the space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. 
If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face it. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. 
A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill.", + "llama": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. 
This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I have not seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. 
Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate,", + "gptj-6b": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. 
Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. 
I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. 
Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept", + "chinese1": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类
试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?\"", + "chinese2": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?
但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。但是必须概念围绕这个主题工作但是必须概念围绕这个主题工作\"" + } + }, + "2012": { + "map": { + "llama-2-7b-chat": "llama", + "mistral-7b": "llama", + "chatglm2": "chinese1", + "baichuan-13b": "chinese1", + "baichuan2-13b": "chinese1", + "chatglm-6b": "chinese2" + }, + "prompts": { + "default": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on web. Playing on web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. 
I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. 
You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. 
Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. 
But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem I had to face in the end was overscoping. I had too much planned, and could not get it all done. Content-wise, I had several kinds of pasta planned - Wikipedia is just amazing in that regard, split into several different groups, from small Pastina to huge Pasta al forno. But because of time constraints, I ended up scratching most of them, and ended up with 5 different types of small pasta - barely something to start when talking about the evolution of Pasta. Pastas used in the game.", + "llama": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. 
Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I have not seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level but had some kind of foe trying to stop them from doing so. 
I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. 
Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. 
It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. 
But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem that I had to face in the end was overscoping.", + "gptj-6b": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. 
I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. 
You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. 
Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. 
But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem I had to face in the end was overscoping. I had too much planned, and could not get it all done. Content-wise, I had several kinds of pasta planned - Wikipedia is just amazing in that regard, split into several different groups, from small Pastina to huge Pasta al forno. But because of time constraints, I ended up scratching most of them, and ended up with 5 different types of small pasta - barely something to start when talking about the evolution of Pasta. Pastas used in the game. Unfortunately, the macs where never used Which is one of the saddest things about the project, really. 
It had the framework and the features to allow an endless number of elements in there, but I just did not have time to draw the rest of the assets needed (something I loved to do).", + "chinese1": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“
美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?\"", + "chinese2": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 
围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能
达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是?\"" + } + } +} diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt new file mode 100644 index 00000000000..07c70c77ed3 --- /dev/null +++ 
b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt @@ -0,0 +1,3 @@ +# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' +-r common.txt +transformers==4.33.1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt new file mode 100644 index 00000000000..07c70c77ed3 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt @@ -0,0 +1,3 @@ +# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' +-r common.txt +transformers==4.33.1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/common.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/common.txt new file mode 100644 index 00000000000..441da4dde29 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/common.txt @@ -0,0 +1,12 @@ +--extra-index-url https://download.pytorch.org/whl/cpu +torch==2.1.0+cpu +transformers +numpy +sentencepiece +protobuf<3.20 +einops +accelerate +peft +datasets +transformers_stream_generator +tiktoken diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/mistral.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/mistral.txt new file mode 100644 index 00000000000..786b72c2795 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/mistral.txt @@ -0,0 +1,2 @@ +-r common.txt +transformers>=4.34.0 From ab787f8335212e7f6c078c97c5a6512257961c36 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Tue, 26 Dec 2023 07:05:46 +0800 Subject: [PATCH 005/101] [NeuralChat] Fix magicoder model tokenizer issue (#1075) * fix magicoder tokenizer issue Signed-off-by: lvliang-intel --- .../neural_chat/models/model_utils.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 014e68e38bf..61bc99d685a 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -441,7 +441,8 @@ def load_model( try: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, - use_fast=False if config.model_type == "llama" else True, + use_fast=False if (re.search("llama", model_name, re.IGNORECASE) + or re.search("neural-chat-7b-v2", model_name, re.IGNORECASE)) else True, use_auth_token=hf_access_token, trust_remote_code=True if (re.search("qwen", model_name, re.IGNORECASE) or \ re.search("chatglm", model_name, re.IGNORECASE)) else False, From 1a2afa91beda4f73dc77ad6f4b5f3f877a928df3 Mon Sep 17 00:00:00 2001 From: XuhuiRen <44249229+XuhuiRen@users.noreply.github.com> Date: Tue, 26 Dec 2023 07:06:30 +0800 Subject: [PATCH 006/101] [NeuralChat] optimize prompt template to decrease model hallucination and enhance the ability of RAG (#1065) * optimize prompt template Signed-off-by: XuhuiRen --- .../plugins/prompt/prompt_template.py | 6 ++-- .../neural_chat/prompts/prompt.py | 31 ++++++++++++++----- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py index 63f1d2baea9..6e01432c29b 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py @@ -54,10 +54,10 @@ def generate_qa_enterprise(query, context=None, history=None): def generate_prompt(query, history=None): if history: - conv = PromptTemplate("rag_with_context_memory") + conv = 
PromptTemplate("rag_without_context_memory") conv.append_message(conv.roles[0], query) - conv.append_message(conv.roles[2], history) - conv.append_message(conv.roles[3], None) + conv.append_message(conv.roles[1], history) + conv.append_message(conv.roles[2], None) else: conv = PromptTemplate("rag_without_context") conv.append_message(conv.roles[0], query) diff --git a/intel_extension_for_transformers/neural_chat/prompts/prompt.py b/intel_extension_for_transformers/neural_chat/prompts/prompt.py index cd03598223e..6627205a2a0 100644 --- a/intel_extension_for_transformers/neural_chat/prompts/prompt.py +++ b/intel_extension_for_transformers/neural_chat/prompts/prompt.py @@ -129,9 +129,11 @@ register_conv_template( Conversation( name="rag_with_context_memory", - system_message="Have a conversation with a human, answer the following questions as best you can." + \ - " You can refer to the following document and context.\n", - roles=("### Question: ", "### Context: ", "### Chat History: ", "### Response: "), + system_message="""### You are a helpful, respectful and honest assistant to help the user with questions. \ + - Please refer to the search results obtained from the local knowledge base. But be careful to not \ + incorporate the information that you think is not relevant to the question. + - If you don't know the answer to a question, please don't share false information.\n""" , + roles=("### Question:", "### Search Results:", "### Chat History:", "### Response:"), sep_style=SeparatorStyle.NO_COLON_SINGLE, sep="\n", ) @@ -143,7 +145,19 @@ name="rag_without_context", system_message="Have a conversation with a human. 
" + \ "You are required to generate suitable response to the user input.\n", - roles=("### Input: ", "### Response: "), + roles=("### Input:", "### Response:"), + sep_style=SeparatorStyle.NO_COLON_SINGLE, + sep="\n", + ) +) + +# Rag without context template +register_conv_template( + Conversation( + name="rag_without_context_memory", + system_message="Have a conversation with a human. " + \ + "You are required to generate suitable response to the user input.\n", + roles=("### Input:", "### Chat History:", "### Response:"), sep_style=SeparatorStyle.NO_COLON_SINGLE, sep="\n", ) @@ -154,10 +168,11 @@ register_conv_template( Conversation( name="rag_with_threshold", - system_message="You are served as an AI agent to help the user complete a task." + \ - " You are required to comprehend the usr query and then use the given context to" + \ - " generate a suitable response.\n\n", - roles=("### User Query: ", "### Context: ", "### Chat History: ", "### Response: "), + system_message="""### You are a helpful, respectful and honest assistant to help the user with questions. \ + - Please refer to the search results obtained from the local knowledge base. But be careful to not \ + incorporate the information that you think is not relevant to the question. 
+ - If you don't know the answer to a question, please don't share false information.\n""", + roles=("### Question:", "### Search Results:", "### Chat History:", "### Response:"), sep_style=SeparatorStyle.NO_COLON_SINGLE, sep="\n", ) From 9729b6a7d5ee8ec20a186a73e7c4c6556676aa43 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Tue, 26 Dec 2023 07:08:57 +0800 Subject: [PATCH 007/101] [NeuralChat] Support Mixtral-8x7B-v0.1 model (#972) * Support Mixstral-8x7b model Signed-off-by: lvliang-intel --- intel_extension_for_transformers/neural_chat/README.md | 1 + intel_extension_for_transformers/neural_chat/chatbot.py | 3 ++- .../neural_chat/models/model_utils.py | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/README.md b/intel_extension_for_transformers/neural_chat/README.md index e5aba9f997b..df8d7099afc 100644 --- a/intel_extension_for_transformers/neural_chat/README.md +++ b/intel_extension_for_transformers/neural_chat/README.md @@ -144,6 +144,7 @@ The table below displays the validated model list in NeuralChat for both inferen |LLaMA2 series| ✅| ✅|✅| ✅ | |MPT series| ✅| ✅|✅| ✅ | |Mistral| ✅| ✅|✅| ✅ | +|Mixtral-8x7b-v0.1| ✅| ✅|✅| ✅ | |ChatGLM series| ✅| ✅|✅| ✅ | |Qwen series| ✅| ✅|✅| ✅ | |StarCoder series| | | | ✅ | diff --git a/intel_extension_for_transformers/neural_chat/chatbot.py b/intel_extension_for_transformers/neural_chat/chatbot.py index 57d7daefcbc..855c6837ade 100644 --- a/intel_extension_for_transformers/neural_chat/chatbot.py +++ b/intel_extension_for_transformers/neural_chat/chatbot.py @@ -96,7 +96,8 @@ def build_chatbot(config: PipelineConfig=None): "bloom" in config.model_name_or_path.lower() or \ "starcoder" in config.model_name_or_path.lower() or \ "codegen" in config.model_name_or_path.lower() or \ - "magicoder" in config.model_name_or_path.lower(): + "magicoder" in config.model_name_or_path.lower() or \ + "mixtral" in config.model_name_or_path.lower(): from .models.base_model 
import BaseModel adapter = BaseModel() else: diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 61bc99d685a..d6bbbe94681 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -500,6 +500,7 @@ def load_model( or config.model_type == "mpt" or config.model_type == "llama" or config.model_type == "mistral" + or config.model_type == "mixtral" ) and not ipex_int8) or config.model_type == "opt": with smart_context_manager(use_deepspeed=use_deepspeed): model = AutoModelForCausalLM.from_pretrained( @@ -554,7 +555,7 @@ def load_model( ) else: raise ValueError(f"unsupported model name or path {model_name}, \ - only supports FLAN-T5/LLAMA/MPT/GPT/BLOOM/OPT/QWEN/NEURAL-CHAT/MISTRAL/CODELLAMA/STARCODER/CODEGEN now.") + only supports t5/llama/mpt/gptj/bloom/opt/qwen/mistral/mixtral/gpt_bigcode model type now.") except EnvironmentError as e: logging.error(f"Exception: {e}") if "not a local folder and is not a valid model identifier" in str(e): From db209b47bafb6a5e02dec58f4a9bd1bb080b2dfd Mon Sep 17 00:00:00 2001 From: WenjiaoYue <108783334+WenjiaoYue@users.noreply.github.com> Date: Tue, 26 Dec 2023 14:10:21 +0800 Subject: [PATCH 008/101] Remove the specified versions from requirements (#1066) --- .../neural_chat/ui/gradio/basic/README.md | 2 ++ .../neural_chat/ui/gradio/basic/requirements.txt | 2 +- .../neural_chat/ui/gradio/side_by_side/README.md | 2 ++ .../neural_chat/ui/gradio/side_by_side/requirements.txt | 2 +- workflows/chatbot/demo/basic_frontend/README.md | 3 +++ workflows/chatbot/demo/basic_frontend/requirements.txt | 2 +- 6 files changed, 10 insertions(+), 3 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/ui/gradio/basic/README.md b/intel_extension_for_transformers/neural_chat/ui/gradio/basic/README.md index 88f32d01b25..5d66885d2f0 100644 --- 
a/intel_extension_for_transformers/neural_chat/ui/gradio/basic/README.md +++ b/intel_extension_for_transformers/neural_chat/ui/gradio/basic/README.md @@ -71,3 +71,5 @@ The URL to access the chatbot frontend is http://{SERVER_IP_ADDRESS}:80. Please You also have the option to update the backend service URL in the `app.py` file. ![Update backend URL](https://i.imgur.com/gRtZHrJ.png) + +>**Note**: Please use Gradio version 3.36.0. \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/ui/gradio/basic/requirements.txt b/intel_extension_for_transformers/neural_chat/ui/gradio/basic/requirements.txt index 1820bdff82c..cf638b309b3 100644 --- a/intel_extension_for_transformers/neural_chat/ui/gradio/basic/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/ui/gradio/basic/requirements.txt @@ -6,5 +6,5 @@ requests huggingface_hub markdown2 nh3 -gradio==3.36.0 +gradio fschat diff --git a/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/README.md b/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/README.md index 0467c820740..1e44a8e30e6 100644 --- a/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/README.md +++ b/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/README.md @@ -74,3 +74,5 @@ The URLs to access the chatbot frontend are http://{SERVER_IP_ADDRESS_1}:80 and You also have the option to update the backend service URL in the `app.py` file. ![Update backend URL](https://i.imgur.com/j7TTYaW.png) + +>**Note**: Please use Gradio version 3.34.0. 
\ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/requirements.txt b/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/requirements.txt index dcdff25b454..43673c0c18f 100644 --- a/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/ui/gradio/side_by_side/requirements.txt @@ -6,5 +6,5 @@ requests huggingface_hub markdown2 nh3 -gradio==3.34.0 +gradio fschat diff --git a/workflows/chatbot/demo/basic_frontend/README.md b/workflows/chatbot/demo/basic_frontend/README.md index 2b082c4eaa3..75d1a19edd8 100644 --- a/workflows/chatbot/demo/basic_frontend/README.md +++ b/workflows/chatbot/demo/basic_frontend/README.md @@ -18,3 +18,6 @@ For detailed information about the configuration settings, please refer to the [ ## 🚀 Setup application To set up your application, copy the code files from this directory and configure them as needed. Alternatively, you can clone the existing space from [https://huggingface.co/spaces/Intel/NeuralChat-MPT](https://huggingface.co/spaces/Intel/NeuralChat-MPT). + + +>**Note**: Please use Gradio version 3.36.0. 
\ No newline at end of file diff --git a/workflows/chatbot/demo/basic_frontend/requirements.txt b/workflows/chatbot/demo/basic_frontend/requirements.txt index 3d3a37ee6fc..f2b6c5fb339 100644 --- a/workflows/chatbot/demo/basic_frontend/requirements.txt +++ b/workflows/chatbot/demo/basic_frontend/requirements.txt @@ -6,4 +6,4 @@ requests huggingface_hub markdown2 nh3 -gradio==3.36.0 +gradio From ec3d38a28893321aaee555b41323681133c0e999 Mon Sep 17 00:00:00 2001 From: intellinjun <105184542+intellinjun@users.noreply.github.com> Date: Tue, 26 Dec 2023 16:09:05 +0800 Subject: [PATCH 009/101] [LLM Runtime]Magicoder graph (#1053) --- .../llm/runtime/graph/README.md | 8 ++ .../llm/runtime/graph/core/ne_layers.c | 54 ++++++++------ .../llm/runtime/graph/core/ne_layers.h | 11 +-- .../runtime/graph/models/chatglm/chatglm.cpp | 4 +- .../runtime/graph/models/chatglm/chatglm2.cpp | 11 ++- .../runtime/graph/models/falcon/falcon.cpp | 4 +- .../llm/runtime/graph/models/gptj/gptj.cpp | 14 ++-- .../runtime/graph/models/gptneox/gptneox.cpp | 4 +- .../llm/runtime/graph/models/llama/llama.cpp | 11 ++- .../graph/models/model_utils/model_files.h | 2 + .../graph/models/model_utils/model_types.h | 3 +- .../graph/models/model_utils/model_utils.cpp | 10 ++- .../llm/runtime/graph/models/qwen/qwen.cpp | 4 +- .../graph/scripts/ci/cpp_graph_inference.sh | 4 + .../runtime/graph/scripts/convert_baichuan.py | 1 + .../runtime/graph/scripts/convert_bloom.py | 1 + .../runtime/graph/scripts/convert_chatglm.py | 1 + .../runtime/graph/scripts/convert_dolly.py | 3 +- .../runtime/graph/scripts/convert_falcon.py | 3 +- .../llm/runtime/graph/scripts/convert_gptj.py | 3 +- .../runtime/graph/scripts/convert_gptneox.py | 3 +- .../runtime/graph/scripts/convert_llama.py | 74 ++++++++++++------- .../runtime/graph/scripts/convert_mistral.py | 3 + .../llm/runtime/graph/scripts/convert_mpt.py | 3 +- .../llm/runtime/graph/scripts/convert_opt.py | 3 +- .../llm/runtime/graph/scripts/convert_qwen.py | 1 + 
.../graph/scripts/convert_starcoder.py | 1 + 27 files changed, 160 insertions(+), 84 deletions(-) diff --git a/intel_extension_for_transformers/llm/runtime/graph/README.md b/intel_extension_for_transformers/llm/runtime/graph/README.md index 774cb521867..38d6c3ac932 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/README.md +++ b/intel_extension_for_transformers/llm/runtime/graph/README.md @@ -193,6 +193,14 @@ LLM Runtime supports the following models: ✅ ✅ Latest + + + Magicoder-6.7B + ✅ + ✅ + ✅ + ✅ + Latest StarCoder-1B, diff --git a/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.c b/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.c index 94f9ab7dff9..5c2e11f5680 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.c +++ b/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.c @@ -2980,7 +2980,7 @@ struct ne_tensor* ne_soft_max_inplace(struct ne_context* ctx, struct ne_tensor* struct ne_tensor* ne_rope_impl(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, int prompt_size, bool inplace, int n_keep, struct ne_tensor* cossin, int* n_padding, - bool padding_left, float freq_base) { + bool padding_left, float freq_base, float freq_scale) { NE_ASSERT(n_past >= 0 || n_keep >= 0); NE_ASSERT(padding_left); bool is_node = false; @@ -3020,7 +3020,9 @@ struct ne_tensor* ne_rope_impl(struct ne_context* ctx, struct ne_tensor* a, int ne_scratch_load(ctx); - ne_set_op_params(result, &freq_base, sizeof(freq_base)); + float params[] = {freq_base, freq_scale}; + ne_set_op_params(result, ¶ms, sizeof(params)); + result->op = NE_OP_ROPE; result->grad = is_node ? 
ne_dup_tensor(ctx, result) : NULL; result->src0 = a; @@ -3031,18 +3033,20 @@ struct ne_tensor* ne_rope_impl(struct ne_context* ctx, struct ne_tensor* a, int } struct ne_tensor* ne_rope(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, - int prompt_size, float freq_base) { - return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, false, -1, NULL, NULL, true, freq_base); + int prompt_size, float freq_base, float freq_scale) { + return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, false, -1, NULL, NULL, true, freq_base, freq_scale); } struct ne_tensor* ne_rope_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, - int prompt_size, float freq_base) { - return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, true, -1, NULL, NULL, true, freq_base); + int prompt_size, float freq_base, float freq_scale) { + return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, true, -1, NULL, NULL, true, freq_base, freq_scale); } struct ne_tensor* ne_rope_shift_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_shift, int n_dims, int mode, - int prompt_size, int n_keep, struct ne_tensor* cossin, float freq_base) { - return ne_rope_impl(ctx, a, n_shift, n_dims, mode, prompt_size, true, n_keep, cossin, NULL, true, freq_base); + int prompt_size, int n_keep, struct ne_tensor* cossin, float freq_base, + float freq_scale) { + return ne_rope_impl(ctx, a, n_shift, n_dims, mode, prompt_size, true, n_keep, cossin, NULL, true, freq_base, + freq_scale); } // ne_rope_back @@ -3078,13 +3082,16 @@ struct ne_tensor* ne_rope_back(struct ne_context* ctx, struct ne_tensor* a, int } struct ne_tensor* ne_rope_with_padding(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, - int prompt_size, int* n_padding, float freq_base) { - return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, false, -1, NULL, n_padding, true, freq_base); + int prompt_size, int* n_padding, float 
freq_base, float freq_scale) { + return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, false, -1, NULL, n_padding, true, freq_base, + freq_scale); } struct ne_tensor* ne_rope_with_padding_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, - int mode, int prompt_size, int* n_padding, float freq_base) { - return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, true, -1, NULL, n_padding, true, freq_base); + int mode, int prompt_size, int* n_padding, float freq_base, + float freq_scale) { + return ne_rope_impl(ctx, a, n_past, n_dims, mode, prompt_size, true, -1, NULL, n_padding, true, freq_base, + freq_scale); } // ne_alibi @@ -7867,9 +7874,8 @@ static void ne_compute_forward_rope_f32(const struct ne_compute_params* params, NE_ASSERT(src1->type == NE_TYPE_I32); NE_ASSERT(ne_nelements(src1) == 5 + bs); // 5 + bs params - float freq_base = 10000.0f; - memcpy(&freq_base, dst->op_params, sizeof(float)); - static const float freq_scale = 1.0f; + const float freq_base = ((float*)(dst->op_params))[0]; + const float freq_scale = 1 / ((float*)(dst->op_params))[1]; const int64_t n_past = ((int32_t*)src1->data)[ROPE_NPAST_IDX]; const int64_t n_dims = ((int32_t*)src1->data)[ROPE_NDIMS_IDX]; @@ -8043,7 +8049,10 @@ static void ne_compute_forward_rope_f16(const struct ne_compute_params* params, // row index used to determine which thread to use int ir = 0; - const float theta_scale = powf(10000.0, -2.0f / n_dims); + const float freq_base = ((float*)(dst->op_params))[0]; + const float freq_scale = 1 / ((float*)(dst->op_params))[1]; + + const float theta_scale = powf(freq_base, -2.0f / n_dims); const bool skip = mode & 1; const bool is_neox = mode & 2; @@ -8053,7 +8062,7 @@ static void ne_compute_forward_rope_f16(const struct ne_compute_params* params, NE_ASSERT(("shift RoPE is only implemented for the vanilla mode", !is_shift || !(is_glm || is_neox || skip))); if (is_shift) { - float theta = n_past; + float theta = n_past * freq_scale; 
ne_fp16_t* cossin = (dst->opt[0] != NULL) ? dst->opt[0]->data : NULL; if (cossin == NULL) { cossin = malloc(ne0 * sizeof(ne_fp16_t)); @@ -8098,7 +8107,7 @@ static void ne_compute_forward_rope_f16(const struct ne_compute_params* params, if (ir++ < ir0) continue; if (ir > ir1) break; - float theta = (float)p; + float theta = freq_scale * (float)p; if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { @@ -8172,11 +8181,14 @@ static void ne_compute_forward_rope_jblas(const struct ne_compute_params* params const int seq_len = dst->ne[1]; const int head_size = dst->ne[0]; + const float freq_base = ((float*)(dst->op_params))[0]; + const float freq_scale = 1 / ((float*)(dst->op_params))[1]; + if (is_shift) { ne_fp16_t* cossin = (dst->opt[0] != NULL) ? dst->opt[0]->data : NULL; if (cossin == NULL) { - float theta = n_past; - const float theta_scale = powf(10000.0, -2.0f / n_dims); + float theta = n_past * freq_scale; + const float theta_scale = powf(freq_base, -2.0f / n_dims); cossin = malloc(head_size * sizeof(ne_fp16_t)); for (int i0 = 0; i0 < head_size; i0 += 2) { cossin[i0 + 0] = NE_FP32_TO_FP16(cosf(theta)); @@ -10016,7 +10028,7 @@ static void ne_compute_backward(struct ne_context* ctx, struct ne_tensor* tensor const int n_dims = ((int32_t*)src1->data)[1]; const int mode = ((int32_t*)src1->data)[2]; src0->grad = - ne_add_impl(ctx, src0->grad, ne_rope(ctx, tensor->grad, n_past, n_dims, mode, 0, 10000.0), inplace); + ne_add_impl(ctx, src0->grad, ne_rope(ctx, tensor->grad, n_past, n_dims, mode, 0, 10000.0, 1.0), inplace); } if (src1->grad) { // noop diff --git a/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.h b/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.h index a354c22b319..304809b5561 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.h +++ b/intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.h @@ -403,29 +403,30 @@ NE_API struct ne_tensor* ne_soft_max_inplace(struct ne_context* 
ctx, struct ne_t // if mode & 4 == 1, especially for glm // TODO: avoid creating a new tensor every time NE_API struct ne_tensor* ne_rope(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, - int prompt_size, float freq_base); + int prompt_size, float freq_base, float freq_scale); // in-place, returns view(a) NE_API struct ne_tensor* ne_rope_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, - int prompt_size, float freq_base); + int prompt_size, float freq_base, float freq_scale); // shift all tokens by a give p (n_shift) // Optionally give a 1d tensor of precomputed interleaved cos/sin value of n_shift*scale^k for k \in [0, n_dims) NE_API struct ne_tensor* ne_rope_shift_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_shift, int n_dims, int mode, int prompt_size, int n_keep, struct ne_tensor* cossin, - float freq_base); + float freq_base, float freq_scale); // rotary position embedding backward, i.e compute dx from dy // a - dy NE_API struct ne_tensor* ne_rope_back(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode); NE_API struct ne_tensor* ne_rope_with_padding(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, - int mode, int prompt_size, int* n_padding, float freq_base); + int mode, int prompt_size, int* n_padding, float freq_base, + float freq_scale); // in-place, returns view(a) NE_API struct ne_tensor* ne_rope_with_padding_inplace(struct ne_context* ctx, struct ne_tensor* a, int n_past, int n_dims, int mode, int prompt_size, int* n_padding, - float freq_base); + float freq_base, float freq_scale); // alibi position embedding // in-place, returns view(a) diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.cpp index 8cce0142b74..ab9c982b5f9 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.cpp 
+++ b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.cpp @@ -137,14 +137,14 @@ static bool chatglm_model_eval_internal(model_context* ctx, const model_input* i ne_set_name(query_layer, "query_layer"); query_layer = ne_rope_with_padding_inplace(ctx0, query_layer, n_past, rope_dim, 4, first_tokens_size, - n_padding.data(), hparams.freq_base); + n_padding.data(), hparams.freq_base, hparams.freq_scale); query_layer = ne_permute(ctx0, query_layer, 0, 2, 1, 3); // [bs, heads, qlen, head_size] ne_tensor* key_layer = ne_view_4d(ctx0, cur, head_size, num_attention_heads, qlen, batch_size, 3 * head_size * ne_element_size(cur), cur->nb[1], cur->nb[1] * qlen, head_size * ne_element_size(cur)); // [bs, qlen, heads, head_size] key_layer = ne_rope_with_padding_inplace(ctx0, key_layer, n_past, rope_dim, 4, first_tokens_size, - n_padding.data(), hparams.freq_base); + n_padding.data(), hparams.freq_base, hparams.freq_scale); ne_tensor* value_layer = ne_view_4d(ctx0, cur, head_size, num_attention_heads, qlen, batch_size, 3 * head_size * ne_element_size(cur), cur->nb[1], cur->nb[1] * qlen, diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.cpp index 8f37b65222d..680e867c8e3 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.cpp @@ -146,14 +146,15 @@ static bool chatglm_model_eval_internal(model_context* ctx, const model_input* i ne_view_3d(ctx0, cur, head_size, n_head, N, head_size * ne_element_size(cur), cur->nb[1], 0); // [N, heads, head_size] ne_set_name(query_layer, "query_layer"); - query_layer = ne_rope_inplace(ctx0, query_layer, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base); + query_layer = ne_rope_inplace(ctx0, query_layer, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base, + 
hparams.freq_scale); struct ne_tensor* key_layer = ne_view_3d(ctx0, cur, head_size, num_kv_heads, N, head_size * ne_element_size(cur), cur->nb[1], hidden_size * ne_element_size(cur)); // [N, kv_heads, head_size] ne_set_name(key_layer, "key_layer"); key_layer = ne_rope_inplace( // n_ctx exceeds but it will be shift-roped back with cached K - ctx0, key_layer, (is_ring_full ? n_ctx : n_past), n_rot, 0, 0, hparams.freq_base); + ctx0, key_layer, (is_ring_full ? n_ctx : n_past), n_rot, 0, 0, hparams.freq_base, hparams.freq_scale); struct ne_tensor* value_layer = ne_view_3d(ctx0, cur, head_size, num_kv_heads, N, head_size * ne_element_size(cur), cur->nb[1], @@ -198,7 +199,8 @@ static bool chatglm_model_eval_internal(model_context* ctx, const model_input* i // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N // in a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - key_layer = ne_rope_shift_inplace(ctx0, key_layer, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + key_layer = ne_rope_shift_inplace(ctx0, key_layer, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); key_layer = ne_permute(ctx0, key_layer, 0, 2, 1, 3); // perm back } @@ -253,7 +255,8 @@ static bool chatglm_model_eval_internal(model_context* ctx, const model_input* i // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N // in a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - key_layer = ne_rope_shift_inplace(ctx0, key_layer, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + key_layer = ne_rope_shift_inplace(ctx0, key_layer, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); } value_layer = ne_view_3d(ctx0, model.layers[il].v_cache, // tensor diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.cpp 
b/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.cpp index cdcd00af427..87332e6ca7c 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.cpp @@ -162,8 +162,8 @@ static bool falcon_model_eval_internal(model_context* ctx, const model_input* in fused_qkv_row_nb, (n_embd + n_head_kv * head_dim) * ne_element_size(cur)); // using mode = 2 for neox mode - Qcur = ne_rope_inplace(ctx0, Qcur, n_past, head_dim, 2, 0, hparams.freq_base); - Kcur = ne_rope_inplace(ctx0, Kcur, n_past, head_dim, 2, 0, hparams.freq_base); + Qcur = ne_rope_inplace(ctx0, Qcur, n_past, head_dim, 2, 0, hparams.freq_base, hparams.freq_scale); + Kcur = ne_rope_inplace(ctx0, Kcur, n_past, head_dim, 2, 0, hparams.freq_base, hparams.freq_scale); // self-attention const float attn_scale = 1.0f / sqrtf(static_cast(head_dim)); diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.cpp index 8a04d128a52..23ff017aed8 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.cpp @@ -186,9 +186,10 @@ static bool gptj_model_eval_internal(model_context* ctx, const model_input* inpu Kcur = ne_reshape_4d(ctx0, ne_mul_mat(ctx0, model.layers[il].attn[1], cur), head_size, n_head, N, batch_size); Vcur = ne_mul_mat(ctx0, model.layers[il].attn[2], cur); } - Qcur = ne_rope_inplace(ctx0, Qcur, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base); + Qcur = + ne_rope_inplace(ctx0, Qcur, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base, hparams.freq_scale); Kcur = ne_rope_inplace( // n_ctx exceeds but it will be shift-roped back with cached K - ctx0, Kcur, (is_ring_full ? n_ctx : n_past), n_rot, 0, 0, hparams.freq_base); + ctx0, Kcur, (is_ring_full ? 
n_ctx : n_past), n_rot, 0, 0, hparams.freq_base, hparams.freq_scale); ne_set_name(Qcur, "Qcur"); ne_set_name(Kcur, "Kcur"); ne_set_name(Vcur, "Vcur"); @@ -293,7 +294,8 @@ static bool gptj_model_eval_internal(model_context* ctx, const model_input* inpu // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N // in a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); } const auto v_size = kv_cache_info.v_bytes; V = ne_view_4d(ctx0, kv_self.v, // tensor @@ -321,7 +323,8 @@ static bool gptj_model_eval_internal(model_context* ctx, const model_input* inpu // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N in // a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); K = ne_permute(ctx0, K, 0, 2, 1, 3); } } else { @@ -332,7 +335,8 @@ static bool gptj_model_eval_internal(model_context* ctx, const model_input* inpu // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N in // a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); K = ne_permute(ctx0, K, 0, 2, 1, 3); } diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.cpp index 
4953f78d697..b5138dbe5e1 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.cpp @@ -188,9 +188,9 @@ static bool gptneox_model_eval_internal(model_context* ctx, const model_input* i // using mode = 2 for GPT-NeoX mode Qcur = ne_rope_inplace(ctx0, ne_reshape_4d(ctx0, Qcur, head_dim, n_head, N, batch_size), n_past, n_rot, 2, 0, - hparams.freq_base); + hparams.freq_base, hparams.freq_scale); Kcur = ne_rope_inplace(ctx0, ne_reshape_4d(ctx0, Kcur, head_dim, n_head, N, batch_size), n_past, n_rot, 2, 0, - hparams.freq_base); + hparams.freq_base, hparams.freq_scale); const float attn_scale = 1.0f / sqrtf(static_cast(head_dim)); // store key and value to memory if (!run_mha_reordered) { diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.cpp index 894f927a5d7..a498d740acd 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.cpp @@ -187,10 +187,11 @@ static bool llama_model_eval_internal(model_context* ctx, const model_input* inp Kcur = ne_reshape_3d(ctx0, ne_mul_mat(ctx0, model.layers[il].attn[1], cur), head_size, n_head_kv, N); Vcur = ne_mul_mat(ctx0, model.layers[il].attn[2], cur); } - Qcur = ne_rope_inplace(ctx0, Qcur, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base); + Qcur = + ne_rope_inplace(ctx0, Qcur, std::max(n_cached - N, n_past), n_rot, 0, 0, hparams.freq_base, hparams.freq_scale); ne_set_name(Qcur, "Qcur"); Kcur = ne_rope_inplace( // n_ctx exceeds but it will be shift-roped back with cached K - ctx0, Kcur, (is_ring_full ? n_ctx : n_past), n_rot, 0, 0, hparams.freq_base); + ctx0, Kcur, (is_ring_full ? 
n_ctx : n_past), n_rot, 0, 0, hparams.freq_base, hparams.freq_scale); ne_set_name(Kcur, "Kcur"); Vcur = ne_transpose(ctx0, ne_reshape_2d(ctx0, Vcur, head_size * n_head_kv, N)); ne_set_name(Vcur, "Vcur"); @@ -221,7 +222,8 @@ static bool llama_model_eval_internal(model_context* ctx, const model_input* inp // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N in // a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); } K = ne_permute(ctx0, K, 0, 2, 1, 3); ne_set_name(K, "K"); @@ -301,7 +303,8 @@ static bool llama_model_eval_internal(model_context* ctx, const model_input* inp // Currently we only cache cossin for N == 1 in model-wide; It may be worthwhile to cache cossin for other N in // a single eval execution if (N == 1) cossin_cache = kv_self.cossin; - K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base); + K = ne_rope_shift_inplace(ctx0, K, -N, n_rot, 0, 0, n_keep, cossin_cache, hparams.freq_base, + hparams.freq_scale); } ne_set_name(K, "K"); diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_files.h b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_files.h index ff65ebaea14..a0c2e22dbc2 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_files.h +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_files.h @@ -284,6 +284,7 @@ struct model_file_loader { file.read_raw(&hparams.rms_norm_eps, sizeof(float)); file.read_raw(&hparams.freq_base, sizeof(float)); + file.read_raw(&hparams.freq_scale, sizeof(float)); } void read_vocab() { @@ -403,6 +404,7 @@ struct model_file_saver { file.write_raw(&hparams.rms_norm_eps, 
sizeof(float)); file.write_raw(&hparams.freq_base, sizeof(float)); + file.write_raw(&hparams.freq_scale, sizeof(float)); } void write_vocab() { if (any_file_loader->file_version == MODEL_FILE_VERSION_NE) { diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h index 0b5db5bae8c..5754cfaeac3 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h @@ -126,7 +126,8 @@ struct model_hparams { uint32_t word_embed_proj_dim = 0; // for opt bool do_layer_norm_before = false; // for opt float rms_norm_eps = 1e-6f; // rms norm epsilon - float freq_base = 10000.0f; + float freq_base = 10000.0f; // rope theta + float freq_scale = 1.0f; // rope scale factor // ChatGLM-2 int32_t multi_query_group_num = 0; diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp index f8a287a7fdb..95541011c3a 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp @@ -142,11 +142,13 @@ static bool kv_cache_init(const struct model_hparams& hparams, struct model_kv_c const auto cossin_dtype = wtype == NE_TYPE_JBLAS ? NE_TYPE_F16 : wtype; cache.cossin = ne_new_tensor_1d(cache.ctx, cossin_dtype, head_size, NE_SIZE_CALC); ne_set_name(cache.cossin, "cossin(-1)"); - float theta = -1; + + float freq_base = hparams.freq_base; + float theta = -1 * hparams.freq_scale; float theta_scale = (model != nullptr && model->arch == MODEL_CHATGLM2) - ? std::pow(10000.f, -2.0f / (head_size / 2)) // chatglm2 has their DIM_SCALE of 2 - : hparams.n_rot > 0 ? 
std::pow(10000.f, -2.0f / hparams.n_rot) - : std::pow(10000.f, -2.0f / head_size); + ? std::pow(freq_base, -2.0f / (head_size / 2)) // chatglm2 has their DIM_SCALE of 2 + : hparams.n_rot > 0 ? std::pow(freq_base, -2.0f / hparams.n_rot) + : std::pow(freq_base, -2.0f / head_size); if (cossin_dtype == NE_TYPE_F16) { const auto data = reinterpret_cast(cache.cossin->data); for (int i = 0; i < head_size; i += 2) { diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.cpp index d55d183a2be..7d6c432ff8a 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.cpp @@ -180,9 +180,9 @@ static bool qwen_model_eval_internal(model_context* ctx, const model_input* inpu fused_qkv_row_nb, 2 * sizeof(float) * n_embd)); // using mode = 2 for GPT-NeoX mode - Qcur = ne_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, 0, hparams.freq_base); + Qcur = ne_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, 0, hparams.freq_base, hparams.freq_scale); ne_set_name(Qcur, "Qcur"); - Kcur = ne_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, 0, hparams.freq_base); + Kcur = ne_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, 0, hparams.freq_base, hparams.freq_scale); ne_set_name(Kcur, "kcur"); const float attn_scale = 1.0f / sqrtf(static_cast(head_dim)); // store key and value to memory diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh index ff4d84f81f5..7fa6ef56a3d 100755 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh @@ -148,6 +148,7 @@ model_name_map["baichuan2-13b"]="baichuan-inc/Baichuan2-13B-Chat" 
model_name_map["baichuan-13b"]="baichuan-inc/Baichuan-13B-Chat" model_name_map["mistral-7b"]="mistralai/Mistral-7B-v0.1" model_name_map["qwen-7b"]="Qwen/Qwen-7B-Chat" +model_name_map["magicoder"]="ise-uiuc/Magicoder-S-DS-6.7B" function main() { conda_env="$1" @@ -227,6 +228,9 @@ function main() { elif [[ "${model}" == "qwen-7b" ]]; then quant_script="./build/bin/quant_qwen" infer_cmd="./build/bin/run_qwen" + elif [[ "${model}" == "magicoder" ]]; then + quant_script="./build/bin/quant_llama" + infer_cmd="./build/bin/run_llama" else echo "Error: Unexpedted model: $model" 1>&2 exit 1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_baichuan.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_baichuan.py index 247f90eaeea..bbeb3c6d0ba 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_baichuan.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_baichuan.py @@ -158,6 +158,7 @@ def baichuan13B_convert(model, tokenizer, dir_model, fname_out, ftype, hparams): fout.write(struct.pack("i", hparams["intermediate_size"])) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base + fout.write(struct.pack("f", 1.0)) # rope_factor fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_bloom.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_bloom.py index 3fca71b515f..7e2a3f8054f 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_bloom.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_bloom.py @@ -101,6 +101,7 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) 
fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base + fout.write(struct.pack("f", 1.0)) # rope_factor fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_chatglm.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_chatglm.py index d6cda001b3a..58cc8766eff 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_chatglm.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_chatglm.py @@ -180,6 +180,7 @@ def chatglm2_convert(model, tokenizer, dir_model, fname_out, ftype, hparams): fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("layernorm_epsilon", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base + fout.write(struct.pack("f", 1.0)) # rope_factor fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_dolly.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_dolly.py index f5b589f966b..0bde4f3a3a7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_dolly.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_dolly.py @@ -115,7 +115,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id 
is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_falcon.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_falcon.py index 2f1ddd2b8b7..c4a92222b22 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_falcon.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_falcon.py @@ -109,7 +109,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptj.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptj.py index c7b89b2d8ce..a610032ea3b 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptj.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptj.py @@ -101,7 +101,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if 
tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptneox.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptneox.py index 3b67daa311f..8c50c006bd6 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptneox.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptneox.py @@ -115,7 +115,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py index 86f9a8aac47..b4185668cfd 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py @@ -150,6 +150,9 @@ class Params: ffn_hidden_size: int rms_norm_eps: float rope_theta: float + rope_scale: float + bos_token_id: int + eos_token_id: int @staticmethod def guessed(model: 'LazyModel') -> 'Params': @@ -178,6 +181,11 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': ffn_hidden_size = config["intermediate_size"] rms_norm_eps = config["rms_norm_eps"] rope_theta = config["rope_theta"] if "rope_theta" in config else 10000 + rope_scale = 1 + if config["rope_scaling"]: + rope_scale 
= config["rope_scaling"]["factor"] if "factor" in config["rope_scaling"] else 1 + bos_token_id = config["bos_token_id"] + eos_token_id = config["eos_token_id"] return Params( n_vocab=n_vocab, @@ -189,6 +197,9 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': ffn_hidden_size=ffn_hidden_size, rms_norm_eps=rms_norm_eps, rope_theta=rope_theta, + rope_scale=rope_scale, + bos_token_id = bos_token_id, + eos_token_id = eos_token_id, ) # LLaMA v2 70B params.json @@ -204,6 +215,8 @@ def loadOriginalParamsJson(model: 'LazyModel', config_path: Path) -> 'Params': n_head = config["n_heads"] n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head ffn_hidden_size = config["intermediate_size"] + bos_token_id = config["bos_token_id"] + eos_token_id = config["eos_token_id"] # hack to determine LLaMA v1 vs v2 vs CodeLlama if n_vocab == -1: @@ -217,6 +230,8 @@ def loadOriginalParamsJson(model: 'LazyModel', config_path: Path) -> 'Params': n_head=n_head, n_head_kv=n_head_kv, ffn_hidden_size=ffn_hidden_size, + bos_token_id = bos_token_id, + eos_token_id = eos_token_id, ) @staticmethod @@ -239,7 +254,7 @@ def load(model: 'ModelPlus') -> 'Params': class SentencePieceVocab: - def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None: + def __init__(self, fname_tokenizer: Path, params_vocab_size: int, fname_added_tokens: Optional[Path]) -> None: self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) added_tokens: Dict[str, int] if fname_added_tokens is not None: @@ -258,25 +273,31 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) self.fname_tokenizer = fname_tokenizer self.fname_added_tokens = fname_added_tokens + self.params_vocab_size = params_vocab_size def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]: tokenizer = self.sentencepiece_tokenizer - for i in 
range(tokenizer.vocab_size()): - text: bytes - if tokenizer.is_unknown(i): + for i in range(self.params_vocab_size): + text: bytes + if i < tokenizer.vocab_size(): + if tokenizer.is_unknown(i): + text = " \u2047 ".encode("utf-8") + elif tokenizer.is_control(i): + text = b"" + elif tokenizer.is_byte(i): + piece = tokenizer.id_to_piece(i) + if len(piece) != 6: + raise Exception(f"Invalid token: {piece}") + byte_value = int(piece[3:-1], 16) + text = struct.pack("B", byte_value) + else: + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + score: float = tokenizer.get_score(i) + yield text, score + else : text = " \u2047 ".encode("utf-8") - elif tokenizer.is_control(i): - text = b"" - elif tokenizer.is_byte(i): - piece = tokenizer.id_to_piece(i) - if len(piece) != 6: - raise Exception(f"Invalid token: {piece}") - byte_value = int(piece[3:-1], 16) - text = struct.pack("B", byte_value) - else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") - score: float = tokenizer.get_score(i) - yield text, score + score: float = i + yield text, score def added_tokens(self) -> Iterable[Tuple[bytes, float]]: for text in self.added_tokens_list: @@ -1063,13 +1084,14 @@ def write_file_header(self, params: Params, file_type: NEFileType) -> None: self.fout.write(struct.pack("f", params.rms_norm_eps)) self.fout.write(struct.pack("f", params.rope_theta)) + self.fout.write(struct.pack("f", params.rope_scale)) # TODO, bos_token_id = 0 in https://huggingface.co/decapoda-research/llama-7b-hf/blob/main/config.json # but bos_token_id = 1 in llama.cpp - self.fout.write(struct.pack("i", 1)) - self.fout.write(struct.pack("i", 2)) - self.fout.write(struct.pack("i", 0)) - self.fout.write(struct.pack("i", 0)) + self.fout.write(struct.pack("i", params.bos_token_id)) + self.fout.write(struct.pack("i", params.eos_token_id)) + self.fout.write(struct.pack("i", -1)) + self.fout.write(struct.pack("i", -1)) def write_tensor_header(self, name: str, shape: 
Sequence[int], data_type: DataType) -> None: sname = name.encode('utf-8') @@ -1095,7 +1117,7 @@ def write_vocab_only(fname_out: Path, vocab: Vocab) -> None: @staticmethod def write_all(fname_out: Path, params: Params, model: LazyModel, vocab: Vocab, file_type: NEFileType) -> None: - check_vocab_size(params, vocab) + #check_vocab_size(params, vocab) of = OutputFile(fname_out) of.write_file_header(params, file_type) print("Writing vocab...") @@ -1224,7 +1246,7 @@ def filter_and_sort_tensors(model: LazyModel) -> LazyModel: return {name: model[name] for name in TENSORS_LIST if name in model} -def load_vocab(path: Path) -> SentencePieceVocab: +def load_vocab(path: Path, params_vocab_size: int) -> SentencePieceVocab: # Be extra-friendly and accept either a file or a directory. Also, if it's # a directory, it might be the model directory, and tokenizer.model might # be in the parent of that. @@ -1243,7 +1265,7 @@ def load_vocab(path: Path) -> SentencePieceVocab: ) added_tokens_path = path.parent / "added_tokens.json" print(f"Loading vocab file {path}") - return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None) + return SentencePieceVocab(path, params_vocab_size, added_tokens_path if added_tokens_path.exists() else None) def default_outfile(model_paths: List[Path], params: Params) -> Path: @@ -1306,13 +1328,13 @@ def main(args_in: Optional[List[str]] = None) -> None: if args.dump: do_dump_model(model_plus) return + model = model_plus.model + params = Params.load(model_plus) if model_plus.vocab is not None and args.vocab_dir is None: vocab = model_plus.vocab else: vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent - vocab = load_vocab(vocab_dir) - model = model_plus.model - params = Params.load(model_plus) + vocab = load_vocab(vocab_dir, params.n_vocab) model = do_necessary_conversions(model, params) output_type = pick_output_type(model, args.outtype) model = convert_to_output_type(model, output_type) diff --git 
a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py index a2fb16f883d..aeb029e5ab7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py @@ -151,6 +151,7 @@ class Params: ffn_hidden_size: int rms_norm_eps: float rope_theta: float + rope_scale: float @staticmethod def guessed(model: 'LazyModel') -> 'Params': @@ -179,6 +180,7 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': ffn_hidden_size = config["intermediate_size"] rms_norm_eps = config["rms_norm_eps"] rope_theta = config["rope_theta"] if "rope_theta" in config else 10000 + rope_scale = config["factor"] if "factor" in config else 1 return Params( n_vocab=n_vocab, @@ -1058,6 +1060,7 @@ def write_file_header(self, params: Params, file_type: NEFileType) -> None: self.fout.write(struct.pack("i", 0)) self.fout.write(struct.pack("f", params.rms_norm_eps)) self.fout.write(struct.pack("f", params.rope_theta)) + self.fout.write(struct.pack("f", params.rope_scale)) self.fout.write( struct.pack("i", 1) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mpt.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mpt.py index 2bdf0ec4a69..7106555678d 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mpt.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mpt.py @@ -97,7 +97,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", 
tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_opt.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_opt.py index 76c27c05778..ab26bc538f7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_opt.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_opt.py @@ -108,7 +108,8 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base - + fout.write(struct.pack("f", 1.0)) # rope_factor + fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) fout.write(struct.pack("i", tokenizer.pad_token_id if tokenizer.pad_token_id is not None else -1)) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_qwen.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_qwen.py index c559dd398ec..a4b9c7f4ffc 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_qwen.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_qwen.py @@ -114,6 +114,7 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base + fout.write(struct.pack("f", 1.0)) # rope_factor fout.write(struct.pack("i", tokenizer.special_tokens['<|endoftext|>'])) fout.write(struct.pack("i", tokenizer.special_tokens['<|endoftext|>'])) diff --git 
a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_starcoder.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_starcoder.py index 327f88864ce..932be3f8e42 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_starcoder.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_starcoder.py @@ -112,6 +112,7 @@ def main(args_in: Optional[List[str]] = None) -> None: fout.write(struct.pack("i", 0)) fout.write(struct.pack("f", hparams.get("rms_norm_eps", 1e-6))) # rms norm eps fout.write(struct.pack("f", 10000.0)) # freq_base + fout.write(struct.pack("f", 1.0)) # rope_factor fout.write(struct.pack("i", tokenizer.bos_token_id if tokenizer.bos_token_id is not None else 1)) fout.write(struct.pack("i", tokenizer.eos_token_id if tokenizer.eos_token_id is not None else 2)) From b1b407f931922b31822cdfa4e8e2047f281cd64e Mon Sep 17 00:00:00 2001 From: Yi DING Date: Tue, 26 Dec 2023 03:38:33 -0800 Subject: [PATCH 010/101] [LLM Runtime] Circumvent dependabot check for chatglm/baichuan/baichuan2 (#1078) --- .../graph/scripts/ci/cpp_graph_inference.sh | 15 +++++++++++---- .../graph/scripts/requirements/baichuan.sh | 19 +++++++++++++++++++ .../graph/scripts/requirements/baichuan.txt | 3 --- .../graph/scripts/requirements/chatglm-6b.sh | 19 +++++++++++++++++++ .../graph/scripts/requirements/chatglm-6b.txt | 3 --- 5 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.sh delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt create mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.sh delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh 
b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh index 7fa6ef56a3d..b97e49222c1 100755 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh @@ -210,17 +210,17 @@ function main() { quant_script="./build/bin/quant_chatglm" infer_cmd="python ./scripts/inference.py" extension=" --model_name chatglm --tokenizer $model_path" - requirements_file="scripts/requirements/chatglm-6b.txt" + requirements_file="scripts/requirements/chatglm-6b.sh" elif [[ "${model}" == "baichuan2-13b" ]]; then quant_script="./build/bin/quant_baichuan" infer_cmd="python ./scripts/inference.py" - requirements_file="scripts/requirements/baichuan.txt" + requirements_file="scripts/requirements/baichuan.sh" extension=" --model_name baichuan --tokenizer $model_path" elif [[ "${model}" == "baichuan-13b" ]]; then quant_script="./build/bin/quant_baichuan" infer_cmd="python ./scripts/inference.py" extension=" --model_name baichuan --tokenizer $model_path" - requirements_file="scripts/requirements/baichuan.txt" + requirements_file="scripts/requirements/baichuan.sh" elif [[ "${model}" == "mistral-7b" ]]; then quant_script="./build/bin/quant_mistral" infer_cmd="./build/bin/run_mistral" @@ -277,7 +277,14 @@ function main() { cd .. 
## prepare example requiement - pip install -r "$requirements_file" + if [[ $requirements_file == *'.txt' ]]; then + pip install -r "$requirements_file" + elif [[ $requirements_file == *'.sh' ]]; then + source "$requirements_file" + else + echo "Error: Unexpedted requirements_file: $requirements_file" 1>&2 + exit 1 + fi echo "======= Convert Start =======" ## prepare fp32 bin diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.sh new file mode 100644 index 00000000000..75a88a8f9da --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#=============================================================================== +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#=============================================================================== + +# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' +pip install -r "$(dirname "${BASH_SOURCE[0]}")/common.txt" transformers==4.33.1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt deleted file mode 100644 index 07c70c77ed3..00000000000 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.txt +++ /dev/null @@ -1,3 +0,0 @@ -# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' --r common.txt -transformers==4.33.1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.sh new file mode 100644 index 00000000000..75a88a8f9da --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#=============================================================================== +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#=============================================================================== + +# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' +pip install -r "$(dirname "${BASH_SOURCE[0]}")/common.txt" transformers==4.33.1 diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt b/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt deleted file mode 100644 index 07c70c77ed3..00000000000 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.txt +++ /dev/null @@ -1,3 +0,0 @@ -# To avoid the error: 'ChatGLMTokenizer' object has no attribute 'sp_tokenizer' --r common.txt -transformers==4.33.1 From a32e1844eee85a0f13d0823200452d70c9bc22e0 Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Wed, 27 Dec 2023 09:12:46 +0800 Subject: [PATCH 011/101] update datasets version for docker (#1077) --- docker/Dockerfile_chatbot | 1 + examples/huggingface/pytorch/language-modeling/nas/README.md | 1 + intel_extension_for_transformers/neural_chat/docker/Dockerfile | 1 + 3 files changed, 3 insertions(+) diff --git a/docker/Dockerfile_chatbot b/docker/Dockerfile_chatbot index bd919f91d09..5f599898e93 100644 --- a/docker/Dockerfile_chatbot +++ b/docker/Dockerfile_chatbot @@ -124,6 +124,7 @@ RUN cd /intel-extension-for-transformers/intel_extension_for_transformers/neural pip install -r requirements_hpu.txt && \ pip install transformers==4.34.1 && \ pip install accelerate==0.24.0 && \ + pip install datasets==2.14.7 && \ pip install pymysql WORKDIR /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/ diff --git a/examples/huggingface/pytorch/language-modeling/nas/README.md b/examples/huggingface/pytorch/language-modeling/nas/README.md index 42719f33390..7fb9702d652 100644 --- a/examples/huggingface/pytorch/language-modeling/nas/README.md +++ b/examples/huggingface/pytorch/language-modeling/nas/README.md @@ -10,6 +10,7 @@ Recommend 
python 3.9 or higher version. ```shell pip install -r requirements.txt pip install transformers==4.34.1 +pip install datasets==2.14.7 ``` >**Note**: Please use transformers no higher than 4.34.1 diff --git a/intel_extension_for_transformers/neural_chat/docker/Dockerfile b/intel_extension_for_transformers/neural_chat/docker/Dockerfile index 7304ba4d662..d7a7350ec02 100644 --- a/intel_extension_for_transformers/neural_chat/docker/Dockerfile +++ b/intel_extension_for_transformers/neural_chat/docker/Dockerfile @@ -128,6 +128,7 @@ RUN cd /intel-extension-for-transformers/intel_extension_for_transformers/neural pip install -r requirements_hpu.txt && \ pip install transformers==4.34.1 && \ pip install accelerate==0.24.0 && \ + pip install datasets==2.14.7 && \ pip install pymysql && \ pip uninstall -y intel_extension_for_pytorch From 4b5eb3f90f9b08f24f8fe477f93a40a499e2f87a Mon Sep 17 00:00:00 2001 From: "Dong, Bo" Date: Wed, 27 Dec 2023 18:32:14 +0800 Subject: [PATCH 012/101] [LLM Runtime] support neural chat on windows with magicoder (#1082) * fix windows with magicoder Signed-off-by: Dong, Bo1 --- .../llm/runtime/graph/__init__.py | 8 +++++++- .../llm/runtime/graph/scripts/convert_llama.py | 2 +- setup.py | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/intel_extension_for_transformers/llm/runtime/graph/__init__.py b/intel_extension_for_transformers/llm/runtime/graph/__init__.py index 950c4fc40f1..af3c79e47fd 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/__init__.py +++ b/intel_extension_for_transformers/llm/runtime/graph/__init__.py @@ -131,8 +131,14 @@ def init_from_bin(self, model_type, model_path, **generate_kwargs): self.model = self.module.Model() if "threads" not in generate_kwargs: threads = os.getenv("OMP_NUM_THREADS") + import platform + sys_platform = platform.platform().lower() if threads is None: - generate_kwargs["threads"] = len(os.sched_getaffinity(0)) + if "windows" in sys_platform: + cpu_count = os.cpu_count() + 
generate_kwargs["threads"] = int(cpu_count) + else: + generate_kwargs["threads"] = len(os.sched_getaffinity(0)) else: generate_kwargs["threads"] = int(threads) self.model.init_model(model_path, **generate_kwargs) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py index b4185668cfd..93bcd8cde76 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py @@ -258,7 +258,7 @@ def __init__(self, fname_tokenizer: Path, params_vocab_size: int, fname_added_to self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) added_tokens: Dict[str, int] if fname_added_tokens is not None: - added_tokens = json.load(open(fname_added_tokens)) + added_tokens = json.load(open(fname_added_tokens, encoding='utf-8')) else: added_tokens = {} vocab_size: int = self.sentencepiece_tokenizer.vocab_size() diff --git a/setup.py b/setup.py index 98f4b515a9a..583a24221ff 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ class CMakeBuild(build_ext): @staticmethod def _is_target_file(file_name: str) -> bool: - if file_name.endswith(".dll") or file_name.endswith(".exe"): + if file_name.endswith(".dll") or file_name.endswith(".exe") or file_name.endswith(".pyd"): return True if file_name.endswith(".so") or ".so." 
in file_name: return True From 2758d49f4933ea987d46bf2c91f2f51e39cb78cf Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Thu, 28 Dec 2023 07:15:09 +0800 Subject: [PATCH 013/101] Fix magicoder model tokenizer issue and remove codegen streaming redundant end format (#1086) * Fix magicoder tokenizer issue and streaming redundant end format Signed-off-by: lvliang-intel --- .../neural_chat/models/model_utils.py | 3 ++- .../neural_chat/server/multi_cpu_server.py | 20 +------------------ .../neural_chat/server/multi_hpu_server.py | 1 - 3 files changed, 3 insertions(+), 21 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index d6bbbe94681..7bcf4b29761 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -566,7 +566,8 @@ def load_model( logging.error(f"Exception: {e}") raise ValueError(f"load_model: an unexpected error occurred, {e}") - if re.search("llama", model.config.architectures[0], re.IGNORECASE): + if re.search("llama", model.config.architectures[0], re.IGNORECASE) and \ + not re.search("magicoder", model_name, re.IGNORECASE): # unwind broken decapoda-research config model.generation_config.pad_token_id = 0 model.generation_config.bos_token_id = 1 diff --git a/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py b/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py index d1b584772dc..0e2cf61d4c6 100644 --- a/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py +++ b/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py @@ -250,31 +250,13 @@ async def chat_completion_endpoint(request: ChatCompletionRequest): if attr == "stream": continue setattr(gen_config, attr, value) - buffered_texts = "" if request.stream: generator, _ = chatbot.predict_stream(query=request.prompt, 
config=gen_config) if not isinstance(generator, types.GeneratorType): generator = (generator,) def stream_generator(): - nonlocal buffered_texts for output in generator: - if isinstance(output, str): - chunks = output.split() - for chunk in chunks: - ret = { - "text": chunk, - "error_code": 0, - } - buffered_texts += chunk + ' ' - yield json.dumps(ret).encode() + b"\0" - else: - ret = { - "text": output, - "error_code": 0, - } - buffered_texts += output + ' ' - yield json.dumps(ret).encode() + b"\0" - yield f"data: [DONE]\n\n" + yield output + "\0" return StreamingResponse(stream_generator(), media_type="text/event-stream") else: response = chatbot.predict(query=request.prompt, config=gen_config) diff --git a/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py b/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py index bee8da8d968..e84e42ee1a3 100644 --- a/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py +++ b/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py @@ -289,7 +289,6 @@ async def chat_completion_endpoint(request: ChatCompletionRequest): def stream_generator(): for output in generator: yield output + "\0" - yield f"data: [DONE]\n\n" return StreamingResponse(stream_generator(), media_type="text/event-stream") else: response = chatbot.predict(query=request.prompt, config=gen_config) From e8170aae7d89f663986c4b4e47094680a42db3d0 Mon Sep 17 00:00:00 2001 From: Sihan Chen <39623753+Spycsh@users.noreply.github.com> Date: Thu, 28 Dec 2023 09:39:27 +0800 Subject: [PATCH 014/101] remove coverage check for tts cn (#1084) --- .../neural_chat/pipeline/plugins/audio/tts_chinese.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts_chinese.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts_chinese.py index b50fce6579c..d4f819c365d 100644 --- 
a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts_chinese.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts_chinese.py @@ -22,7 +22,7 @@ class ChineseTextToSpeech(): def __init__(self): self.tts_executor = TTSExecutor() - def text2speech(self, input, output_audio_path): + def text2speech(self, input, output_audio_path): # pragma: no cover "Chinese text to speech and dump to the output_audio_path." self.tts_executor( text=input, @@ -43,5 +43,5 @@ def text2speech(self, input, output_audio_path): device=paddle.get_device()) return output_audio_path - def post_llm_inference_actions(self, text, output_audio_path): + def post_llm_inference_actions(self, text, output_audio_path): # pragma: no cover return self.text2speech(text, output_audio_path) From a4aba8ddb07c9b744b6ac106502ec059e0c47960 Mon Sep 17 00:00:00 2001 From: "Wang, Chang" Date: Thu, 28 Dec 2023 14:13:48 +0800 Subject: [PATCH 015/101] [LLM example] add calib_shuffle args for text-generation example (#1087) Signed-off-by: Wang, Chang1 --- .../text-generation/quantization/run_generation.py | 13 +++++++++---- .../transformers/modeling/modeling_auto.py | 4 +++- .../transformers/utils/__init__.py | 2 +- .../transformers/utils/config.py | 1 + .../transformers/utils/utility.py | 10 ++++++++++ 5 files changed, 24 insertions(+), 6 deletions(-) diff --git a/examples/huggingface/pytorch/text-generation/quantization/run_generation.py b/examples/huggingface/pytorch/text-generation/quantization/run_generation.py index 03c8878ee1f..483b523d067 100644 --- a/examples/huggingface/pytorch/text-generation/quantization/run_generation.py +++ b/examples/huggingface/pytorch/text-generation/quantization/run_generation.py @@ -11,6 +11,7 @@ AutoModel, ) from transformers.utils import check_min_version +from intel_extension_for_transformers.transformers.utils import str2bool from optimum.intel.generation.modeling import TSModelForCausalLM from 
intel_extension_for_transformers.transformers import ( MixedPrecisionConfig, @@ -67,6 +68,12 @@ parser.add_argument( "--calib_padding", action="store_true", help="Calibration dataset do padding." ) +parser.add_argument( + "--calib_shuffle", + default=True, + type=str2bool, + help="Calibration dataset do shuffle.", +) parser.add_argument( "--calib_pad_val", default=1, type=int, help="Calibration dataset padding value." ) @@ -126,16 +133,14 @@ parser.add_argument("--load_in_4bit", type=bool, default=False) parser.add_argument("--load_in_8bit", type=bool, default=False) parser.add_argument("--_commit_hash", default="main", type=str) -parser.add_argument("--trust_remote_code", default=False) +parser.add_argument("--trust_remote_code", type=bool, default=False) parser.add_argument("--use_llm_runtime", action="store_true") # ======================================= args = parser.parse_args() - # transformers version >= 4.32.0 contained the mpt modeling definition. # https://github.com/huggingface/transformers/blob/main/src/transformers/models/mpt/modeling_mpt.py # 4.31.0 for ipex.optimize_transformers check_min_version("4.31.0") - # get model config if args.peft_model_id: from peft import PeftConfig @@ -228,6 +233,7 @@ op_type_dict=op_type_dict, # default is {} excluded_precisions=excluded_precisions, # default is [] num_beams=generate_kwargs["num_beams"], + calib_shuffle=args.calib_shuffle, calib_iters=args.calib_iters, calib_padding=args.calib_padding, calib_len=args.calib_len, @@ -257,7 +263,6 @@ trust_remote_code=args.trust_remote_code, _commit_hash=args._commit_hash, use_llm_runtime=args.use_llm_runtime, - ) elif args.load_in_4bit or args.load_in_8bit: # CPU device usage is provided by intel-extension-for-transformers. 
diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py index 9d47a9798f9..ae3b0de2d00 100644 --- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py +++ b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py @@ -380,6 +380,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): from torch.utils.data import DataLoader calib_dataset = quantization_config.calib_dataset + calib_shuffle = quantization_config.calib_shuffle calib_iters = quantization_config.calib_iters calib_padding = quantization_config.calib_padding calib_len = quantization_config.calib_len @@ -392,7 +393,8 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): if calib_dataset in ["mbpp", "openai_humaneval"] else "train", ) - calib_dataset = calib_dataset.shuffle(seed=42) + if calib_shuffle: + calib_dataset = calib_dataset.shuffle(seed=42) def tokenize_function(examples): if "prompt" in examples: diff --git a/intel_extension_for_transformers/transformers/utils/__init__.py b/intel_extension_for_transformers/transformers/utils/__init__.py index d5d896a9d0e..3a8abc7dfc0 100644 --- a/intel_extension_for_transformers/transformers/utils/__init__.py +++ b/intel_extension_for_transformers/transformers/utils/__init__.py @@ -24,4 +24,4 @@ SparsityConfig, WeightOnlyQuantConfig, ) -from .utility import LazyImport, logger +from .utility import LazyImport, logger, str2bool diff --git a/intel_extension_for_transformers/transformers/utils/config.py b/intel_extension_for_transformers/transformers/utils/config.py index 6bf69a35b1d..fb49ca28a3b 100644 --- a/intel_extension_for_transformers/transformers/utils/config.py +++ b/intel_extension_for_transformers/transformers/utils/config.py @@ -390,6 +390,7 @@ class SmoothQuantConfig: tokenizer: Any = None calib_func: Any = None calib_dataset: str = "NeelNanda/pile-10k" + 
calib_shuffle: bool = True calib_iters: int = 100 calib_padding: bool = False calib_len: int = 512 diff --git a/intel_extension_for_transformers/transformers/utils/utility.py b/intel_extension_for_transformers/transformers/utils/utility.py index d6df2c0d9b9..d35f4330151 100644 --- a/intel_extension_for_transformers/transformers/utils/utility.py +++ b/intel_extension_for_transformers/transformers/utils/utility.py @@ -17,6 +17,7 @@ """Utils for pytorch framework.""" +import argparse import os from typing import Optional, Tuple from neural_compressor.utils import logger @@ -36,6 +37,15 @@ torch = LazyImport("torch") +def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') def distributed_init( backend="gloo", From e6ecb21ce5d20d0c71935f2b7dccd2a148c56888 Mon Sep 17 00:00:00 2001 From: XuehaoSun Date: Fri, 29 Dec 2023 17:36:11 +0800 Subject: [PATCH 016/101] Add documentation for LLM quantization recipes. 
(#1095) * Add documentation for LLM quantization recipes Signed-off-by: Sun, Xuehao --- .../quantization/llm_quantization_recipes.md | 228 ++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md diff --git a/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md new file mode 100644 index 00000000000..34c63d8438f --- /dev/null +++ b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md @@ -0,0 +1,228 @@ +# Step-by-Step recipes for LLM quantization + +This document describes the step-by-step instructions to run large language models (LLMs) on 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with [PyTorch](https://pytorch.org/) and [Intel® Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch). + +The scripts [run_generation.py](./run_generation.py) provide two quantization approaches respectively (SmoothQuant, Weight-Only Quantization) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and return last token prediction accuracy by `trainer`. 
+ +# Validated Models + +| Model Name | +| :----------------------------------------------------: | +| [EleutherAI/gpt-j-6b](#eleutheraigpt-j-6b) | +| [facebook/opt-1.3b](#facebookopt-13b) | +| [facebook/opt-30b](#facebookopt-30b) | +| [meta-llama/Llama-2-7b-hf](#meta-llamallama-2-7b-hf) | +| [meta-llama/Llama-2-13b-hf](#meta-llamallama-2-13b-hf) | +| [meta-llama/Llama-2-70b-hf](#meta-llamallama-2-70b-hf) | +| [tiiuae/falcon-40b](#tiiuaefalcon-40b) | + +# Prerequisite + +```bash +# Installation +git clone https://github.com/intel/intel-extension-for-transformers.git + +# install ITREX +cd intel-extension-for-transformers +git checkout a4aba8ddb07c9b744b6ac106502ec059e0c47960 +pip install -r requirements.txt +pip install -v . + +# install requirements +cd examples/huggingface/pytorch/text-generation/quantization +pip install -r requirements.txt +pip install neural-compressor==2.4.1 +pip install transformers==4.32.0 +pip install torch==2.1.1+cpu --index-url https://download.pytorch.org/whl/cpu +pip install intel-extension-for-pytorch==2.1.100 +pip uninstall lm_eval -y +pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@cc9778fbe4fa1a709be2abed9deb6180fd40e7e2 +``` + +# Run Quantization and evaluate INT8 accuracy + +## EleutherAI/gpt-j-6b + +### SmoothQuant + +```bash +python run_generation.py \ + --model EleutherAI/gpt-j-6b \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --fallback_add \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --alpha 0.85 +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model EleutherAI/gpt-j-6b \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` + +## facebook/opt-1.3b + +### SmoothQuant + +```bash +python run_generation.py \ + --model facebook/opt-1.3b \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --alpha 0.9 +``` + +### Weight-Only 
Quantization + +```bash +python run_generation.py \ + --model facebook/opt-1.3b \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` + +## facebook/opt-30b + +### SmoothQuant + +```bash +python run_generation.py \ + --model facebook/opt-30b \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --alpha 0.5 +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model facebook/opt-30b \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` + +## meta-llama/Llama-2-7b-hf + +### SmoothQuant + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-7b-hf \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --calib_len 2048 \ + --fallback_add \ + --calib_shuffle False \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --recipes "{'smooth_quant': True, 'smooth_quant_args': {'alpha': 'auto', 'folding': False, 'default_alpha': 0.8, 'auto_alpha_args': {'alpha_min': 0.8, 'alpha_max': 0.99, 'alpha_step': 0.01, 'shared_criterion': 'mean'}}}" +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-7b-hf \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` + +## meta-llama/Llama-2-13b-hf + +### SmoothQuant + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-13b-hf \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --calib_len 1024 \ + --fallback_add \ + --calib_shuffle False \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --recipes "{'smooth_quant': True, 'smooth_quant_args': {'alpha': 'auto', 'folding': False, 'default_alpha': 0.8, 'auto_alpha_args': {'alpha_min': 0.75, 'alpha_max': 0.99, 'alpha_step': 0.01, 'shared_criterion': 'max'}}}" +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-13b-hf \ + --output_dir ./saved_results \ + --woq 
\ + --accuracy +``` + +## meta-llama/Llama-2-70b-hf + +### SmoothQuant + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-70b-hf \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --alpha 0.8 +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model meta-llama/Llama-2-70b-hf \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` + +## tiiuae/falcon-40b + +```bash +pip install transformers==4.33.3 # for tiiuae/falcon-40b +``` + +### SmoothQuant + +```bash +python run_generation.py \ + --model tiiuae/falcon-40b \ + --output_dir ./saved_results \ + --trust_remote_code True \ + --tasks lambada_openai \ + --int8 --sq --accuracy \ + --batch_size 1 \ + --alpha 0.9 +``` + +### Weight-Only Quantization + +```bash +python run_generation.py \ + --model tiiuae/falcon-40b \ + --output_dir ./saved_results \ + --woq \ + --accuracy +``` From 29bbd804cc9ee41d88fcc04cdda4a831b63183b5 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Tue, 2 Jan 2024 07:35:58 +0800 Subject: [PATCH 017/101] [NeuralChat] Support LLM runtime ggml int4 (#1098) * Support llm runtime ggml int4 Signed-off-by: lvliang-intel --- .../neural_chat/models/model_utils.py | 2 +- .../neural_chat/server/neuralchat_server.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 7bcf4b29761..44ad1a0be48 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -911,7 +911,7 @@ def generate_output(): max_new_tokens=max_new_tokens, ctx_size=max_new_tokens, ignore_prompt=True, - interactive=True, + interactive=False if "magicoder" in model_name.lower() else True, do_sample=do_sample, num_beams=num_beams, n_keep=2 if "chatglm" in 
model_name.lower() else 1 diff --git a/intel_extension_for_transformers/neural_chat/server/neuralchat_server.py b/intel_extension_for_transformers/neural_chat/server/neuralchat_server.py index de770a72495..1218c7e6b53 100644 --- a/intel_extension_for_transformers/neural_chat/server/neuralchat_server.py +++ b/intel_extension_for_transformers/neural_chat/server/neuralchat_server.py @@ -161,6 +161,7 @@ def init(self, config): compute_dtype = yaml_config.get("compute_dtype", {}) weight_dtype = yaml_config.get("weight_dtype", {}) use_cached_bin = yaml_config.get("use_cached_bin", {}) + use_ggml = yaml_config.get("use_ggml", False) mix_precision_dtype = yaml_config.get("mix_precision_dtype", {}) load_in_4bit = yaml_config.get("load_in_4bit", {}) bnb_4bit_quant_type = yaml_config.get("bnb_4bit_quant_type", {}) @@ -172,7 +173,7 @@ def init(self, config): from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig, MixedPrecisionConfig if optimization_type == "weight_only": optimization_config = WeightOnlyQuantConfig(compute_dtype=compute_dtype, weight_dtype=weight_dtype, - use_cache=use_cached_bin) + use_ggml=use_ggml, use_cache=use_cached_bin) elif optimization_type == "mix_precision": optimization_config = MixedPrecisionConfig(dtype=mix_precision_dtype) elif optimization_type == "bits_and_bytes": From ee3bb0ef1f42fbcab94d3a9a7b363307ff488c9f Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Tue, 2 Jan 2024 14:43:20 +0800 Subject: [PATCH 018/101] add accelerate (#1102) Signed-off-by: Wenxin Zhang --- .../text-classification/quantization/ptq/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt b/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt index d783f0d73e7..8067cf9633a 100644 --- a/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt +++ 
b/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt @@ -3,4 +3,5 @@ sentencepiece != 0.1.92 protobuf intel-tensorflow transformers -evaluate \ No newline at end of file +evaluate +accelerate \ No newline at end of file From c4d6e6d7b1840672f9fe072077a16129e0bc11fd Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Tue, 2 Jan 2024 20:00:54 +0800 Subject: [PATCH 019/101] [NeuralChat] Align inference parameters num_beams with HF transformers (#1092) Align inference parameters num_beams with HF transformers Signed-off-by: lvliang-intel --- .../neural_chat/config.py | 2 +- .../neural_chat/models/model_utils.py | 11 ++--------- .../neural_chat/server/multi_cpu_server.py | 6 +++--- .../neural_chat/server/multi_hpu_server.py | 6 +++--- workflows/chatbot/inference/generate.py | 6 +++--- 5 files changed, 12 insertions(+), 19 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/config.py b/intel_extension_for_transformers/neural_chat/config.py index 66ccab9a2b6..35fc0d095df 100644 --- a/intel_extension_for_transformers/neural_chat/config.py +++ b/intel_extension_for_transformers/neural_chat/config.py @@ -392,7 +392,7 @@ class GenerationConfig: top_k: int = 40 top_p: float = 0.75 repetition_penalty: float = 1.1 - num_beams: int = 0 + num_beams: int = 1 max_new_tokens: int = 256 do_sample: bool = True num_return_sequences: int = 1 diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 44ad1a0be48..40c32029282 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -809,7 +809,7 @@ def predict_stream(**params): int(params["max_new_tokens"]) if "max_new_tokens" in params else 256 ) do_sample = params["do_sample"] if "do_sample" in params else True - num_beams = int(params["num_beams"]) if "num_beams" in params else 0 + num_beams = 
int(params["num_beams"]) if "num_beams" in params else 1 model_name = ( params["model_name"] if "model_name" in params else "Intel/neural-chat-7b-v3-1" ) @@ -851,9 +851,6 @@ def predict_stream(**params): streamer = TextIteratorStreamer( tokenizer, skip_prompt=True, skip_special_tokens=True ) - if num_beams == 0: - num_beams = 1 - do_sample = True generate_kwargs = get_generate_kwargs( max_new_tokens, input_token_len, @@ -1077,7 +1074,7 @@ def predict(**params): int(params["max_new_tokens"]) if "max_new_tokens" in params else 256 ) do_sample = params["do_sample"] if "do_sample" in params else True - num_beams = int(params["num_beams"]) if "num_beams" in params else 0 + num_beams = int(params["num_beams"]) if "num_beams" in params else 1 model_name = ( params["model_name"] if "model_name" in params else "mosaicml/mpt-7b-chat" ) @@ -1103,10 +1100,6 @@ def predict(**params): "starcoder" in model_name.lower() or \ "codegen" in model_name.lower()) else 1024 - if num_beams == 0: - num_beams = 1 - do_sample = True - input_tokens, input_token_len = tokenization(prompt, tokenizer, device) generate_kwargs = get_generate_kwargs( max_new_tokens, input_token_len, diff --git a/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py b/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py index 0e2cf61d4c6..9aa507b5187 100644 --- a/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py +++ b/intel_extension_for_transformers/neural_chat/server/multi_cpu_server.py @@ -84,7 +84,7 @@ def parse_args(): parser.add_argument( "--num_beams", type=int, - default=0, + default=1, help="The number of beams for beam search.", ) parser.add_argument( @@ -189,8 +189,8 @@ def parse_args(): raise ValueError("Top-k must be between 0 and 200.") if not 1.0 <= args.repetition_penalty <= 2.0: raise ValueError("Repetition penalty must be between 1 and 2.") -if not 0 <= args.num_beams <= 8: - raise ValueError("Number of beams must be between 0 and 8.") +if 
not 1 <= args.num_beams <= 8: + raise ValueError("Number of beams must be between 1 and 8.") if not 32 <= args.max_new_tokens <= 1024: raise ValueError( "The maximum number of new tokens must be between 32 and 1024." diff --git a/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py b/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py index e84e42ee1a3..6c462ab4c99 100644 --- a/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py +++ b/intel_extension_for_transformers/neural_chat/server/multi_hpu_server.py @@ -91,7 +91,7 @@ def parse_args(): parser.add_argument( "--num_beams", type=int, - default=0, + default=1, help="The number of beams for beam search.", ) parser.add_argument( @@ -196,8 +196,8 @@ def parse_args(): raise ValueError("Top-k must be between 0 and 200.") if not 1.0 <= args.repetition_penalty <= 2.0: raise ValueError("Repetition penalty must be between 1 and 2.") -if not 0 <= args.num_beams <= 8: - raise ValueError("Number of beams must be between 0 and 8.") +if not 1 <= args.num_beams <= 8: + raise ValueError("Number of beams must be between 1 and 8.") if not 32 <= args.max_new_tokens <= 1024: raise ValueError( "The maximum number of new tokens must be between 32 and 1024." 
diff --git a/workflows/chatbot/inference/generate.py b/workflows/chatbot/inference/generate.py index 5ff0ec2c5a2..b4f1192a087 100644 --- a/workflows/chatbot/inference/generate.py +++ b/workflows/chatbot/inference/generate.py @@ -73,7 +73,7 @@ def parse_args(): parser.add_argument( "--num_beams", type=int, - default=0, + default=1, help="The number of beams for beam search.", ) parser.add_argument( @@ -178,8 +178,8 @@ def main(): raise ValueError("Top-k must be between 0 and 200.") if not 1.0 <= args.repetition_penalty <= 2.0: raise ValueError("Repetition penalty must be between 1 and 2.") - if not 0 <= args.num_beams <= 8: - raise ValueError("Number of beams must be between 0 and 8.") + if not 1 <= args.num_beams <= 8: + raise ValueError("Number of beams must be between 1 and 8.") if not 32 <= args.max_new_tokens <= 1024: raise ValueError( "The maximum number of new tokens must be between 32 and 1024." From eb671562a8ab58392d98581c8d4f9d93fa529b23 Mon Sep 17 00:00:00 2001 From: "Dong, Bo" Date: Wed, 3 Jan 2024 09:37:59 +0800 Subject: [PATCH 020/101] [LLM Runtime] Add Whisper Example and Python API (#809) --- .../llm/runtime/graph/README.md | 21 + .../llm/runtime/graph/__init__.py | 32 +- .../runtime/graph/application/CMakeLists.txt | 4 +- .../llm/runtime/graph/application/common.cpp | 1 + .../llm/runtime/graph/application/common.h | 1 + .../runtime/graph/application/main_pybind.cpp | 1 + .../runtime/graph/application/quant_model.cpp | 12 +- .../graph/application/quant_whisper.cpp | 74 ++ .../graph/application/whisper_pybind.cpp | 472 +++++++++++++ .../graph/models/baichuan/baichuan_utils.cpp | 2 +- .../graph/models/bloom/bloom_utils.cpp | 2 +- .../graph/models/chatglm/chatglm2_utils.cpp | 2 +- .../graph/models/chatglm/chatglm_utils.cpp | 2 +- .../graph/models/falcon/falcon_utils.cpp | 2 +- .../runtime/graph/models/gptj/gptj_utils.cpp | 2 +- .../graph/models/gptneox/gptneox_utils.cpp | 2 +- .../graph/models/llama/llama_utils.cpp | 2 +- 
.../graph/models/model_utils/model_types.h | 5 +- .../graph/models/model_utils/model_utils.cpp | 252 ------- .../graph/models/model_utils/model_utils.h | 13 - .../graph/models/model_utils/quant_utils.cpp | 645 ++++++++++++++++++ .../graph/models/model_utils/quant_utils.h | 55 ++ .../runtime/graph/models/mpt/mpt_utils.cpp | 2 +- .../runtime/graph/models/opt/opt_utils.cpp | 2 +- .../runtime/graph/models/qwen/qwen_utils.cpp | 1 + .../models/starcoder/starcoder_utils.cpp | 2 +- .../graph/models/whisper/CMakeLists.txt | 4 +- .../runtime/graph/models/whisper/whisper.cpp | 80 ++- .../graph/models/whisper/whisper_utils.cpp | 2 +- .../graph/scripts/ci/cpp_graph_inference.sh | 39 +- .../llm/runtime/graph/scripts/convert.py | 7 +- .../runtime/graph/scripts/convert_whisper.py | 273 ++++---- 32 files changed, 1537 insertions(+), 479 deletions(-) create mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/quant_whisper.cpp create mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/whisper_pybind.cpp create mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.cpp create mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.h diff --git a/intel_extension_for_transformers/llm/runtime/graph/README.md b/intel_extension_for_transformers/llm/runtime/graph/README.md index 38d6c3ac932..4591e929f0c 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/README.md +++ b/intel_extension_for_transformers/llm/runtime/graph/README.md @@ -164,6 +164,18 @@ LLM Runtime supports the following models: Latest + + Whisper-tiny, + Whisper-base + Whisper-small + Whisper-medium + Whisper-large + ✅ + + ✅ + + Latest + @@ -283,6 +295,15 @@ model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=woq outputs = model.generate(inputs, streamer=streamer, max_new_tokens=300, ctx_size=100, n_keep=4, n_discard=-1) ``` +To use whisper to Audio-to-text, here is 
the sample code +```python +from intel_extension_for_transformers.transformers import AutoModelForCausalLM, WeightOnlyQuantConfig +model_name = "Local path for whisper" # please use local path +woq_config = WeightOnlyQuantConfig(use_ggml=True) #Currently, only Q40 is supported +model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=woq_config) +model('Local audio file') +``` + https://github.com/intel/intel-extension-for-transformers/assets/109187816/1698dcda-c9ec-4f44-b159-f4e9d67ab15b Argument description of WeightOnlyQuantConfig ([supported MatMul combinations](#supported-matrix-multiplication-data-types-combinations)): diff --git a/intel_extension_for_transformers/llm/runtime/graph/__init__.py b/intel_extension_for_transformers/llm/runtime/graph/__init__.py index af3c79e47fd..f033e9cb677 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/__init__.py +++ b/intel_extension_for_transformers/llm/runtime/graph/__init__.py @@ -64,6 +64,8 @@ def __import_package(self, model_type): import intel_extension_for_transformers.llm.runtime.graph.qwen_cpp as cpp_model elif model_type == "mistral": import intel_extension_for_transformers.llm.runtime.graph.mistral_cpp as cpp_model + elif model_type == "whisper": + import intel_extension_for_transformers.llm.runtime.graph.whisper_cpp as cpp_model else: raise TypeError("Unspported model type {}!".format(model_type)) self.module = cpp_model @@ -147,6 +149,7 @@ def quant_model(self, model_type, model_path, out_path, **quant_kwargs): self.__import_package(model_type) self.module.Model.quant_model(model_path=model_path, out_path=out_path, **quant_kwargs) + def generate(self, input_ids, streamer=None, interactive=False, ignore_prompt=False, stopping_criteria=None, **generate_kwargs): max_new_tokens = generate_kwargs.get("max_new_tokens", -1) @@ -222,11 +225,24 @@ def pad_token_id(self): " with padding!") return self.tokenizer.pad_token_id - def __call__(self, input_ids, reinit=False, **kwargs): - if 
self.model is None: - self.init_from_bin(self.model_type, self.bin_file, **kwargs) - self.generate_round = 0 - elif reinit: - self.model.reinit() - self.generate_round = 0 - return self.model.evaluate(input_ids.tolist()) + def __call__(self, model_input, reinit=False, **kwargs): + if self.model_type == 'whisper': + if self.model is None: + self.model = self.module.Model() + self.model.init_model(self.bin_file) + if os.path.isfile(model_input): + self.model.inference(model_input) + else: + print("Please input an audio file") + return + if isinstance(model_input, torch.Tensor): + if self.model is None: + self.init_from_bin(self.model_type, self.bin_file, **kwargs) + self.generate_round = 0 + elif reinit: + self.model.reinit() + self.generate_round = 0 + return self.model.evaluate(model_input.tolist()) + else: + print("Please input torch.Tensor") + return diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/CMakeLists.txt b/intel_extension_for_transformers/llm/runtime/graph/application/CMakeLists.txt index 1276e71f99a..fc9a246399a 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/CMakeLists.txt +++ b/intel_extension_for_transformers/llm/runtime/graph/application/CMakeLists.txt @@ -68,7 +68,7 @@ compile_quant(quant_chatglm2 quant_model.cpp chatglm2 chatglm2) compile_quant(quant_baichuan quant_model.cpp baichuan baichuan) compile_quant(quant_mistral quant_model.cpp mistral llama) compile_quant(quant_qwen quant_model.cpp qwen qwen) - +compile_quant(quant_whisper quant_whisper.cpp whisper whisper) # all models running if (NE_PYTHON_API) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) @@ -127,4 +127,4 @@ compile_run(run_mistral main_run.cpp main_pybind.cpp mistral llama) compile_run(run_qwen main_run.cpp main_pybind.cpp qwen qwen) # speech recognition -compile_run(run_whisper audio_run.cpp "" whisper whisper) +compile_run(run_whisper audio_run.cpp whisper_pybind.cpp whisper whisper) diff --git 
a/intel_extension_for_transformers/llm/runtime/graph/application/common.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/common.cpp index b9dab596a54..4c81818b4da 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/common.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/application/common.cpp @@ -102,6 +102,7 @@ bool isValidFilename(const std::string& filename) { return infile.good(); } +int64_t common_time_us() { return ne_time_us(); } void gpt_print_usage(int /*argc*/, char** argv, const common_params& params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/common.h b/intel_extension_for_transformers/llm/runtime/graph/application/common.h index 67a95782301..dbac92bf7a4 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/common.h +++ b/intel_extension_for_transformers/llm/runtime/graph/application/common.h @@ -43,6 +43,7 @@ int32_t get_num_physical_cores(); +int64_t common_time_us(); struct common_params { int32_t n_threads = get_num_physical_cores(); diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp index 7e7f9bf8167..541766cf423 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp @@ -40,6 +40,7 @@ #include "models/model_utils/model_types.h" #include "models/model_utils/model_config.h" #include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/quant_model.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/quant_model.cpp index 
9302bffd852..fa049e923a3 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/quant_model.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/application/quant_model.cpp @@ -24,9 +24,9 @@ #include #include #include - -#include "common.h" #include "models/model_utils/model_utils.h" +#include "common.h" +#include "models/model_utils/quant_utils.h" std::shared_ptr get_model_quant_layer(const std::string model_name) { return ql_registry::create_ql(model_name); @@ -55,24 +55,24 @@ int main(int argc, char** argv) { printf("ne_ftype: %d\n", ftype); const int nthread = q_params.nthread; - const int64_t t_main_start_us = model_time_us(); + const int64_t t_main_start_us = common_time_us(); int64_t t_quantize_us = 0; auto quant_layer = get_model_quant_layer(q_params.model_name); // load the model { - const int64_t t_start_us = model_time_us(); + const int64_t t_start_us = common_time_us(); if (model_quantize(q_params, quant_layer)) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } - t_quantize_us = model_time_us() - t_start_us; + t_quantize_us = common_time_us() - t_start_us; } // report timing { - const int64_t t_main_end_us = model_time_us(); + const int64_t t_main_end_us = common_time_us(); printf("\n"); printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0); diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/quant_whisper.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/quant_whisper.cpp new file mode 100644 index 00000000000..5f81d156c75 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/application/quant_whisper.cpp @@ -0,0 +1,74 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include +#include +#include +#include +#include +#include +#include +#include +#include //NOLINT +#include "models/model_utils/quant_utils.h" +#include "common.h" + +int main(int argc, char** argv) { + quant_params q_params; + if (quant_params_parse(argc, argv, q_params) == false) { + return 1; + } + + // needed to initialize f16 tables + { + struct ne_init_params params = {0, NULL, false}; + struct ne_context* ctx = ne_init(params); + ne_free(ctx); + } + const std::string fname_inp = q_params.model_file; + const std::string fname_out = q_params.out_file; + // printf("input_model_file:%s \n",fname_inp.c_str()); + + const ne_ftype ftype = quant_params_to_ftype(q_params); + if (ftype != NE_FTYPE_MOSTLY_Q4_0) { + fprintf(stderr, "%s: ITREX now only support quantize model to q4_0 \n", __func__); + return 1; + } + + const int64_t t_main_start_us = common_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = common_time_us(); + + if (!whisper_model_quantize(fname_inp, fname_out, ne_ftype(ftype))) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return 1; + } + + t_quantize_us = common_time_us() - t_start_us; + } + + // report timing + { + const int64_t t_main_end_us = common_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f); + } + + return 0; +} diff --git 
a/intel_extension_for_transformers/llm/runtime/graph/application/whisper_pybind.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/whisper_pybind.cpp new file mode 100644 index 00000000000..037d521c2f7 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/application/whisper_pybind.cpp @@ -0,0 +1,472 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Defines sigaction on msys: +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include // NOLINT +#include +#include +#include + +#include "models/whisper/whisper.h" +#include "models/model_utils/quant_utils.h" +#include "application/common.h" + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +#include +#include +#elif defined(_WIN32) +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#include +#include +#endif + +namespace py = pybind11; + +#define STATIC_INPUT_HEAD_IDX 0 +class Model { + public: + Model() { + struct ne_init_params params = {0, NULL, false}; + struct ne_context* ctx = ne_init(params); + ne_free(ctx); + } + ~Model() { + if (ctx) delete (ctx); + } + void init_model(const std::string& model_path); + static void quant_model(const std::string& model_path, const std::string& out_path, const std::string& weight_dtype, + const std::string& alg, int group_size, const std::string& scale_dtype, + 
const std::string& compute_dtype, bool use_ggml, int threads); + void inference(const std::string& fname_inp); + + private: + whisper_context* ctx = nullptr; + whisper_params params; +}; + +const std::vector k_colors = { + "\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m", + "\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m", +}; + +void Model::init_model(const std::string& model_path) { + params.model = model_path; + ctx = whisper_init_from_file(params.model.c_str()); + + if (ctx == nullptr) { + fprintf(stderr, "error: failed to initialize whisper context\n"); + return; + } +} + +void Model::quant_model(const std::string& model_path, const std::string& out_path, const std::string& weight_dtype, + const std::string& alg, int group_size, const std::string& scale_dtype, + const std::string& compute_dtype, bool use_ggml, int threads) { + quant_params q_params; + q_params.model_file = model_path; + q_params.out_file = out_path; + q_params.use_ggml = use_ggml; + q_params.nthread = threads; + // needed to initialize f16 tables + { + struct ne_init_params params = {0, NULL, false}; + struct ne_context* ctx = ne_init(params); + ne_free(ctx); + } + const std::string fname_inp = q_params.model_file; + const std::string fname_out = q_params.out_file; + // printf("input_model_file:%s \n",fname_inp.c_str()); + + const ne_ftype ftype = quant_params_to_ftype(q_params); + if (ftype != NE_FTYPE_MOSTLY_Q4_0) { + fprintf(stderr, "%s: ITREX now only support quantize model to q4_0 \n", __func__); + return; + } + + const int64_t t_main_start_us = common_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = common_time_us(); + + if (!whisper_model_quantize(fname_inp, fname_out, ne_ftype(ftype))) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return; + } + + t_quantize_us = common_time_us() - t_start_us; + } + + // report 
timing + { + const int64_t t_main_end_us = common_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f); + } + + return; +} + +bool read_wav(const std::string& fname, std::vector* pcmf32, std::vector>* pcmf32s, + bool stereo) { + drwav wav; + std::vector wav_data; // used for pipe input from stdin + + if (fname == "-") { + { + uint8_t buf[1024]; + while (true) { + const size_t n = fread(buf, 1, sizeof(buf), stdin); + if (n == 0) { + break; + } + wav_data.insert(wav_data.end(), buf, buf + n); + } + } + + if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) { + fprintf(stderr, "error: failed to open WAV file from stdin\n"); + return false; + } + + fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size()); + } else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) { + fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str()); + return false; + } + + if (wav.channels != 1 && wav.channels != 2) { + fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str()); + return false; + } + + if (stereo && wav.channels != 2) { + fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str()); + return false; + } + + if (wav.sampleRate != WHISPER_SAMPLE_RATE) { + fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), WHISPER_SAMPLE_RATE / 1000); + return false; + } + + if (wav.bitsPerSample != 16) { + fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str()); + return false; + } + + const uint64_t n = + wav_data.empty() ? 
wav.totalPCMFrameCount : wav_data.size() / (wav.channels * wav.bitsPerSample / 8); + + std::vector pcm16; + pcm16.resize(n * wav.channels); + drwav_read_pcm_frames_s16(&wav, n, pcm16.data()); + drwav_uninit(&wav); + + // convert to mono, float + (*pcmf32).resize(n); + if (wav.channels == 1) { + for (uint64_t i = 0; i < n; i++) { + (*pcmf32)[i] = static_cast(pcm16[i]) / 32768.0f; + } + } else { + for (uint64_t i = 0; i < n; i++) { + (*pcmf32)[i] = static_cast(pcm16[2 * i] + pcm16[2 * i + 1]) / 65536.0f; + } + } + + if (stereo) { + // convert to stereo, float + (*pcmf32s).resize(2); + + (*pcmf32s)[0].resize(n); + (*pcmf32s)[1].resize(n); + for (uint64_t i = 0; i < n; i++) { + (*pcmf32s)[0][i] = static_cast(pcm16[2 * i]) / 32768.0f; + (*pcmf32s)[1][i] = static_cast(pcm16[2 * i + 1]) / 32768.0f; + } + } + + return true; +} + +std::string estimate_diarization_speaker(std::vector> pcmf32s, int64_t t0, int64_t t1, + bool id_only = false) { + std::string speaker = ""; + const int64_t n_samples = pcmf32s[0].size(); + + const int64_t is0 = timestamp_to_sample(t0, n_samples); + const int64_t is1 = timestamp_to_sample(t1, n_samples); + + double energy0 = 0.0f; + double energy1 = 0.0f; + + for (int64_t j = is0; j < is1; j++) { + energy0 += fabs(pcmf32s[0][j]); + energy1 += fabs(pcmf32s[1][j]); + } + + if (energy0 > 1.1 * energy1) { + speaker = "0"; + } else if (energy1 > 1.1 * energy0) { + speaker = "1"; + } else { + speaker = "?"; + } + + // printf("is0 = %lld, is1 = %lld, energy0 = %f, energy1 = %f, speaker = %s\n", is0, is1, energy0, energy1, + // speaker.c_str()); + + if (!id_only) { + speaker.insert(0, "(speaker "); + speaker.append(")"); + } + + return speaker; +} + +void whisper_print_segment_callback(struct whisper_context* ctx, struct whisper_state* /*state*/, int n_new, + void* user_data) { + const auto& params = *(reinterpret_cast(user_data))->params; + const auto& pcmf32s = *(reinterpret_cast(user_data))->pcmf32s; + + const int n_segments = 
whisper_full_n_segments(ctx); + + std::string speaker = ""; + + int64_t t0 = 0; + int64_t t1 = 0; + + // print the last n_new segments + const int s0 = n_segments - n_new; + + if (s0 == 0) { + printf("\n"); + } + + for (int i = s0; i < n_segments; i++) { + if (!params.no_timestamps || params.diarize) { + t0 = whisper_full_get_segment_t0(ctx, i); + t1 = whisper_full_get_segment_t1(ctx, i); + } + + if (!params.no_timestamps) { + printf("[%s --> %s] ", to_timestamp(t0).c_str(), to_timestamp(t1).c_str()); + } + + if (params.diarize && pcmf32s.size() == 2) { + speaker = estimate_diarization_speaker(pcmf32s, t0, t1); + } + + if (params.print_colors) { + for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) { + if (params.print_special == false) { + const whisper_token id = whisper_full_get_token_id(ctx, i, j); + if (id >= whisper_token_eot(ctx)) { + continue; + } + } + + const char* text = whisper_full_get_token_text(ctx, i, j); + const float p = whisper_full_get_token_p(ctx, i, j); + + const int col = std::max(0, std::min(static_cast(k_colors.size()) - 1, + static_cast(std::pow(p, 3) * static_cast(k_colors.size())))); + + printf("%s%s%s%s", speaker.c_str(), k_colors[col].c_str(), text, "\033[0m"); + } + } else { + const char* text = whisper_full_get_segment_text(ctx, i); + + printf("%s%s", speaker.c_str(), text); + } + + if (params.tinydiarize) { + if (whisper_full_get_segment_speaker_turn_next(ctx, i)) { + printf("%s", params.tdrz_speaker_turn.c_str()); + } + } + + // with timestamps or speakers: each segment on new line + if (!params.no_timestamps || params.diarize) { + printf("\n"); + } + + fflush(stdout); + } +} +void Model::inference(const std::string& fname_inp) { + params.fname_inp.emplace_back(fname_inp); + if (params.fname_inp.empty()) { + fprintf(stderr, "error: no input files specified\n"); + return; + } + + if (params.language != "auto" && whisper_lang_id(params.language.c_str()) == -1) { + fprintf(stderr, "error: unknown language '%s'\n", 
params.language.c_str()); + exit(0); + } + + if (params.diarize && params.tinydiarize) { + fprintf(stderr, "error: cannot use both --diarize and --tinydiarize\n"); + exit(0); + } + for (size_t f = 0; f < params.fname_inp.size(); ++f) { + const auto fname_inp = params.fname_inp[f]; + const auto fname_out = + f < params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f]; + + std::vector pcmf32; // mono-channel F32 PCM + std::vector> pcmf32s; // stereo-channel F32 PCM + + if (!read_wav(fname_inp, &pcmf32, &pcmf32s, params.diarize)) { + fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str()); + continue; + } + + // print system information + { + fprintf(stderr, "\n"); + fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", params.n_threads * params.n_processors, + std::thread::hardware_concurrency(), whisper_print_system_info()); + } + + // print some info about the processing + { + fprintf(stderr, "\n"); + if (!whisper_is_multilingual(ctx)) { + if (params.language != "en" || params.translate) { + params.language = "en"; + params.translate = false; + fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", + __func__); + } + } + if (params.detect_language) { + params.language = "auto"; + } + fprintf(stderr, + "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, " + "%stimestamps = %d ...\n", + __func__, fname_inp.c_str(), static_cast(pcmf32.size()), + static_cast(pcmf32.size()) / WHISPER_SAMPLE_RATE, params.n_threads, params.n_processors, + params.language.c_str(), params.translate ? "translate" : "transcribe", + params.tinydiarize ? "tdrz = 1, " : "", params.no_timestamps ? 0 : 1); + + fprintf(stderr, "\n"); + } + + // run the inference + { + whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY); + + wparams.strategy = params.beam_size > 1 ? 
WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY; + + wparams.print_realtime = false; + wparams.print_progress = params.print_progress; + wparams.print_timestamps = !params.no_timestamps; + wparams.print_special = params.print_special; + wparams.translate = params.translate; + wparams.language = params.language.c_str(); + wparams.detect_language = params.detect_language; + wparams.n_threads = params.n_threads; + wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx; + wparams.offset_ms = params.offset_t_ms; + wparams.duration_ms = params.duration_ms; + + wparams.token_timestamps = params.output_wts || params.max_len > 0; + wparams.thold_pt = params.word_thold; + wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len; + wparams.split_on_word = params.split_on_word; + + wparams.speed_up = params.speed_up; + + wparams.tdrz_enable = params.tinydiarize; // [TDRZ] + + wparams.initial_prompt = params.prompt.c_str(); + + wparams.greedy.best_of = params.best_of; + wparams.beam_search.beam_size = params.beam_size; + + wparams.temperature_inc = params.no_fallback ? 
0.0f : wparams.temperature_inc; + wparams.entropy_thold = params.entropy_thold; + wparams.logprob_thold = params.logprob_thold; + + whisper_print_user_data user_data = {¶ms, &pcmf32s}; + + // this callback is called on each new segment + if (!wparams.print_realtime) { + wparams.new_segment_callback = whisper_print_segment_callback; + wparams.new_segment_callback_user_data = &user_data; + } + + // example for abort mechanism + // in this example, we do not abort the processing, but we could if the flag is set to true + // the callback is called before every encoder run - if it returns false, the processing is aborted + { + static bool is_aborted = false; // NOTE: this should be atomic to avoid data race + + wparams.encoder_begin_callback = [](struct whisper_context* /*ctx*/, struct whisper_state* /*state*/, + void* user_data) { + bool is_aborted = *(reinterpret_cast(user_data)); + return !is_aborted; + }; + wparams.encoder_begin_callback_user_data = &is_aborted; + } + + if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), params.n_processors) != 0) { + fprintf(stderr, "%s: failed to process audio\n", fname_inp); + return; + } + } + } + whisper_print_timings(ctx); + return; +} + +#if MODEL_NAME_ID == 16 + +PYBIND11_MODULE(whisper_cpp, m) +#endif +{ + m.doc() = "cpp model python binding"; + py::class_(m, "Model", py::module_local()) + .def(py::init()) + .def("init_model", &Model::init_model, "initial model with model path and parameters", py::arg("model_path")) + .def_static("quant_model", &Model::quant_model, "Quantize model", py::arg("model_path"), py::arg("out_path"), + py::arg("weight_dtype") = "int4", py::arg("alg") = "sym", py::arg("group_size") = 32, + py::arg("scale_dtype") = "fp32", py::arg("compute_dtype") = "int8", py::arg("use_ggml") = false, + py::arg("threads") = 8) + .def("inference", &Model::inference, "Translate audio to text"); +} diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan_utils.cpp 
b/intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan_utils.cpp index d270a61a41e..104447d4bdd 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom_utils.cpp index 87d68fbeaa2..c807e2658bd 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2_utils.cpp index 91deb1fc813..29b92749022 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2_utils.cpp @@ -36,7 +36,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include 
"models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm_utils.cpp index 0249dccd380..b84da592dd9 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon_utils.cpp index d962290c09b..e4984095cdb 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj_utils.cpp index e81b9a02edc..bf4be37607d 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" 
+#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox_utils.cpp index 20efec831e8..518ab8b39ee 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama_utils.cpp index 69c4dbdc23c..89901675ef7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/llama/llama_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h index 5754cfaeac3..f30a39a397a 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h @@ -78,7 +78,8 @@ enum model_archs { MODEL_BAICHUAN, MODEL_CHATGLM2, MODEL_CHATGLM, - 
MODEL_QWEN + MODEL_QWEN, + MODEL_WHISPER }; static const size_t MB = 1024 * 1024; @@ -452,7 +453,7 @@ class model_name_to_arch { {"dolly", MODEL_GPTNEOX}, {"polyglot", MODEL_GPTNEOX}, {"starcoder", MODEL_STARCODER}, {"falcon", MODEL_FALCON}, {"bloom", MODEL_BLOOM}, {"chatglm2", MODEL_CHATGLM2}, {"chatglm", MODEL_CHATGLM}, {"baichuan", MODEL_BAICHUAN}, {"mistral", MODEL_LLAMA}, - {"qwen", MODEL_QWEN}}; + {"qwen", MODEL_QWEN}, {"whisper", MODEL_WHISPER}}; }; #ifdef __cplusplus diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp index 95541011c3a..c8dc90f1f28 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp @@ -863,248 +863,6 @@ model_token model_sample_token(struct model_context* ctx, model_token_data_array return result; } -// -// quantization -// -quant_params_internal quant_params_to_internal(const quant_params& params) { - return quant_params_internal{parse_bits(params.weight_dtype), parse_alg(params.alg), params.group_size, - parse_scale_dtype(params.scale_dtype), - parse_compute_type(params.compute_dtype, params.use_ggml)}; -} - -size_t jblas_qpack(const int8_t* src_w, const float* src_scales, const int8_t* src_zps, void* dstpr, - const quant_params_internal params, int nthread, int n, int k, int* g_idx) { - auto ctype = quant2ne_comp_type(params.compute_dtype); - auto dstbptr = reinterpret_cast(dstpr); - jblas::parallel::OMPThreading threading(nthread); - JBLAS_DTYPE quant_type = JBLAS_DTYPE::S4_CLIP; - if (params.bits == quant_bits::q8) { - quant_type = JBLAS_DTYPE::S8; - } - auto dtype_type = static_cast( - jblas::utils::jblas_dtype_get_mask_val(quant_type, JBLAS_DTYPE::TypeMask, JBLAS_DTYPE::TypeShift)); - if (dtype_type == JBLAS_DTYPE::TypeFloat) { - printf("Not support float dtype in 
qpack\n"); - if (params.alg == quant_alg::asym) { - printf("Invalid alg for float quant types, will be igonred\n"); - } - if (params.compute_dtype == quant_comp::int8) { - printf("Compute Int8 is not supported by float quant types, will be igonred\n"); - } - } - JBLAS_DTYPE scale_type = JBLAS_DTYPE::BF16; - if (params.scale_dtype == quant_sdtype::fp32) { - scale_type = JBLAS_DTYPE::F32; - } - if (params.scale_dtype == quant_sdtype::fp16) { - printf("Current not support float16 scale, reset to bf16\n"); - } - auto gsize = params.group_size == -1 ? k : params.group_size; - auto size = JblasGemmPackBSize(n, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, ctype, g_idx); - if (size) { - if (!JblasGemmPackB(dstpr, src_w, src_scales, src_zps, n, k, n, gsize, quant_type, scale_type, - params.alg == quant_alg::asym, ctype, g_idx, &threading)) { - printf("Failed to quant this weight\n"); - return 0; - } - return size; - } - return 0; -} - -// dstptr: default maximum workspace = float array size -size_t jblas_quantize(const float* f32ptr, void* dstpr, const quant_params_internal params, int nthread, size_t n, - size_t k) { - auto ctype = quant2ne_comp_type(params.compute_dtype); - auto dstbptr = reinterpret_cast(dstpr); - jblas::parallel::OMPThreading threading(nthread); - JBLAS_DTYPE quant_type = JBLAS_DTYPE::S4_CLIP; - if (params.bits == quant_bits::q8) { - quant_type = JBLAS_DTYPE::S8; - } - if (params.bits == quant_bits::fp4_e2m1) { - quant_type = JBLAS_DTYPE::F4_E2M1; - } - if (params.bits == quant_bits::nf4) { - quant_type = JBLAS_DTYPE::F4_NF4; - } - if (params.bits == quant_bits::fp8_e4m3) { - quant_type = JBLAS_DTYPE::F8_E4M3; - } - if (params.bits == quant_bits::fp8_e5m2) { - quant_type = JBLAS_DTYPE::F8_E5M2; - } - auto dtype_type = static_cast( - jblas::utils::jblas_dtype_get_mask_val(quant_type, JBLAS_DTYPE::TypeMask, JBLAS_DTYPE::TypeShift)); - if (dtype_type == JBLAS_DTYPE::TypeFloat) { - if (params.alg == quant_alg::asym) { - printf("Invalid 
alg for float quant types, will be igonred\n"); - } - if (params.compute_dtype == quant_comp::int8) { - printf("Compute Int8 is not supported by float quant types, will be igonred\n"); - } - } - JBLAS_DTYPE scale_type = JBLAS_DTYPE::BF16; - if (params.scale_dtype == quant_sdtype::fp32) { - scale_type = JBLAS_DTYPE::F32; - } - if (params.scale_dtype == quant_sdtype::fp16) { - printf("Current not support float16 scale, reset to bf16\n"); - } - if (quant_type == JBLAS_DTYPE::F8_E4M3 || quant_type == JBLAS_DTYPE::F8_E5M2) { - if (params.scale_dtype != quant_sdtype::fp8 && params.scale_dtype != quant_sdtype::fp32) { - printf("Warning: fp8 weight only supports fp8 / fp32 scale now! Fall back to fp8.\n"); - } - scale_type = JBLAS_DTYPE::F8_E8M0; - } - auto gsize = params.group_size == -1 ? k : params.group_size; - auto size = JblasGemmPackBSize(n, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, ctype, nullptr); - bool constexpr IsTrans_TorchWeight = true; - if (size) { - if (!JblasGemmQuantPackB(dstpr, f32ptr, n, k, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, - ctype, IsTrans_TorchWeight, &threading)) { - printf("Failed to quant this weight\n"); - return 0; - } - return size; - } - return 0; -} - -size_t ggml_quantize(const float* f32ptr, void* dstpr, const ne_type new_type, int nthread, size_t nelements) { - std::vector hist_cur(1 << 4, 0); - std::vector workers; - std::mutex mutex; - int chunk_size = 32 * 512; - const int nchunk = (nelements + chunk_size - 1) / chunk_size; - const int nthread_use = nthread > 1 ? 
std::max(1, std::min(nthread, nchunk)) : 1; - size_t new_size = 0; - if (nthread_use < 2) { - new_size = ne_quantize_chunk(new_type, f32ptr, dstpr, 0, nelements, hist_cur.data()); - } else { - size_t counter = 0; - new_size = 0; - auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32ptr, dstpr, nelements, chunk_size]() { - std::vector local_hist; - size_t local_size = 0; - while (true) { - std::unique_lock lock(mutex); - size_t first = counter; - counter += chunk_size; - if (first >= nelements) { - if (!local_hist.empty()) { - for (int j = 0; j < static_cast(local_hist.size()); ++j) { - hist_cur[j] += local_hist[j]; - } - new_size += local_size; - } - break; - } - lock.unlock(); - size_t last = std::min(nelements, first + chunk_size); - if (local_hist.empty()) { - local_hist.resize(hist_cur.size(), 0); - } - local_size += ne_quantize_chunk(new_type, f32ptr, dstpr, first, last - first, local_hist.data()); - } - }; - if (static_cast(workers.size()) < nthread_use - 1) { - workers.resize(nthread_use - 1); - } - for (int it = 0; it < nthread_use - 1; ++it) { - workers[it] = std::thread(compute); - } - compute(); - for (int it = 0; it < nthread_use - 1; ++it) { - workers[it].join(); - } - } - return new_size; -} - -void ne_common_quantize(const int nthread, const quant_params_internal& params, model_load_tensor& tensor, // NOLINT - model_file_saver& saver, size_t& size_org, size_t& size_new) { // NOLINT - size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); - enum ne_type new_type = quant_params_to_type(params); - model_buffer work; - work.resize(nelements * 4); // upper bound on size - void* new_data = work.addr; - size_t new_size = 0; - float* f32_data = NULL; - model_buffer f32_conv_buf; - if (tensor.type == NE_TYPE_F32) { - f32_data = reinterpret_cast(tensor.data); - } else if (tensor.type == NE_TYPE_F16) { - f32_conv_buf.resize(nelements * sizeof(float)); - f32_data = reinterpret_cast(f32_conv_buf.addr); - const auto* f16_data = (const 
ne_fp16_t*)tensor.data; - for (size_t i = 0; i < nelements; i++) { - f32_data[i] = ne_fp16_to_fp32(f16_data[i]); - } - } else { - throw format("type %s unsupported for integer quantization", ne_type_name(tensor.type)); - } - printf("quantizing .. "); - fflush(stdout); - if (new_type == NE_TYPE_JBLAS) { - size_t k_ = tensor.ne.at(0); - size_t n_ = tensor.ne.at(1); - printf("JBLAS "); - new_size = jblas_quantize(f32_data, work.addr, params, nthread, n_, k_); - } else if (new_type >= NE_TYPE_Q4_0 && new_type < NE_TYPE_JBLAS) { - printf("GGML "); - new_size = ggml_quantize(f32_data, work.addr, new_type, nthread, nelements); - } - printf("size = %8.2f MB -> %8.2f MB\n", tensor.size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); - -__WRITE_FILE: - size_org += tensor.size; - size_new += new_size; - saver.write_tensor(tensor, new_type, new_data, new_size); - printf("\n"); -} - -static void model_quantize_internal(const quant_params& params, std::shared_ptr quant_layer) { - auto ftype = quant_params_to_ftype(params); - quant_layer->set_global_config(params.nthread, quant_params_to_internal(params)); - int nthread = params.nthread; - if (nthread <= 0) { - nthread = std::thread::hardware_concurrency(); - } - std::unique_ptr model_loader(new model_model_loader(params.model_file, /*use_mmap*/ false, - /*vocab_only*/ false)); - model_file_saver file_saver(params.out_file.c_str(), model_loader->file_loaders.at(0).get(), ftype); - size_t total_size_org = 0; - size_t total_size_new = 0; - size_t idx = 0; - for (model_load_tensor& tensor : model_loader->tensors_map.tensors) { - model_buffer read_data; - read_data.resize(tensor.size); - tensor.data = read_data.addr; - model_loader->load_data_for(tensor); - printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", ++idx, model_loader->tensors_map.tensors.size(), - tensor.name.c_str(), model_format_tensor_shape(tensor.ne).c_str(), ne_type_name(tensor.type)); - std::vector tmpne(tensor.ne.size()); - for (size_t i = 0; i < tmpne.size(); i++) { - 
tmpne[i] = static_cast(tensor.ne[i]); - } - auto lconfig = quant_layer->get_layer_config(tensor.name, tmpne, tensor.type); - bool quantize = lconfig.valid(); - printf("%s,", lconfig.getstr().c_str()); - if (quantize) { - ne_common_quantize(nthread, lconfig, tensor, file_saver, total_size_org, total_size_new); - } else { - printf("size = %8.3f MB\n", tensor.size / 1024.0 / 1024.0); - total_size_org += tensor.size; - total_size_new += tensor.size; - file_saver.write_tensor(tensor, tensor.type, tensor.data, tensor.size); - printf("\n"); - } - } - printf("%s: model size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); - printf("%s: quant size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0); -} - // // interface implementation // @@ -1231,16 +989,6 @@ struct model_context* model_init_from_file(const char* path_model, struct model_ void model_free(struct model_context* ctx) { delete ctx; } -int model_quantize(const quant_params& params, std::shared_ptr quant_layer) { - try { - model_quantize_internal(params, quant_layer); - return 0; - } catch (const std::string& err) { - fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str()); - return 1; - } -} - int model_apply_lora_from_file_internal(struct model_context* ctx, const char* path_lora, const char* path_base_model, int n_threads) { fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.h b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.h index ff19090e1d0..0d6a5cc8a7f 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.h +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.h @@ -22,7 +22,6 @@ #include "application/common.h" #include "models/model_utils/model_config.h" #include "models/model_utils/model_types.h" -#include 
"models/model_utils/quant_config.h" #ifdef MODEL_SHARED #if defined(_WIN32) && !defined(__MINGW32__) @@ -62,12 +61,10 @@ MODEL_API struct model_context_params model_context_default_params(); MODEL_API bool model_mmap_supported(); MODEL_API bool model_mlock_supported(); - // TODO: not great API - very likely to change // Initialize the model + ne backend // Call once at the start of the program MODEL_API void model_init_backend(); - MODEL_API int64_t model_time_us(); // Various functions for loading a ne model model. @@ -78,15 +75,6 @@ MODEL_API struct model_context* model_init_from_file(const char* path_model, str // Frees all allocated memory MODEL_API void model_free(struct model_context* ctx); -// TODO: not great API - very likely to change -// Returns 0 on success -// param - from args -// quant_layer - depends on each model's config -MODEL_API int model_quantize(const quant_params& param, std::shared_ptr quant_layer); -size_t jblas_qpack(const int8_t* src_w, const float* src_scales, const int8_t* src_zps, void* dstpr, - const quant_params_internal params, int nthread, int n, int k, int* g_idx); -size_t jblas_quantize(const float* f32ptr, void* dstpr, const quant_params_internal params, int nthread, size_t n, - size_t k); // Apply a LoRA adapter to a loaded model // path_base_model is the path to a higher quality model to use as a base for // the layers modified by the adapter. Can be NULL to use the current loaded @@ -98,7 +86,6 @@ MODEL_API int model_apply_lora_from_file(struct model_context* ctx, const char* // Returns the number of tokens in the KV cache MODEL_API int model_get_kv_cache_token_count(const struct model_context* ctx); - // Sets the current rng seed. 
MODEL_API void model_set_rng_seed(struct model_context* ctx, int seed); diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.cpp new file mode 100644 index 00000000000..35934bcb8b9 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.cpp @@ -0,0 +1,645 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// Defines fileno on msys: +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include //NOLINT +#include +#include +#include +#include +#include //NOLINT +#include + +#include "application/common.h" +#include "core/layers/jblas_common.hpp" +#include "core/layers/mha_dense.h" +#include "core/ne_layers.h" +#include "core/layers/jblas_gemm.h" +#include "jblas/jit_blas_parallel.h" +// #include "jblas/jblas/jit_blas_weight_compression.h" +// #include "models/model_utils/model_config.h" + +#include "models/model_utils/model_files.h" +#include "models/whisper/whisper.h" +#include "models/model_utils/quant_utils.h" +#include "models/model_utils/util.h" +#include "models/models.h" + +// default hparams (Whisper tiny) +struct whisper_hparams { + int32_t n_vocab = 51864; + int32_t n_audio_ctx = 1500; + int32_t n_audio_state = 384; + int32_t n_audio_head = 6; + int32_t n_audio_layer = 4; + int32_t n_text_ctx = 448; + int32_t n_text_state = 384; + int32_t n_text_head = 6; + int32_t n_text_layer = 4; + int32_t n_mels = 80; + int32_t ftype = 1; +}; + +struct whisper_filters { + int32_t n_mel; + int32_t n_fft; + + std::vector data; +}; + +// quantize a model +bool whisper_model_quantize(const std::string& fname_inp, const std::string& fname_out, ne_ftype ftype) { + gpt_vocab vocab; + + printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); + + auto finp = std::ifstream(fname_inp, std::ios::binary); + if (!finp) { + fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); + return false; + } + + auto fout = std::ofstream(fname_out, std::ios::binary); + if (!fout) { + fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + finp.read(reinterpret_cast(&magic), sizeof(magic)); + if 
(magic != NE_FILE_MAGIC) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); + return false; + } + + fout.write(reinterpret_cast(&magic), sizeof(magic)); + } + + whisper_hparams hparams; + + // load hparams + { + finp.read(reinterpret_cast(&hparams.n_vocab), sizeof(hparams.n_vocab)); + finp.read(reinterpret_cast(&hparams.n_audio_ctx), sizeof(hparams.n_audio_ctx)); + finp.read(reinterpret_cast(&hparams.n_audio_state), sizeof(hparams.n_audio_state)); + finp.read(reinterpret_cast(&hparams.n_audio_head), sizeof(hparams.n_audio_head)); + finp.read(reinterpret_cast(&hparams.n_audio_layer), sizeof(hparams.n_audio_layer)); + finp.read(reinterpret_cast(&hparams.n_text_ctx), sizeof(hparams.n_text_ctx)); + finp.read(reinterpret_cast(&hparams.n_text_state), sizeof(hparams.n_text_state)); + finp.read(reinterpret_cast(&hparams.n_text_head), sizeof(hparams.n_text_head)); + finp.read(reinterpret_cast(&hparams.n_text_layer), sizeof(hparams.n_text_layer)); + finp.read(reinterpret_cast(&hparams.n_mels), sizeof(hparams.n_mels)); + finp.read(reinterpret_cast(&hparams.ftype), sizeof(hparams.ftype)); + + const int32_t qntvr_src = hparams.ftype / NE_QNT_VERSION_FACTOR; + const int32_t ftype_dst = NE_QNT_VERSION * NE_QNT_VERSION_FACTOR + ftype; + + fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab); + fprintf(stderr, "%s: n_audio_ctx = %d\n", __func__, hparams.n_audio_ctx); + fprintf(stderr, "%s: n_audio_state = %d\n", __func__, hparams.n_audio_state); + fprintf(stderr, "%s: n_audio_head = %d\n", __func__, hparams.n_audio_head); + fprintf(stderr, "%s: n_audio_layer = %d\n", __func__, hparams.n_audio_layer); + fprintf(stderr, "%s: n_text_ctx = %d\n", __func__, hparams.n_text_ctx); + fprintf(stderr, "%s: n_text_state = %d\n", __func__, hparams.n_text_state); + fprintf(stderr, "%s: n_text_head = %d\n", __func__, hparams.n_text_head); + fprintf(stderr, "%s: n_text_layer = %d\n", __func__, hparams.n_text_layer); + fprintf(stderr, "%s: 
n_mels = %d\n", __func__, hparams.n_mels); + fprintf(stderr, "%s: ftype (src) = %d\n", __func__, hparams.ftype); + fprintf(stderr, "%s: qntvr (src) = %d\n", __func__, qntvr_src); + fprintf(stderr, "%s: ftype (dst) = %d\n", __func__, ftype_dst); + fprintf(stderr, "%s: qntvr (dst) = %d\n", __func__, NE_QNT_VERSION); + + fout.write((const char*)&hparams.n_vocab, sizeof(hparams.n_vocab)); + fout.write((const char*)&hparams.n_audio_ctx, sizeof(hparams.n_audio_ctx)); + fout.write((const char*)&hparams.n_audio_state, sizeof(hparams.n_audio_state)); + fout.write((const char*)&hparams.n_audio_head, sizeof(hparams.n_audio_head)); + fout.write((const char*)&hparams.n_audio_layer, sizeof(hparams.n_audio_layer)); + fout.write((const char*)&hparams.n_text_ctx, sizeof(hparams.n_text_ctx)); + fout.write((const char*)&hparams.n_text_state, sizeof(hparams.n_text_state)); + fout.write((const char*)&hparams.n_text_head, sizeof(hparams.n_text_head)); + fout.write((const char*)&hparams.n_text_layer, sizeof(hparams.n_text_layer)); + fout.write((const char*)&hparams.n_mels, sizeof(hparams.n_mels)); + fout.write((const char*)&ftype_dst, sizeof(hparams.ftype)); + } + + // load mel filters + { + whisper_filters filters; + + finp.read(reinterpret_cast(&filters.n_mel), sizeof(filters.n_mel)); + fout.write(reinterpret_cast(&filters.n_mel), sizeof(filters.n_mel)); + finp.read(reinterpret_cast(&filters.n_fft), sizeof(filters.n_fft)); + fout.write(reinterpret_cast(&filters.n_fft), sizeof(filters.n_fft)); + + filters.data.resize(filters.n_mel * filters.n_fft); + finp.read(reinterpret_cast(filters.data.data()), filters.data.size() * sizeof(float)); + fout.write(reinterpret_cast(filters.data.data()), filters.data.size() * sizeof(float)); + } + + // load vocab + { + int32_t n_vocab = 0; + finp.read(reinterpret_cast(&n_vocab), sizeof(n_vocab)); + fout.write(reinterpret_cast(&n_vocab), sizeof(n_vocab)); + + // if (n_vocab != hparams.n_vocab) { + // fprintf(stderr, "%s: invalid model file '%s' (bad vocab 
size %d != %d)\n", + // __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); + // return false; + // } + + char word[129]; + + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + finp.read(reinterpret_cast(&len), sizeof(len)); + fout.write(reinterpret_cast(&len), sizeof(len)); + + word[len] = '\0'; + + finp.read(reinterpret_cast(word), len); + fout.write(reinterpret_cast(word), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + + // regexes of tensor names to not be quantized + const std::vector to_skip = { + // "encoder.*", + "encoder.conv1.bias", + "encoder.conv2.bias", + "encoder.positional_embedding", + "decoder.positional_embedding", + }; + + if (!model_quantize_special(finp, fout, ftype, {".*"}, to_skip)) { + fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str()); + return false; + } + + finp.close(); + fout.close(); + + return true; +} +// +// quantization +// +quant_params_internal quant_params_to_internal(const quant_params& params) { + return quant_params_internal{parse_bits(params.weight_dtype), parse_alg(params.alg), params.group_size, + parse_scale_dtype(params.scale_dtype), + parse_compute_type(params.compute_dtype, params.use_ggml)}; +} + +size_t jblas_qpack(const int8_t* src_w, const float* src_scales, const int8_t* src_zps, void* dstpr, + const quant_params_internal params, int nthread, int n, int k, int* g_idx) { + auto ctype = quant2ne_comp_type(params.compute_dtype); + auto dstbptr = reinterpret_cast(dstpr); + jblas::parallel::OMPThreading threading(nthread); + JBLAS_DTYPE quant_type = JBLAS_DTYPE::S4_CLIP; + if (params.bits == quant_bits::q8) { + quant_type = JBLAS_DTYPE::S8; + } + auto dtype_type = static_cast( + jblas::utils::jblas_dtype_get_mask_val(quant_type, JBLAS_DTYPE::TypeMask, JBLAS_DTYPE::TypeShift)); + if (dtype_type == JBLAS_DTYPE::TypeFloat) { + printf("Not support float dtype in qpack\n"); + if (params.alg == quant_alg::asym) { + printf("Invalid alg for float quant 
types, will be igonred\n"); + } + if (params.compute_dtype == quant_comp::int8) { + printf("Compute Int8 is not supported by float quant types, will be igonred\n"); + } + } + JBLAS_DTYPE scale_type = JBLAS_DTYPE::BF16; + if (params.scale_dtype == quant_sdtype::fp32) { + scale_type = JBLAS_DTYPE::F32; + } + if (params.scale_dtype == quant_sdtype::fp16) { + printf("Current not support float16 scale, reset to bf16\n"); + } + auto gsize = params.group_size == -1 ? k : params.group_size; + auto size = JblasGemmPackBSize(n, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, ctype, g_idx); + if (size) { + if (!JblasGemmPackB(dstpr, src_w, src_scales, src_zps, n, k, n, gsize, quant_type, scale_type, + params.alg == quant_alg::asym, ctype, g_idx, &threading)) { + printf("Failed to quant this weight\n"); + return 0; + } + return size; + } + return 0; +} + +// dstptr: default maximum workspace = float array size +size_t jblas_quantize(const float* f32ptr, void* dstpr, const quant_params_internal params, int nthread, size_t n, + size_t k) { + auto ctype = quant2ne_comp_type(params.compute_dtype); + auto dstbptr = reinterpret_cast(dstpr); + jblas::parallel::OMPThreading threading(nthread); + JBLAS_DTYPE quant_type = JBLAS_DTYPE::S4_CLIP; + if (params.bits == quant_bits::q8) { + quant_type = JBLAS_DTYPE::S8; + } + if (params.bits == quant_bits::fp4_e2m1) { + quant_type = JBLAS_DTYPE::F4_E2M1; + } + if (params.bits == quant_bits::nf4) { + quant_type = JBLAS_DTYPE::F4_NF4; + } + if (params.bits == quant_bits::fp8_e4m3) { + quant_type = JBLAS_DTYPE::F8_E4M3; + } + if (params.bits == quant_bits::fp8_e5m2) { + quant_type = JBLAS_DTYPE::F8_E5M2; + } + auto dtype_type = static_cast( + jblas::utils::jblas_dtype_get_mask_val(quant_type, JBLAS_DTYPE::TypeMask, JBLAS_DTYPE::TypeShift)); + if (dtype_type == JBLAS_DTYPE::TypeFloat) { + if (params.alg == quant_alg::asym) { + printf("Invalid alg for float quant types, will be igonred\n"); + } + if (params.compute_dtype == 
quant_comp::int8) { + printf("Compute Int8 is not supported by float quant types, will be igonred\n"); + } + } + JBLAS_DTYPE scale_type = JBLAS_DTYPE::BF16; + if (params.scale_dtype == quant_sdtype::fp32) { + scale_type = JBLAS_DTYPE::F32; + } + if (params.scale_dtype == quant_sdtype::fp16) { + printf("Current not support float16 scale, reset to bf16\n"); + } + if (quant_type == JBLAS_DTYPE::F8_E4M3 || quant_type == JBLAS_DTYPE::F8_E5M2) { + if (params.scale_dtype != quant_sdtype::fp8 && params.scale_dtype != quant_sdtype::fp32) { + printf("Warning: fp8 weight only supports fp8 / fp32 scale now! Fall back to fp8.\n"); + } + scale_type = JBLAS_DTYPE::F8_E8M0; + } + auto gsize = params.group_size == -1 ? k : params.group_size; + auto size = JblasGemmPackBSize(n, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, ctype, nullptr); + bool constexpr IsTrans_TorchWeight = true; + if (size) { + if (!JblasGemmQuantPackB(dstpr, f32ptr, n, k, k, gsize, quant_type, scale_type, params.alg == quant_alg::asym, + ctype, IsTrans_TorchWeight, &threading)) { + printf("Failed to quant this weight\n"); + return 0; + } + return size; + } + return 0; +} + +size_t ggml_quantize(const float* f32ptr, void* dstpr, const ne_type new_type, int nthread, size_t nelements) { + std::vector hist_cur(1 << 4, 0); + std::vector workers; + std::mutex mutex; + int chunk_size = 32 * 512; + const int nchunk = (nelements + chunk_size - 1) / chunk_size; + const int nthread_use = nthread > 1 ? 
std::max(1, std::min(nthread, nchunk)) : 1; + size_t new_size = 0; + if (nthread_use < 2) { + new_size = ne_quantize_chunk(new_type, f32ptr, dstpr, 0, nelements, hist_cur.data()); + } else { + size_t counter = 0; + new_size = 0; + auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32ptr, dstpr, nelements, chunk_size]() { + std::vector local_hist; + size_t local_size = 0; + while (true) { + std::unique_lock lock(mutex); + size_t first = counter; + counter += chunk_size; + if (first >= nelements) { + if (!local_hist.empty()) { + for (int j = 0; j < static_cast(local_hist.size()); ++j) { + hist_cur[j] += local_hist[j]; + } + new_size += local_size; + } + break; + } + lock.unlock(); + size_t last = std::min(nelements, first + chunk_size); + if (local_hist.empty()) { + local_hist.resize(hist_cur.size(), 0); + } + local_size += ne_quantize_chunk(new_type, f32ptr, dstpr, first, last - first, local_hist.data()); + } + }; + if (static_cast(workers.size()) < nthread_use - 1) { + workers.resize(nthread_use - 1); + } + for (int it = 0; it < nthread_use - 1; ++it) { + workers[it] = std::thread(compute); + } + compute(); + for (int it = 0; it < nthread_use - 1; ++it) { + workers[it].join(); + } + } + return new_size; +} + +void ne_common_quantize(const int nthread, const quant_params_internal& params, model_load_tensor& tensor, // NOLINT + model_file_saver& saver, size_t& size_org, size_t& size_new) { // NOLINT + size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); + enum ne_type new_type = quant_params_to_type(params); + model_buffer work; + work.resize(nelements * 4); // upper bound on size + void* new_data = work.addr; + size_t new_size = 0; + float* f32_data = NULL; + model_buffer f32_conv_buf; + if (tensor.type == NE_TYPE_F32) { + f32_data = reinterpret_cast(tensor.data); + } else if (tensor.type == NE_TYPE_F16) { + f32_conv_buf.resize(nelements * sizeof(float)); + f32_data = reinterpret_cast(f32_conv_buf.addr); + const auto* f16_data = (const 
ne_fp16_t*)tensor.data; + for (size_t i = 0; i < nelements; i++) { + f32_data[i] = ne_fp16_to_fp32(f16_data[i]); + } + } else { + throw format("type %s unsupported for integer quantization", ne_type_name(tensor.type)); + } + printf("quantizing .. "); + fflush(stdout); + if (new_type == NE_TYPE_JBLAS) { + size_t k_ = tensor.ne.at(0); + size_t n_ = tensor.ne.at(1); + printf("JBLAS "); + new_size = jblas_quantize(f32_data, work.addr, params, nthread, n_, k_); + } else if (new_type >= NE_TYPE_Q4_0 && new_type < NE_TYPE_JBLAS) { + printf("GGML "); + new_size = ggml_quantize(f32_data, work.addr, new_type, nthread, nelements); + } + printf("size = %8.2f MB -> %8.2f MB\n", tensor.size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); + +__WRITE_FILE: + size_org += tensor.size; + size_new += new_size; + saver.write_tensor(tensor, new_type, new_data, new_size); + printf("\n"); +} + +static void model_quantize_internal(const quant_params& params, std::shared_ptr quant_layer) { + auto ftype = quant_params_to_ftype(params); + quant_layer->set_global_config(params.nthread, quant_params_to_internal(params)); + int nthread = params.nthread; + if (nthread <= 0) { + nthread = std::thread::hardware_concurrency(); + } + std::unique_ptr model_loader(new model_model_loader(params.model_file, /*use_mmap*/ false, + /*vocab_only*/ false)); + model_file_saver file_saver(params.out_file.c_str(), model_loader->file_loaders.at(0).get(), ftype); + size_t total_size_org = 0; + size_t total_size_new = 0; + size_t idx = 0; + for (model_load_tensor& tensor : model_loader->tensors_map.tensors) { + model_buffer read_data; + read_data.resize(tensor.size); + tensor.data = read_data.addr; + model_loader->load_data_for(tensor); + printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", ++idx, model_loader->tensors_map.tensors.size(), + tensor.name.c_str(), model_format_tensor_shape(tensor.ne).c_str(), ne_type_name(tensor.type)); + std::vector tmpne(tensor.ne.size()); + for (size_t i = 0; i < tmpne.size(); i++) { + 
tmpne[i] = static_cast(tensor.ne[i]); + } + auto lconfig = quant_layer->get_layer_config(tensor.name, tmpne, tensor.type); + bool quantize = lconfig.valid(); + printf("%s,", lconfig.getstr().c_str()); + if (quantize) { + ne_common_quantize(nthread, lconfig, tensor, file_saver, total_size_org, total_size_new); + } else { + printf("size = %8.3f MB\n", tensor.size / 1024.0 / 1024.0); + total_size_org += tensor.size; + total_size_new += tensor.size; + file_saver.write_tensor(tensor, tensor.type, tensor.data, tensor.size); + printf("\n"); + } + } + printf("%s: model size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); + printf("%s: quant size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0); +} + +size_t jblas_special_quantize(const float* f32ptr, void* dstpr, int group_size, int nthread, int n, int k) { return 0; } + +bool model_quantize_special(std::ifstream& finp, std::ofstream& fout, const ne_ftype ftype, + const std::vector& to_quant, const std::vector& to_skip) { + ne_type qtype = NE_TYPE_F32; + + switch (ftype) { + case NE_FTYPE_MOSTLY_Q4_0: + qtype = NE_TYPE_Q4_0; + break; + case NE_FTYPE_MOSTLY_Q_JBLAS: + qtype = NE_TYPE_JBLAS; + break; + case NE_FTYPE_MOSTLY_F16: { + fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype); + return false; + } + } + if (!ne_is_quantized(qtype)) { + fprintf(stderr, "%s: invalid quantization type %d (%s)\n", __func__, qtype, ne_type_name(qtype)); + return false; + } + + size_t total_size_org = 0; + size_t total_size_new = 0; + + std::vector work; + + std::vector data_u8; + std::vector data_f16; + std::vector data_f32; + + std::vector hist_all(1 << 4, 0); + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ttype; + + finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + finp.read(reinterpret_cast(&length), sizeof(length)); + finp.read(reinterpret_cast(&ttype), sizeof(ttype)); + + if (finp.eof()) { + break; + } + + int32_t nelements = 1; + int32_t ne[4] = {1, 1, 1, 1}; + for (int i = 0; 
i < n_dims; ++i) { + finp.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } + + std::string name(length, 0); + finp.read(&name[0], length); + + printf("%64s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ne_type_name((ne_type)ttype)); + + bool quantize = false; + + // check if we should quantize this tensor + for (const auto& s : to_quant) { + if (std::regex_match(name, std::regex(s))) { + quantize = true; + break; + } + } + + // check if we should skip this tensor + for (const auto& s : to_skip) { + if (std::regex_match(name, std::regex(s))) { + quantize = false; + break; + } + } + + // quantize only 2D tensors + quantize &= (n_dims == 2); + + if (quantize) { + if (ttype != NE_TYPE_F32 && ttype != NE_TYPE_F16) { + fprintf(stderr, "%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, + ne_type_name((ne_type)ttype)); + return false; + } + + if (ttype == NE_TYPE_F16) { + data_f16.resize(nelements); + finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ne_fp16_t)); + data_f32.resize(nelements); + for (int i = 0; i < nelements; ++i) { + data_f32[i] = ne_fp16_to_fp32(data_f16[i]); + } + } else { + data_f32.resize(nelements); + finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); + } + + ttype = qtype; + } else { + const int bpe = (ttype == 0) ? 
sizeof(float) : sizeof(uint16_t); + + data_u8.resize(nelements * bpe); + finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); + } + + fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); + fout.write(reinterpret_cast(&length), sizeof(length)); + fout.write(reinterpret_cast(&ttype), sizeof(ttype)); + for (int i = 0; i < n_dims; ++i) { + fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + fout.write(&name[0], length); + + if (quantize) { + work.resize(nelements); // for quantization + + size_t cur_size = 0; + std::vector hist_cur(1 << 4, 0); + + switch ((ne_type)ttype) { + case NE_TYPE_Q4_0: { + cur_size = ne_quantize_chunk((ne_type)ttype, data_f32.data(), work.data(), 0, nelements, hist_cur.data()); + } break; + case NE_TYPE_JBLAS: { + cur_size = jblas_special_quantize(data_f32.data(), work.data(), 32, 1, ne[0], ne[1]); + printf("JBLAS"); + } break; + case NE_TYPE_F32: { + fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ne_type_name((ne_type)ttype)); + return false; + } + } + + fout.write(reinterpret_cast(work.data()), cur_size); + total_size_new += cur_size; + + printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float) / 1024.0 / 1024.0, + cur_size / 1024.0 / 1024.0); + for (int i = 0; i < static_cast(hist_cur.size()); ++i) { + hist_all[i] += hist_cur[i]; + } + + for (int i = 0; i < static_cast(hist_cur.size()); ++i) { + printf("%5.3f ", hist_cur[i] / static_cast(nelements)); + } + printf("\n"); + } else { + printf("size = %8.3f MB\n", data_u8.size() / 1024.0 / 1024.0); + fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); + total_size_new += data_u8.size(); + } + + total_size_org += nelements * sizeof(float); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); + printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new / 1024.0 / 1024.0, ftype, + ne_type_name(qtype)); + + { + int64_t sum_all = 0; + for (int i = 0; i < 
static_cast(hist_all.size()); ++i) { + sum_all += hist_all[i]; + } + + printf("%s: hist: ", __func__); + for (int i = 0; i < static_cast(hist_all.size()); ++i) { + printf("%5.3f ", hist_all[i] / static_cast(sum_all)); + } + printf("\n"); + } + + return true; +} +int model_quantize(const quant_params& params, std::shared_ptr quant_layer) { + try { + model_quantize_internal(params, quant_layer); + return 0; + } catch (const std::string& err) { + fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str()); + return 1; + } +} diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.h b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.h new file mode 100644 index 00000000000..90770e86ae3 --- /dev/null +++ b/intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.h @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#ifndef QUANT_UTILS_H +#define QUANT_UTILS_H + +#include "application/common.h" +#include "models/model_utils/quant_config.h" + +#ifdef MODEL_SHARED +#if defined(_WIN32) && !defined(__MINGW32__) +#ifdef MODEL_BUILD +#define QUANT_API __declspec(dllexport) +#else +#define QUANT_API __declspec(dllimport) +#endif +#else +#define QUANT_API __attribute__((visibility("default"))) +#endif +#else +#define QUANT_API +#endif + +#define MODEL_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' +#define MODEL_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' +#define MODEL_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf' +#define MODEL_FILE_MAGIC_NE 0x67676d6cu // 'ne' +#define MODEL_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' + +#define MODEL_FILE_VERSION 3 +#define MODEL_FILE_MAGIC MODEL_FILE_MAGIC_GGJT +#define MODEL_FILE_MAGIC_UNVERSIONED MODEL_FILE_MAGIC_NE +#define MODEL_SESSION_MAGIC MODEL_FILE_MAGIC_GGSN +#define MODEL_SESSION_VERSION 1 + +QUANT_API int model_quantize(const quant_params& param, std::shared_ptr quant_layer); +size_t jblas_qpack(const int8_t* src_w, const float* src_scales, const int8_t* src_zps, void* dstpr, + const quant_params_internal params, int nthread, int n, int k, int* g_idx); +size_t jblas_quantize(const float* f32ptr, void* dstpr, const quant_params_internal params, int nthread, size_t n, + size_t k); +QUANT_API bool model_quantize_special(std::ifstream& finp, std::ofstream& fout, const ne_ftype ftype, + const std::vector& to_quant, + const std::vector& to_skip); +QUANT_API bool whisper_model_quantize(const std::string& fname_inp, const std::string& fname_out, ne_ftype ftype); +#endif // MODEL_H \ No newline at end of file diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt_utils.cpp index 839bff3f0e7..6e4cb05617b 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt_utils.cpp 
@@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/opt/opt_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/opt/opt_utils.cpp index b9caef9c450..572efcf80c4 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/opt/opt_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/opt/opt_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" void model_load_internal(const std::string& fname, model_archs arch, model_context* ctx, int n_gpu_layers, diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen_utils.cpp index 67aaa074a0d..f1ca257cdd5 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen_utils.cpp @@ -35,6 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/model_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder_utils.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder_utils.cpp index 41d5a511769..f43b9a4ed11 100644 --- 
a/intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder_utils.cpp @@ -35,7 +35,7 @@ #include "models/model_utils/model_config.h" #include "models/model_utils/model_files.h" #include "models/model_utils/model_types.h" -#include "models/model_utils/model_utils.h" +#include "models/model_utils/quant_utils.h" #include "models/model_utils/util.h" #include "models/models.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/CMakeLists.txt b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/CMakeLists.txt index 4060682f83d..6b578bfceb1 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/CMakeLists.txt +++ b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/CMakeLists.txt @@ -13,7 +13,7 @@ # limitations under the License. set(TARGET whisper) -add_library_w_warning(${TARGET} whisper.cpp whisper_utils.cpp) # no (gpt) model utils needed +add_library_w_warning(${TARGET} whisper.cpp whisper_utils.cpp ${MODEL_UTILS_SOURCE}) # no (gpt) model utils needed target_compile_features(${TARGET} PUBLIC cxx_std_11) # don't bump set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) -target_link_libraries(${TARGET} PUBLIC ne_layers ${LLAMA_EXTRA_LIBS} jblas::jblas) +target_link_libraries(${TARGET} PUBLIC ne_layers jblas::jblas) diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.cpp b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.cpp index 5dc5f7063e4..bc45719b097 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.cpp @@ -45,7 +45,7 @@ #define WHISPER_MAX_SCRATCH_BUFFERS 16 // avoid dup code -int64_t model_time_us() { return ne_time_us(); } +int64_t model_time_us_whisper() { return 
ne_time_us(); } // available whisper models enum e_model { @@ -351,7 +351,7 @@ struct whisper_kv_cache { struct ne_context* ctx; - std::vector buf; + model_ctx_buffer buf; int n; // number of tokens currently in the cache }; @@ -394,7 +394,7 @@ struct whisper_model { struct ne_context* ctx; // the model memory buffer is read-only and can be shared between processors - std::vector* buf; + model_ctx_buffer buf; // tensors int n_loaded; @@ -461,7 +461,7 @@ struct whisper_state { whisper_decoder decoders[WHISPER_MAX_DECODERS] = {}; // memory buffers used by encode / decode contexts - std::vector buf_compute; + model_ctx_buffer buf_compute; std::vector buf_scratch[WHISPER_MAX_SCRATCH_BUFFERS]; int buf_last = 0; @@ -547,8 +547,8 @@ static bool kv_cache_init(const struct whisper_hparams& hparams, const size_t me cache->buf.resize(mem_bytes); struct ne_init_params params = { - /*.mem_size =*/cache->buf.size(), - /*.mem_buffer =*/cache->buf.data(), + /*.mem_size =*/cache->buf.size, + /*.mem_buffer =*/cache->buf.addr, /*.no_alloc =*/false, }; @@ -580,11 +580,11 @@ static bool kv_cache_reinit(struct whisper_kv_cache* cache) { const ne_type wtype = cache->k->type; NE_ASSERT(wtype == cache->v->type); - NE_ASSERT(cache->buf.size() >= 2 * n_elements * ne_type_sizef(wtype)); + NE_ASSERT(cache->buf.size >= 2 * n_elements * ne_type_sizef(wtype)); struct ne_init_params params = { - /*.mem_size =*/cache->buf.size(), - /*.mem_buffer =*/cache->buf.data(), + /*.mem_size =*/cache->buf.size, + /*.mem_buffer =*/cache->buf.addr, /*.no_alloc =*/false, }; @@ -622,7 +622,7 @@ static void kv_cache_free(struct whisper_kv_cache* cache) { static bool whisper_model_load(struct whisper_model_loader* loader, whisper_context* wctx) { fprintf(stderr, "%s: loading model\n", __func__); - const int64_t t_start_us = model_time_us(); + const int64_t t_start_us = model_time_us_whisper(); wctx->t_start_us = t_start_us; @@ -716,8 +716,7 @@ static bool whisper_model_load(struct whisper_model_loader* loader, 
whisper_cont // initialize all memory buffers // always have at least one decoder - wctx->model.buf = new std::vector(); - wctx->model.buf->resize(scale * MEM_REQ_MODEL.at(wctx->wtype).at(model.type)); + wctx->model.buf.resize(scale * MEM_REQ_MODEL.at(wctx->wtype).at(model.type)); // we skip initialization of the state until it is needed // because it might be that state will always be provided externally. @@ -931,8 +930,8 @@ static bool whisper_model_load(struct whisper_model_loader* loader, whisper_cont // create the ggml context { struct ne_init_params params = { - /*.mem_size =*/wctx->model.buf->size(), - /*.mem_buffer =*/wctx->model.buf->data(), + /*.mem_size =*/wctx->model.buf.size, + /*.mem_buffer =*/wctx->model.buf.addr, /*.no_alloc =*/false, }; @@ -1219,7 +1218,7 @@ static bool whisper_model_load(struct whisper_model_loader* loader, whisper_cont } } - wctx->t_load_us = model_time_us() - t_start_us; + wctx->t_load_us = model_time_us_whisper() - t_start_us; return true; } @@ -1237,7 +1236,7 @@ static bool whisper_model_load(struct whisper_model_loader* loader, whisper_cont // static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate, const int mel_offset, const int n_threads) { - const int64_t t_start_us = model_time_us(); + const int64_t t_start_us = model_time_us_whisper(); const auto& model = wctx->model; const auto& mel_inp = wstate->mel; @@ -1252,8 +1251,8 @@ static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate assert(mel_inp.n_mel == n_mels); struct ne_init_params params = { - /*.mem_size =*/wstate->buf_compute.size(), - /*.mem_buffer =*/wstate->buf_compute.data(), + /*.mem_size =*/wstate->buf_compute.size, + /*.mem_buffer =*/wstate->buf_compute.addr, /*.no_alloc =*/false, }; @@ -1501,6 +1500,7 @@ static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate // run the computation { struct ne_cgraph gf = {}; + gf.n_threads = n_threads; ne_build_forward_expand(&gf, cur); 
ne_graph_compute(ctx0, &gf); @@ -1526,6 +1526,7 @@ static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate // pre-compute cross-attention memory { struct ne_cgraph gf = {}; + gf.n_threads = n_threads; // hack to disconnect the encoded features from the previous graph cur->op = NE_OP_NONE; @@ -1576,7 +1577,7 @@ static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate ne_free(ctx0); - wstate->t_encode_us += model_time_us() - t_start_us; + wstate->t_encode_us += model_time_us_whisper() - t_start_us; wstate->n_encode++; return true; @@ -1595,7 +1596,7 @@ static bool whisper_encode_internal(whisper_context* wctx, whisper_state* wstate static bool whisper_decode_internal(whisper_context* wctx, whisper_state* wstate, whisper_decoder* decoder, const whisper_token* tokens, const int n_tokens, const int n_past, const int n_threads) { - const int64_t t_start_us = model_time_us(); + const int64_t t_start_us = model_time_us_whisper(); const auto& model = wctx->model; const auto& hparams = model.hparams; @@ -1620,14 +1621,15 @@ static bool whisper_decode_internal(whisper_context* wctx, whisper_state* wstate // n_past, N, M, n_ctx); struct ne_init_params params = { - /*.mem_size =*/wstate->buf_compute.size(), - /*.mem_buffer =*/wstate->buf_compute.data(), + /*.mem_size =*/wstate->buf_compute.size, + /*.mem_buffer =*/wstate->buf_compute.addr, /*.no_alloc =*/false, }; struct ne_context* ctx0 = ne_init(params); struct ne_cgraph gf = {}; + gf.n_threads = n_threads; struct ne_tensor* embd = ne_new_tensor_1d(ctx0, NE_TYPE_I32, N, NE_SIZE_CALC); memcpy(embd->data, tokens, N * ne_element_size(embd)); @@ -1930,7 +1932,7 @@ static bool whisper_decode_internal(whisper_context* wctx, whisper_state* wstate ne_free(ctx0); - wstate->t_decode_us += model_time_us() - t_start_us; + wstate->t_decode_us += model_time_us_whisper() - t_start_us; wstate->n_decode++; return true; @@ -2081,7 +2083,7 @@ static bool log_mel_spectrogram(whisper_state* wstate, 
const float* samples, con const int /*sample_rate*/, const int fft_size, const int fft_step, const int n_mel, const int n_threads, const whisper_filters& filters, const bool speed_up, whisper_mel* mel) { - const int64_t t_start_us = model_time_us(); + const int64_t t_start_us = model_time_us_whisper(); // Hanning window std::vector hann; @@ -2153,7 +2155,7 @@ static bool log_mel_spectrogram(whisper_state* wstate, const float* samples, con mel->data[i] = (mel->data[i] + 4.0) / 4.0; } - wstate->t_mel_us += model_time_us() - t_start_us; + wstate->t_mel_us += model_time_us_whisper() - t_start_us; // printf("mel.n_len() = %d, divided by 1500: %f, n_samples / fft_step: %d\n", // mel.n_len, mel.n_len / 1500.0, n_samples / fft_step); @@ -2434,9 +2436,9 @@ void whisper_free(struct whisper_context* ctx) { if (ctx->model.ctx) { ne_free(ctx->model.ctx); } - if (ctx->model.buf) { - delete ctx->model.buf; - } + // if (ctx->model.buf) { + // delete ctx->model.buf; + // } whisper_free_state(ctx->state); @@ -2764,7 +2766,7 @@ whisper_token whisper_token_translate(struct whisper_context* ctx) { return ctx- whisper_token whisper_token_transcribe(struct whisper_context* ctx) { return ctx->vocab.token_transcribe; } void whisper_print_timings(struct whisper_context* ctx) { - const int64_t t_end_us = model_time_us(); + const int64_t t_end_us = model_time_us_whisper(); fprintf(stderr, "\n"); fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f); @@ -3488,7 +3490,13 @@ int whisper_full_with_state(struct whisper_context* ctx, struct whisper_state* s auto& decoder = state->decoders[j]; if (decoder.kv_self.ctx == nullptr) { - decoder.kv_self = state->decoders[0].kv_self; + // decoder.kv_self = state->decoders[0].kv_self; + decoder.kv_self.k = state->decoders[0].kv_self.k; + decoder.kv_self.v = state->decoders[0].kv_self.v; + decoder.kv_self.ctx = state->decoders[0].kv_self.ctx; + decoder.kv_self.n = state->decoders[0].kv_self.n; + 
decoder.kv_self.buf.resize(state->decoders[0].kv_self.buf.size); + if (!kv_cache_reinit(&decoder.kv_self)) { fprintf(stderr, "%s: kv_cache_reinit() failed for self-attention, decoder %d\n", __func__, j); return -4; @@ -3697,7 +3705,7 @@ int whisper_full_with_state(struct whisper_context* ctx, struct whisper_state* s } { - const int64_t t_start_sample_us = model_time_us(); + const int64_t t_start_sample_us = model_time_us_whisper(); whisper_process_logits(ctx, state, params, &state->decoders[0], t_cur); @@ -3719,12 +3727,12 @@ int whisper_full_with_state(struct whisper_context* ctx, struct whisper_state* s decoder.logprobs.size() * sizeof(decoder.logprobs[0])); } - state->t_sample_us += model_time_us() - t_start_sample_us; + state->t_sample_us += model_time_us_whisper() - t_start_sample_us; } } for (int i = 0, n_max = whisper_n_text_ctx(ctx) / 2 - 4; i < n_max; ++i) { - const int64_t t_start_sample_us = model_time_us(); + const int64_t t_start_sample_us = model_time_us_whisper(); // store the KV caches of all decoders when doing beam-search if (params.strategy == whisper_sampling_strategy::WHISPER_SAMPLING_BEAM_SEARCH) { @@ -3923,7 +3931,7 @@ int whisper_full_with_state(struct whisper_context* ctx, struct whisper_state* s } } - state->t_sample_us += model_time_us() - t_start_sample_us; + state->t_sample_us += model_time_us_whisper() - t_start_sample_us; // obtain logits for the next token for (int j = 0; j < n_decoders_cur; ++j) { @@ -3947,13 +3955,13 @@ int whisper_full_with_state(struct whisper_context* ctx, struct whisper_state* s } { - const int64_t t_start_sample_us = model_time_us(); + const int64_t t_start_sample_us = model_time_us_whisper(); whisper_process_logits(ctx, state, params, &decoder, t_cur); ++decoder.kv_self.n; - state->t_sample_us += model_time_us() - t_start_sample_us; + state->t_sample_us += model_time_us_whisper() - t_start_sample_us; } } } diff --git a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper_utils.cpp 
b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper_utils.cpp index e16e608babe..1e1ee6347b2 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper_utils.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper_utils.cpp @@ -8,7 +8,7 @@ #include #include "whisper.h" - +#include "models/model_utils/quant_utils.h" #define DR_WAV_IMPLEMENTATION #include "dr_wav.h" diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh index b97e49222c1..661e11e306e 100755 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh @@ -149,6 +149,7 @@ model_name_map["baichuan-13b"]="baichuan-inc/Baichuan-13B-Chat" model_name_map["mistral-7b"]="mistralai/Mistral-7B-v0.1" model_name_map["qwen-7b"]="Qwen/Qwen-7B-Chat" model_name_map["magicoder"]="ise-uiuc/Magicoder-S-DS-6.7B" +model_name_map["whisper"]="openai/whisper-tiny" function main() { conda_env="$1" @@ -231,6 +232,10 @@ function main() { elif [[ "${model}" == "magicoder" ]]; then quant_script="./build/bin/quant_llama" infer_cmd="./build/bin/run_llama" + elif [[ "${model}" == "whisper" ]]; then + quant_script="./build/bin/quant_whisper" + infer_cmd="./build/bin/run_whisper" + precision_list+=("q4_0") else echo "Error: Unexpedted model: $model" 1>&2 exit 1 @@ -247,11 +252,13 @@ function main() { for p in "${precision_list[@]}"; do precisions_seen[$p]=x done - for p in "${extra_precision_list[@]}"; do - [[ ${precisions_seen[$p]} ]] && continue - precision_list+=("$p") - precisions_seen[$p]=x - done + if [[ "${model}" != "whisper" ]]; then + for p in "${extra_precision_list[@]}"; do + [[ ${precisions_seen[$p]} ]] && continue + precision_list+=("$p") + precisions_seen[$p]=x + done + fi # init conda #. 
$(dirname ${CONDA_EXE})/../etc/profile.d/conda.sh @@ -362,18 +369,20 @@ function main() { export LANG=en_US.UTF-8 export LC_ALL=en_US.UTF-8 echo "======= Inference Start =======" + if [[ "${model}" == "whisper" ]];then OMP_NUM_THREADS=$cores_per_instance numactl -m 0 -C 0-$(($cores_per_instance - 1)) \ + $infer_cmd -f "/tf_dataset2/models/nlp_toolkit/whisper-tiny/jfk.wav" -m ${model}-${precision}.bin + else + real_ctx=$ctx # TODO(Zhenzhong): use same ctx for chatglm & baichuan + [[ "${model}" == "chatglm2" || "${model}" == "chatglm-6b" || + "${model}" == "baichuan-13b" || "${model}" == "baichuan2-13b" ]] && real_ctx=2047 - real_ctx=$ctx # TODO(Zhenzhong): use same ctx for chatglm & baichuan - [[ "${model}" == "chatglm2" || "${model}" == "chatglm-6b" || - "${model}" == "baichuan-13b" || "${model}" == "baichuan2-13b" ]] && real_ctx=2047 - - OMP_NUM_THREADS=$cores_per_instance numactl -m 0 -C 0-$(($cores_per_instance - 1)) \ - $infer_cmd --seed 1234 -t $cores_per_instance -b 2047 -c $real_ctx -n ${output} -m ${model}-${precision}.bin $extension -p "$prompt" 2>&1 | tee ${WORKSPACE}/${logs_file} || true & - monitor - - echo "======= Inference End =======" - python $script_dir/calculate_percentiles.py ${WORKSPACE}/${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} + OMP_NUM_THREADS=$cores_per_instance numactl -m 0 -C 0-$(($cores_per_instance - 1)) \ + $infer_cmd --seed 1234 -t $cores_per_instance -b 2047 -c $real_ctx -n ${output} -m ${model}-${precision}.bin $extension -p "$prompt" 2>&1 | tee ${WORKSPACE}/${logs_file} || true & + monitor + echo "======= Inference End =======" + python $script_dir/calculate_percentiles.py ${WORKSPACE}/${logs_file} ${model} ${precision} ${cores_per_instance} ${batch_size} ${input} ${output} + fi if [[ "$cores_per_instance" == "${cores_list[@]: -1:1}" ]] && [[ "$batch_size_idx" == "0" ]] && [[ "$input_idx" == "0" ]] && diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert.py 
b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert.py index 56cc556bb53..b0bb74c2bb4 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert.py @@ -19,10 +19,10 @@ from transformers import AutoConfig import subprocess -model_maps = {"gpt_neox": "gptneox", "gpt_bigcode": "starcoder"} +model_maps = {"gpt_neox": "gptneox", "gpt_bigcode": "starcoder", "whisper": "whisper"} -def convert_model(model, outfile, outtype): +def convert_model(model, outfile, outtype, whisper_repo_path=None): config = AutoConfig.from_pretrained(model, trust_remote_code=True) model_type = model_maps.get(config.model_type, config.model_type) @@ -50,6 +50,7 @@ def main(args_in: Optional[List[str]] = None) -> None: default="f32", ) parser.add_argument("--outfile", type=Path, required=True, help="path to write to") + parser.add_argument("--whisper_repo_path", type=Path, required=False, help="path to whisper repo") parser.add_argument("model", type=Path, help="directory containing model file or model id") args = parser.parse_args(args_in) @@ -58,7 +59,7 @@ def main(args_in: Optional[List[str]] = None) -> None: else: dir_model = args.model - convert_model(dir_model, args.outfile, args.outtype) + convert_model(dir_model, args.outfile, args.outtype, args.whisper_repo_path) if __name__ == "__main__": diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_whisper.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_whisper.py index 171889ef24d..3a03ba81196 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_whisper.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_whisper.py @@ -38,6 +38,9 @@ import torch import numpy as np from pathlib import Path +import argparse +from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypeVar, + 
Union) from transformers import WhisperForConditionalGeneration @@ -88,138 +91,152 @@ def bytes_to_unicode(): return dict(zip(bs, cs)) -if len(sys.argv) < 4: - print("Usage: convert-h5-to-ggml.py dir_model path-to-whisper-repo dir-output [use-f32]\n") - sys.exit(1) - -dir_model = Path(sys.argv[1]) -dir_whisper = Path(sys.argv[2]) -dir_out = Path(sys.argv[3]) - -encoder = json.load((dir_model / "vocab.json").open("r", encoding="utf8")) -encoder_added = json.load((dir_model / "added_tokens.json").open("r", encoding="utf8")) -hparams = json.load((dir_model / "config.json").open("r", encoding="utf8")) - -model = WhisperForConditionalGeneration.from_pretrained(dir_model) - -#code.interact(local=locals()) - -n_mels = hparams["num_mel_bins"] -with np.load(os.path.join(dir_whisper, "whisper/assets", "mel_filters.npz")) as f: - filters = torch.from_numpy(f[f"mel_{n_mels}"]) - -dir_tokenizer = dir_model - -fname_out = dir_out / "ggml-model.bin" - -tokens = json.load(open(dir_tokenizer / "vocab.json", "r", encoding="utf8")) - -# use 16-bit or 32-bit floats -use_f16 = True -if len(sys.argv) > 4: - use_f16 = False - fname_out = dir_out / "ggml-model-f32.bin" - -fout = open(fname_out, "wb") - -fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex -fout.write(struct.pack("i", hparams["vocab_size"])) -fout.write(struct.pack("i", hparams["max_source_positions"])) -fout.write(struct.pack("i", hparams["d_model"])) -fout.write(struct.pack("i", hparams["encoder_attention_heads"])) -fout.write(struct.pack("i", hparams["encoder_layers"])) -fout.write(struct.pack("i", hparams["max_length"])) -fout.write(struct.pack("i", hparams["d_model"])) -fout.write(struct.pack("i", hparams["decoder_attention_heads"])) -fout.write(struct.pack("i", hparams["decoder_layers"])) -fout.write(struct.pack("i", hparams["num_mel_bins"])) -fout.write(struct.pack("i", use_f16)) - -fout.write(struct.pack("i", filters.shape[0])) -fout.write(struct.pack("i", filters.shape[1])) -for i in 
range(filters.shape[0]): - for j in range(filters.shape[1]): - fout.write(struct.pack("f", filters[i][j])) - -byte_encoder = bytes_to_unicode() -byte_decoder = {v: k for k, v in byte_encoder.items()} - -fout.write(struct.pack("i", len(tokens))) - -tokens = sorted(tokens.items(), key=lambda x: x[1]) -for key in tokens: - text = bytearray([byte_decoder[c] for c in key[0]]) - fout.write(struct.pack("i", len(text))) - fout.write(text) - -list_vars = model.state_dict() -for name in list_vars.keys(): - # this seems to not be used - # ref: https://github.com/huggingface/transformers/blob/9a5b84a0076a04fe9596da72e8668069d4f09ea0/src - # /transformers/models/whisper/modeling_whisper.py#L1099-L1106 - if name == "proj_out.weight": - print('Skipping', name) - continue - - src = name - - nn = name - if name != "proj_out.weight": - nn = nn.split(".")[1:] - else: - nn = nn.split(".") - - if nn[1] == "layers": - nn[1] = "blocks" - if ".".join(nn[3:-1]) == "encoder_attn.k_proj": - mapped = "attn.key" if nn[0] == "encoder" else "cross_attn.key" +def main(args_in: Optional[List[str]] = None) -> None: + parser = argparse.ArgumentParser(description="Convert a model to a NE compatible file") + parser.add_argument("--outtype", + choices=["f32", "f16"], + default="fp32", + help="output format (default: based on input)") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file") + args = parser.parse_args(args_in) + dir_model = args.model + dir_out = args.outfile + out_type = args.outtype + + + encoder = json.load((dir_model / "vocab.json").open("r", encoding="utf8")) + encoder_added = json.load((dir_model / "added_tokens.json").open("r", encoding="utf8")) + hparams = json.load((dir_model / "config.json").open("r", encoding="utf8")) + + model = WhisperForConditionalGeneration.from_pretrained(dir_model) + + #code.interact(local=locals()) + path = os.getcwd() + path = 
path+'/whisper' + if os.path.exists(path)== False: + os.system('git clone https://github.com/openai/whisper.git') + n_mels = hparams["num_mel_bins"] + mel_path = path+'/whisper/assets/mel_filters.npz' + with np.load(mel_path) as f: + filters = torch.from_numpy(f[f"mel_{n_mels}"]) + + dir_tokenizer = dir_model + + fname_out = dir_out + + tokens = json.load(open(dir_tokenizer / "vocab.json", "r", encoding="utf8")) + + # Default use 16-bit + + use_f16 = True + + fout = open(fname_out, "wb") + + fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex + fout.write(struct.pack("i", hparams["vocab_size"])) + fout.write(struct.pack("i", hparams["max_source_positions"])) + fout.write(struct.pack("i", hparams["d_model"])) + fout.write(struct.pack("i", hparams["encoder_attention_heads"])) + fout.write(struct.pack("i", hparams["encoder_layers"])) + fout.write(struct.pack("i", hparams["max_length"])) + fout.write(struct.pack("i", hparams["d_model"])) + fout.write(struct.pack("i", hparams["decoder_attention_heads"])) + fout.write(struct.pack("i", hparams["decoder_layers"])) + fout.write(struct.pack("i", hparams["num_mel_bins"])) + fout.write(struct.pack("i", use_f16)) + + fout.write(struct.pack("i", filters.shape[0])) + fout.write(struct.pack("i", filters.shape[1])) + for i in range(filters.shape[0]): + for j in range(filters.shape[1]): + fout.write(struct.pack("f", filters[i][j])) + + byte_encoder = bytes_to_unicode() + byte_decoder = {v: k for k, v in byte_encoder.items()} + + fout.write(struct.pack("i", len(tokens))) + + tokens = sorted(tokens.items(), key=lambda x: x[1]) + for key in tokens: + text = bytearray([byte_decoder[c] for c in key[0]]) + fout.write(struct.pack("i", len(text))) + fout.write(text) + + list_vars = model.state_dict() + for name in list_vars.keys(): + # this seems to not be used + # ref: https://github.com/huggingface/transformers/blob/9a5b84a0076a04fe9596da72e8668069d4f09ea0/src + # /transformers/models/whisper/modeling_whisper.py#L1099-L1106 + if 
name == "proj_out.weight": + print('Skipping', name) + continue + + src = name + + nn = name + if name != "proj_out.weight": + nn = nn.split(".")[1:] + else: + nn = nn.split(".") + + if nn[1] == "layers": + nn[1] = "blocks" + if ".".join(nn[3:-1]) == "encoder_attn.k_proj": + mapped = "attn.key" if nn[0] == "encoder" else "cross_attn.key" + else: + mapped = conv_map[".".join(nn[3:-1])] + name = ".".join(nn[:3] + [mapped] + nn[-1:]) + else: + name = ".".join(nn) + name = conv_map[name] if name in conv_map else name + + print(src, ' -> ', name) + data = list_vars[src].squeeze().numpy() + data = data.astype(np.float16) + + # reshape conv bias from [n] to [n, 1] + if name in ["encoder.conv1.bias", "encoder.conv2.bias"]: + data = data.reshape(data.shape[0], 1) + print(" Reshaped variable: ", name, " to shape: ", data.shape) + + n_dims = len(data.shape) + print(name, n_dims, data.shape) + + # looks like the whisper models are in f16 by default + # so we need to convert the small tensors to f32 until we fully support f16 in ggml + # ftype == 0 -> float32, ftype == 1 -> float16 + ftype = 1 + if use_f16: + if n_dims < 2 or \ + name == "encoder.conv1.bias" or \ + name == "encoder.conv2.bias" or \ + name == "encoder.positional_embedding" or \ + name == "decoder.positional_embedding": + print(" Converting to float32") + data = data.astype(np.float32) + ftype = 0 else: - mapped = conv_map[".".join(nn[3:-1])] - name = ".".join(nn[:3] + [mapped] + nn[-1:]) - else: - name = ".".join(nn) - name = conv_map[name] if name in conv_map else name - - print(src, ' -> ', name) - data = list_vars[src].squeeze().numpy() - data = data.astype(np.float16) - - # reshape conv bias from [n] to [n, 1] - if name in ["encoder.conv1.bias", "encoder.conv2.bias"]: - data = data.reshape(data.shape[0], 1) - print(" Reshaped variable: ", name, " to shape: ", data.shape) - - n_dims = len(data.shape) - print(name, n_dims, data.shape) - - # looks like the whisper models are in f16 by default - # so we need to 
convert the small tensors to f32 until we fully support f16 in ggml - # ftype == 0 -> float32, ftype == 1 -> float16 - ftype = 1 - if use_f16: - if n_dims < 2 or \ - name == "encoder.conv1.bias" or \ - name == "encoder.conv2.bias" or \ - name == "encoder.positional_embedding" or \ - name == "decoder.positional_embedding": - print(" Converting to float32") data = data.astype(np.float32) ftype = 0 - else: - data = data.astype(np.float32) - ftype = 0 - # header - str_ = name.encode('utf-8') - fout.write(struct.pack("iii", n_dims, len(str_), ftype)) - for i in range(n_dims): - fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) - fout.write(str_) + # header + str_ = name.encode('utf-8') + fout.write(struct.pack("iii", n_dims, len(str_), ftype)) + for i in range(n_dims): + fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) + fout.write(str_) + + # data + data.tofile(fout) + + fout.close() + + print("Done. Output file: ", fname_out) + print("") - # data - data.tofile(fout) -fout.close() + -print("Done. 
Output file: ", fname_out) -print("") +if __name__ == "__main__": + main() From 8d11949d832354965c9639bb0c6e83c4e1b67992 Mon Sep 17 00:00:00 2001 From: "Wang, Zhe" Date: Wed, 3 Jan 2024 13:23:01 +0800 Subject: [PATCH 021/101] Qbits Backend support act shuffle (#1096) --- .../library/jblas/jblas/jit_blas_prologue_a.h | 5 + .../llm/library/jblas/jblas/kernel_avx2.h | 13 +- .../llm/library/jblas/jblas/kernel_jit.h | 30 +-- .../llm/library/jblas/jblas/kernel_ref.h | 2 +- .../llm/library/jblas/jblas/kernel_wrapper.h | 4 +- .../include/jblas_weightonly_dispatcher.hpp | 36 ++- .../csrc/dispatcher/src/jblas_packq_impl.cpp | 56 +++++ .../src/jblas_weightonly_dispatcher.cpp | 216 +++++++++++------- .../llm/operator/csrc/qbits.cpp | 38 ++- .../llm/operator/csrc/qbits_ut/test_packq.py | 68 ++++++ .../operator/csrc/qbits_ut/test_weightonly.py | 13 +- .../llm/operator/csrc/qbits_ut/ut_utils.py | 9 +- .../llm/quantization/autograd/functions.py | 6 +- .../llm/quantization/nn/modules.py | 6 +- tests/CI/test_weight_only.py | 4 +- 15 files changed, 379 insertions(+), 127 deletions(-) create mode 100644 intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp create mode 100644 intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_a.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_a.h index 6bfaad69a2f..be0d109e6a8 100644 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_a.h +++ b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_a.h @@ -367,6 +367,8 @@ class ShuffleActivationKBlockBase : public ActivationKBlockBase<_GemmCore_T, ISA template using ShuffleActivationKBlockBaseF32 = ShuffleActivationKBlockBase<_GemmCore_T, ISA_T, float>; +template +using ShuffleActivationKBlockBaseBf16 = ShuffleActivationKBlockBase<_GemmCore_T, ISA_T, utils::bf16>; template struct 
ParamShuffleActivationKBlockQuantize : ParamActivationKBlockQuantize { @@ -422,6 +424,9 @@ class ShuffleActivationKBlockQuantize : public ActivationKBlockQuantize<_GemmCor template using ShuffleActivationKBlockQuantizeF32 = ShuffleActivationKBlockQuantize<_GemmCore_T, ISA_T, float>; + +template +using ShuffleActivationKBlockQuantizeBf16 = ShuffleActivationKBlockQuantize<_GemmCore_T, ISA_T, utils::bf16>; } // namespace gemm } // namespace prologue_a } // namespace jblas diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx2.h b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx2.h index 1e9fdf287f8..7ee0933ce20 100644 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx2.h +++ b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx2.h @@ -161,15 +161,14 @@ JBLAS_CODE dequant_kblock_s8_fp_fwd(int8_t* srcptr, _DST_T* dstptr, int row, int auto s8_ymm_v = _mm_loadl_epi64(reinterpret_cast<__m128i*>(srcptr + i * ld_src + j)); auto s32_ymm_v = _mm256_cvtepi8_epi32(s8_ymm_v); if constexpr (WITH_ZP) { - s32_ymm_v = _mm256_sub_epi32( - s32_ymm_v, - _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<__m128i*>(zero_points + kpos * NPad + j)))); + auto zp_ymm = + _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<__m128i*>(zero_points + kpos * NPad + j / PACK_ROW))); + if constexpr (PACK_ROW == 4) zp_ymm = _mm256_permutevar8x32_epi32(zp_ymm, packrow4_permute_idx); + s32_ymm_v = _mm256_sub_epi32(s32_ymm_v, zp_ymm); } auto f32_ymm_v = _mm256_cvtepi32_ps(s32_ymm_v); auto scale_ymm = _mm256_loadu_ps(sptr + j / PACK_ROW); - if constexpr (PACK_ROW == 4) { - scale_ymm = _mm256_permutevar8x32_ps(scale_ymm, packrow4_permute_idx); - } + if constexpr (PACK_ROW == 4) scale_ymm = _mm256_permutevar8x32_ps(scale_ymm, packrow4_permute_idx); f32_ymm_v = _mm256_mul_ps(f32_ymm_v, scale_ymm); if constexpr (std::is_same_v<_DST_T, float>) { _mm256_storeu_ps(dstptr + i * ld_dst + j, f32_ymm_v); @@ -181,7 +180,7 @@ 
JBLAS_CODE dequant_kblock_s8_fp_fwd(int8_t* srcptr, _DST_T* dstptr, int row, int } for (; j < col; j++) { float tmp = (float)(srcptr[i * ld_src + j]); - if constexpr (WITH_ZP) tmp -= (float)(zero_points[kpos * NPad + j]); + if constexpr (WITH_ZP) tmp -= (float)(zero_points[kpos * NPad + j / PACK_ROW]); dstptr[i * ld_dst + j] = tmp * sptr[j / PACK_ROW]; } } diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit.h b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit.h index 4a711736e9d..0f77106fef2 100644 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit.h +++ b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit.h @@ -57,7 +57,7 @@ class DequanS8FP { void generate(JBLAS_DTYPE dst_dt, int pack_row) { assert(pack_row == 1 || pack_row == 2 || pack_row == 4); - int scale_step = 64 / pack_row; + int zmm_scale_step = 64 / pack_row; Xbyak::Label data_label; inLocalLabel(); // use local label for multiple instance { @@ -105,19 +105,23 @@ class DequanS8FP { return 4; // f32 case. }; - auto generateNTile = [&](int N, JBLAS_DTYPE dst_dt, int scale_step, std::string row_label) { + auto generateNTile = [&](int N, JBLAS_DTYPE dst_dt, int zmm_scale_step, std::string row_label) { if (pack_row == 2) { vmovups(Xbyak::Zmm(RegTmp), ptr[rip + data_label + 8]); } else if (pack_row == 4) { vmovups(Xbyak::Zmm(RegTmp), ptr[rip + data_label + 72]); } for (int i = 0; i < N; i++) { - vmovups(Xbyak::Zmm(RegScale + i), ptr[reg_scaleptr + i * scale_step]); + vmovups(Xbyak::Zmm(RegScale + i), ptr[reg_scaleptr + i * zmm_scale_step]); if (pack_row == 2 || pack_row == 4) { vpermd(Xbyak::Zmm(RegScale + i), Xbyak::Zmm(RegTmp), Xbyak::Zmm(RegScale + i)); } if (!is_sym) { - vpmovsxbd(Xbyak::Zmm(RegZP + i), ptr[reg_zpptr + i * 16]); + vpmovsxbd(Xbyak::Zmm(RegZP + i), + ptr[reg_zpptr + i * zmm_scale_step / sizeof(float)]); // revert to zp_step. 
+ if (pack_row == 2 || pack_row == 4) { + vpermd(Xbyak::Zmm(RegZP + i), Xbyak::Zmm(RegTmp), Xbyak::Zmm(RegZP + i)); + } } } xor_(reg_iterrow, reg_iterrow); @@ -163,32 +167,32 @@ class DequanS8FP { sub(reg_tmp, reg_itercol); cmp(reg_tmp, 64); jl(".proc48", T_NEAR); - generateNTile(4, dst_dt, scale_step, ".rowloop1"); + generateNTile(4, dst_dt, zmm_scale_step, ".rowloop1"); add(reg_itercol, 64); add(reg_srcptr, 1 * 64); add(reg_dstptr, get_dst_step() * 64); - add(reg_scaleptr, 4 * scale_step); - if (!is_sym) add(reg_zpptr, 1 * 64); + add(reg_scaleptr, 4 * 64 / pack_row); + if (!is_sym) add(reg_zpptr, 1 * 64 / pack_row); jmp(".colend", T_NEAR); L(".proc48"); cmp(reg_tmp, 48); jl(".proc32", T_NEAR); - generateNTile(3, dst_dt, scale_step, ".rowloop2"); + generateNTile(3, dst_dt, zmm_scale_step, ".rowloop2"); add(reg_itercol, 48); add(reg_srcptr, 1 * 48); add(reg_dstptr, get_dst_step() * 48); - add(reg_scaleptr, 4 * scale_step); - if (!is_sym) add(reg_zpptr, 1 * 48); + add(reg_scaleptr, 4 * 48 / pack_row); + if (!is_sym) add(reg_zpptr, 1 * 48 / pack_row); jmp(".colend", T_NEAR); L(".proc32"); - generateNTile(2, dst_dt, scale_step, ".rowloop3"); + generateNTile(2, dst_dt, zmm_scale_step, ".rowloop3"); add(reg_itercol, 32); add(reg_srcptr, 1 * 32); add(reg_dstptr, get_dst_step() * 32); - add(reg_scaleptr, 4 * scale_step); - if (!is_sym) add(reg_zpptr, 1 * 32); + add(reg_scaleptr, 4 * 32 / pack_row); + if (!is_sym) add(reg_zpptr, 1 * 32 / pack_row); L(".colend"); cmp(reg_itercol, reg_colsize); diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_ref.h b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_ref.h index 1e0ddccdae7..02336c047da 100644 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_ref.h +++ b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_ref.h @@ -335,7 +335,7 @@ inline JBLAS_CODE decompress_kblock_s8_fp(int8_t* srcptr, _DST_T* dstptr, int ro auto sptr = scales + kpos * NPad; for (int 
j = 0; j < col; j += 1) { float tmp = static_cast(srcptr[i * ld_src + j]); - if (zero_points != nullptr) tmp -= static_cast(zero_points[kpos * NPad + j]); + if (zero_points != nullptr) tmp -= static_cast(zero_points[kpos * NPad + j / _PACK_ROW]); dstptr[i * ld_dst + j] = static_cast<_DST_T>(tmp * sptr[j / _PACK_ROW]); } } diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_wrapper.h b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_wrapper.h index 742748e899d..5abbda0b864 100644 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_wrapper.h +++ b/intel_extension_for_transformers/llm/library/jblas/jblas/kernel_wrapper.h @@ -715,10 +715,10 @@ class ColBlockReduceSum { template static inline JBLAS_CODE forward(const SRC_T* srcptr, int ldsrc, int row, int col, int blocksize, float* reduce, int ldr) { - if constexpr (utils::isa_base::avx512f) { + if constexpr (utils::isa_base::avx512f && std::is_same_v) { return avx512f::col_block_reduce_sum(srcptr, ldsrc, row, col, blocksize, reduce, ldr); } - if constexpr (utils::isa_base::avx2) { + if constexpr (utils::isa_base::avx2 && std::is_same_v) { return avx2::col_block_reduce_sum(srcptr, ldsrc, row, col, blocksize, reduce, ldr); } return ref::col_block_reduce_sum(srcptr, ldsrc, row, col, blocksize, reduce, ldr); diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp index 7f6f60b9ed7..e03d09a377d 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp @@ -27,22 +27,48 @@ enum WOQ_TASK { WOQ_LINEAR, }; -struct woq_config_param { - std::string compute_type; // determin gemm core template - std::string weight_type; // determin compress-weight 
template - std::string scale_type; // determin scale param +struct woq_param_base { + std::string compute_type; // determin gemm core template + std::string weight_type; // determin compressed-weight template + std::string scale_type; // determin scale param + bool asym; + int blocksize; +}; + +struct woq_config_param : public woq_param_base { dispatcher_utils::QBITS_DT src_dt; // determin activation related template dispatcher_utils::QBITS_DT dst_dt; // determin write_back template }; +struct woq_packq_param : public woq_param_base { + bool enable_act_shuffle; +}; + +struct woq_packq_ctx { + torch::Tensor *qweight, *scale, *zp, *g_idx, *output; + int n, k; +}; + struct woq_runtime_ctx { torch::Tensor *activation, *weight, *bias, *output; bool transpose; - int blocksize, m, n, k, lda, ldo; + int m, n, k, lda, ldo; float alpha, beta; jblas::storage::gemm::IWeightBase* deseries_wei; }; +static std::map wei2jblasdt_map{{"int8", JBLAS_DTYPE::S8}, + {"int4_clip", JBLAS_DTYPE::S4_CLIP}, + {"int4_fullrange", JBLAS_DTYPE::S4_FULLRANGE}, + {"nf4", JBLAS_DTYPE::F4_NF4}, + {"fp4_e2m1_bnb", JBLAS_DTYPE::F4_BNB}, + {"fp4_e2m1", JBLAS_DTYPE::F4_E2M1}, + {"fp8_e4m3", JBLAS_DTYPE::F8_E4M3}, + {"fp8_e5m2", JBLAS_DTYPE::F8_E5M2}}; +static std::map scale2jblasdt_map{{"fp32", JBLAS_DTYPE::F32}, + {"fp8_e8m0", JBLAS_DTYPE::F8_E8M0}}; + void dispatch_woq_task(woq_config_param* p, woq_runtime_ctx* ctx, WOQ_TASK task); +void jblas_packq(woq_packq_param* p, woq_packq_ctx* ctx); void set_woq_workspace(torch::Tensor* workspace); } // namespace woq diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp new file mode 100644 index 00000000000..3cdfa0dca57 --- /dev/null +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp @@ -0,0 +1,56 @@ +#include "jblas/jit_blas_prologue_b.h" +#include 
"../include/jblas_weightonly_dispatcher.hpp" + +namespace woq { +template +void execute_qpack(woq_packq_param* p, woq_packq_ctx* ctx) { + using proB = jblas::prologue_b::gemm::WeightKBlockNInteger; + static proB ker; + auto qpackw = ker.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], + scale2jblasdt_map[p->scale_type], JBLAS_DTYPE::BF16, p->asym); + if (p->enable_act_shuffle) ker.enableShuffle(&qpackw); + *(ctx->output) = torch::empty(qpackw.mSize, torch::kInt8); + qpackw.assign(ctx->output->data_ptr()); + if (p->enable_act_shuffle) + ker.setShuffleIndices(ctx->g_idx->data_ptr(), &qpackw, &dispatcher_utils::DefaultThreading); + ker.packQWeight(ctx->n, ctx->k, ctx->qweight->data_ptr(), ctx->n, ctx->scale->data_ptr(), + p->asym ? ctx->zp->data_ptr() : nullptr, &qpackw, &dispatcher_utils::DefaultThreading); +} + +void jblas_packq(woq_packq_param* p, woq_packq_ctx* ctx) { + TORCH_CHECK(p->weight_type == "int8" || p->weight_type == "int4_clip" || p->weight_type == "int4_fullrange", + "Qbits: only support Integer WOQ in PACKQ"); + + if (p->compute_type == "int8") { + if (dispatcher_utils::check_amx() && p->blocksize % jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { + return execute_qpack, JblasAMX_INT8>(p, ctx); + } + if (dispatcher_utils::check_avx512_vnni() && + p->blocksize % jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { + return execute_qpack, JblasAVX512_VNNI>(p, ctx); + } + if (dispatcher_utils::check_avx_vnni() && p->blocksize % jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { + return execute_qpack, JblasAVX_VNNI>(p, ctx); + } + TORCH_CHECK(false, "Qbits: Illegal config in int8 compute_type, blocksize:", p->blocksize, + ", ISA support vnni:", dispatcher_utils::check_avx_vnni()); + } + if (p->compute_type == "fp32") { + if (dispatcher_utils::check_avx512f()) { + return execute_qpack, JblasAVX512F>(p, ctx); + } + if (dispatcher_utils::check_avx2()) { + return execute_qpack, JblasAVX2>(p, ctx); + } + 
TORCH_CHECK(false, "Qbits: device ISA must support AVX2 when compute_type==fp32"); + } + if (p->compute_type == "bf16") { + if (dispatcher_utils::check_amx()) { + return execute_qpack, JblasAMX_BF16>(p, ctx); + } + TORCH_CHECK(false, "Qbits: device ISA must support AMX-BF16 when compute_type==bf16"); + } + TORCH_CHECK(false, "Qbits: unsupported jblas_config, compute_type:", p->compute_type, + ", weight_type:", p->weight_type + ", blocksize:", p->blocksize); +} +} // namespace woq diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp index e62943fa847..2e73a58bb6f 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp @@ -34,13 +34,6 @@ inline void set_nk(woq_runtime_ctx* ctx, torch::Tensor* tensor) { ctx->k = ctx->transpose ? 
static_cast(tensor->sizes()[1]) : static_cast(tensor->sizes()[0]); } -static std::map wei2jblasdt_map{ - {"int4_clip", JBLAS_DTYPE::S4_CLIP}, {"int4_fullrange", JBLAS_DTYPE::S4_FULLRANGE}, - {"nf4", JBLAS_DTYPE::F4_NF4}, {"fp4_e2m1_bnb", JBLAS_DTYPE::F4_BNB}, - {"fp4_e2m1", JBLAS_DTYPE::F4_E2M1}, {"fp8_e4m3", JBLAS_DTYPE::F8_E4M3}, - {"fp8_e5m2", JBLAS_DTYPE::F8_E5M2}}; -static std::map scale2jblasdt_map{{"fp32", JBLAS_DTYPE::F32}, - {"fp8_e8m0", JBLAS_DTYPE::F8_E8M0}}; static void* woq_workspace = nullptr; static int64_t workspace_size = 0; @@ -54,14 +47,29 @@ template void woq_dequantize(woq_config_param* p, woq_runtime_ctx* ctx) { if (dispatcher_utils::initer.verbose) dispatcher_utils::timer.start(); using PrologueB = typename Launcher::PrologueB; + using WType = typename Launcher::PrologueB::StorageWeight; static PrologueB kernel; + // TODO(zhe): using unified StorageWeightKBlockNInteger after sync with neural-speed(with NFloat ProB feature). if (ctx->transpose) { - kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, - ctx->output->data_ptr(), ctx->deseries_wei->mK, - &dispatcher_utils::DefaultThreading); + if constexpr (std::is_same_v) { + kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, + dynamic_cast(ctx->deseries_wei), + ctx->output->data_ptr(), ctx->deseries_wei->mK, + &dispatcher_utils::DefaultThreading); + } else { + kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, + ctx->output->data_ptr(), ctx->deseries_wei->mK, + &dispatcher_utils::DefaultThreading); + } } else { - kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, ctx->output->data_ptr(), - ctx->deseries_wei->mN, &dispatcher_utils::DefaultThreading); + if constexpr (std::is_same_v) { + kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, + dynamic_cast(ctx->deseries_wei), + ctx->output->data_ptr(), ctx->deseries_wei->mN, 
&dispatcher_utils::DefaultThreading); + } else { + kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, + ctx->output->data_ptr(), ctx->deseries_wei->mN, &dispatcher_utils::DefaultThreading); + } } } @@ -72,17 +80,14 @@ void woq_quantize(woq_config_param* p, woq_runtime_ctx* ctx) { using WType = typename Launcher::PrologueB::StorageWeight; WType packedw(0); static Launcher launcher; - if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, ctx->blocksize, jblas::utils::jblas_dtype, - jblas::utils::jblas_dtype, false); - } else if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, ctx->blocksize, wei2jblasdt_map[p->weight_type], - jblas::utils::jblas_dtype, jblas::utils::jblas_dtype, false); + if constexpr (std::is_same_v) { + packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], + scale2jblasdt_map[p->scale_type], JBLAS_DTYPE::BF16, p->asym); } else if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, ctx->blocksize, wei2jblasdt_map[p->weight_type], + packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], jblas::utils::jblas_dtype); } else if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, ctx->blocksize, wei2jblasdt_map[p->weight_type], + packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], scale2jblasdt_map[p->scale_type]); } else { assert(0); @@ -100,10 +105,23 @@ void woq_quantize(woq_config_param* p, woq_runtime_ctx* ctx) { dispatcher_utils::timer.stop(); auto cost_time = dispatcher_utils::timer.get_elapsed_time(); LOG(INFO) << "QBits quantize verbose\nn:" << ctx->n << " k:" << ctx->k << " weight_type:" << p->weight_type - << " blocksize:" << ctx->blocksize << " src_type:" << dispatcher_utils::get_torch_dt_name(ctx->weight) + << " blocksize:" << 
p->blocksize << " src_type:" << dispatcher_utils::get_torch_dt_name(ctx->weight) << " execute time:" << cost_time << "ms"; } } + +void* get_workspace(int need_size) { + void* tmpbuf = NULL; + void* workspace = woq_workspace == nullptr ? NULL : woq_workspace; + if (workspace != NULL) { + TORCH_CHECK(workspace_size >= need_size, "Qbits: workspace size should larger than ", need_size, " bytes"); + return workspace; + } else { + tmpbuf = jblas::utils::amalloc(need_size); + return tmpbuf; + } +} + template void do_compute(woq_config_param* p, woq_runtime_ctx* ctx, ParamA param_a) { if (dispatcher_utils::initer.verbose) dispatcher_utils::timer.start(); @@ -111,30 +129,86 @@ void do_compute(woq_config_param* p, woq_runtime_ctx* ctx, ParamA param_a) { using EpiParam = typename Launcher::EpiParam; EpiParam param_epi = {ctx->output->data_ptr(), ctx->bias->data_ptr(), ctx->ldo, 0, ctx->alpha, ctx->beta}; using GemmCore = typename Launcher::GemmCore; + using StorageWeight = typename Launcher::PrologueB::StorageWeight; + int asym_size = 0, shuf_size = 0; + int8_t* tmpbuf = nullptr; if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || GemmCore::ISA == JblasAVX_VNNI) { using Parallel = jblas::parallel::gemm::SchedulerKBlockS; - jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, ctx->blocksize); - typename Launcher::Param args{gp, param_a, - dynamic_cast(ctx->deseries_wei), param_epi}; - jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); + StorageWeight* packedw = dynamic_cast(ctx->deseries_wei); + auto dyn_q_size = param_a.quan->mSize; + if (packedw->ShfIndice()) shuf_size = param_a.reordered->mSize; + tmpbuf = reinterpret_cast(get_workspace(dyn_q_size + shuf_size)); + param_a.quan->assign(tmpbuf); + if (packedw->ShfIndice()) { + param_a.reordered->assign(tmpbuf + dyn_q_size); + param_a.indices = packedw->ShfIndice(); + 
launcher.mProA.quantize(param_a, ctx->m, ctx->deseries_wei->mK, &dispatcher_utils::DefaultThreading); + } + typename Launcher::Param args{ + gp, param_a, dynamic_cast(ctx->deseries_wei), param_epi}; + if (packedw->ShfIndice()) { + jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + } else { + jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + } } else { using Parallel = jblas::parallel::gemm::SchedulerKBlock; - using StorageWeight = typename Launcher::PrologueB::StorageWeight; StorageWeight* packedw = dynamic_cast(ctx->deseries_wei); - jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, ctx->blocksize); - typename Launcher::Param args{gp, - param_a, - dynamic_cast(ctx->deseries_wei), - {packedw->template SPtr(), packedw->SDtype(), packedw->CStep()}, - param_epi}; - jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + if (p->asym || packedw->ShfIndice()) { + if (p->asym) asym_size = param_a.reduce->mSize; + if (packedw->ShfIndice()) shuf_size = param_a.reordered->mSize; + tmpbuf = reinterpret_cast(get_workspace(asym_size + shuf_size)); + } + if (p->asym) { + param_a.reduce->assign(tmpbuf); + } else { + param_a.reduce = nullptr; + } + if (packedw->ShfIndice()) { + param_a.reordered->assign(tmpbuf + asym_size); + param_a.indices = packedw->ShfIndice(); + } + + jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); + if constexpr (std::is_same_v) { + typename Launcher::Param args{ + gp, + param_a, + dynamic_cast(ctx->deseries_wei), + {packedw->template SPtr(), packedw->SDtype(), packedw->CStep(), + p->asym ? packedw->template ZPtr() : nullptr, + p->asym ? param_a.reduce->template RPtr() : nullptr, p->asym ? 
param_a.reduce->lda : -1}, + param_epi}; + + if (p->asym || packedw->ShfIndice()) { + jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + } else { + jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + } + } else { + // TODO(zhe): remove this branch after using NFloat ProB in nerual-speed, only need to reset paramC in differenct + // ProB. + typename Launcher::Param args{gp, + param_a, + dynamic_cast(ctx->deseries_wei), + {packedw->template SPtr(), packedw->SDtype(), packedw->CStep()}, + param_epi}; + + if (p->asym || packedw->ShfIndice()) { + jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + } else { + jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + } + } } + if (tmpbuf != woq_workspace && tmpbuf != nullptr) jblas::utils::afree(tmpbuf); if (dispatcher_utils::initer.verbose) { dispatcher_utils::timer.stop(); auto cost_time = dispatcher_utils::timer.get_elapsed_time(); LOG(INFO) << "QBits linear verbose\nm:" << ctx->m << " n:" << ctx->deseries_wei->mN << " k:" << ctx->deseries_wei->mK << " weight_type:" << p->weight_type - << " compute_type:" << p->compute_type << " blocksize:" << ctx->blocksize + << " compute_type:" << p->compute_type << " blocksize:" << p->blocksize << " src_type:" << dispatcher_utils::get_torch_dt_name(ctx->activation) << " dst_type:" << dispatcher_utils::get_torch_dt_name(ctx->output) << " execute time:" << cost_time << "ms"; @@ -146,29 +220,18 @@ void parse_paramA(woq_config_param* p, woq_runtime_ctx* ctx) { using PrologueA = typename Launcher::PrologueA; using ParamA = typename PrologueA::Param; using SrcType = typename PrologueA::SRCType; + static PrologueA kernel; if constexpr (quant_PrologueA) { - static PrologueA kernel; - void* workspace = woq_workspace == nullptr ? 
NULL : woq_workspace; - size_t need_size; - void* tmpbuf = NULL; - auto get_workspace = [&] { - if (workspace != NULL) { - TORCH_CHECK(workspace_size >= need_size, "Qbits: workspace size should large than ", need_size, " bytes"); - return workspace; - } else { - tmpbuf = jblas::utils::amalloc(need_size); - return tmpbuf; - } - }; - auto quantA = kernel.createStorage(ctx->m, ctx->deseries_wei->mK, ctx->blocksize, false); - need_size = quantA.mSize; - quantA.assign(reinterpret_cast(get_workspace())); - kernel.quantize({reinterpret_cast(ctx->activation->data_ptr()), ctx->deseries_wei->mK, &quantA}, ctx->m, - ctx->deseries_wei->mK, &dispatcher_utils::DefaultThreading); + auto quantA = kernel.createQuantStorage(ctx->m, ctx->deseries_wei->mK, p->blocksize, p->asym); + auto reordA = kernel.createReorderStorage(ctx->m, ctx->deseries_wei->mK, p->blocksize); ParamA param_a = {reinterpret_cast(ctx->activation->data_ptr()), ctx->deseries_wei->mK, &quantA}; + param_a.reordered = &reordA; return do_compute(p, ctx, param_a); } else { - ParamA param_a = {reinterpret_cast(ctx->activation->data_ptr()), ctx->deseries_wei->mK}; + auto reduceA = kernel.createReduceStorage(ctx->m, ctx->k, p->blocksize); + auto reorderA = kernel.createReorderStorage(ctx->m, ctx->k, p->blocksize); + ParamA param_a = {reinterpret_cast(ctx->activation->data_ptr()), ctx->deseries_wei->mK, &reduceA}; + param_a.reordered = &reorderA; return do_compute(p, ctx, param_a); } } @@ -221,25 +284,21 @@ void parse_activation(woq_config_param* p, woq_runtime_ctx* ctx) { if (p->src_dt == dispatcher_utils::QBITS_FP32) { if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || GemmCore::ISA == JblasAVX_VNNI) { - return parse_store(p, ctx); + return parse_store( + p, ctx); } else { - if constexpr (GemmCore::ISA == JblasAMX_BF16) { - return parse_store(p, ctx); - } else { - return parse_store(p, ctx); - } + return parse_store(p, + ctx); } } if (p->src_dt == dispatcher_utils::QBITS_BF16) { if 
constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || GemmCore::ISA == JblasAVX_VNNI) { - return parse_store(p, ctx); + return parse_store( + p, ctx); } else { - if constexpr (GemmCore::ISA == JblasAMX_BF16) { - return parse_store(p, ctx); - } else { - return parse_store(p, ctx); - } + return parse_store(p, + ctx); } } } @@ -247,17 +306,16 @@ void parse_activation(woq_config_param* p, woq_runtime_ctx* ctx) { template void parse_weight(woq_config_param* p, woq_runtime_ctx* ctx) { using namespace jblas::prologue_b::gemm; - if (p->weight_type == "int8") { - return parse_activation(p, ctx); - } - if (p->weight_type == "int4_clip" || p->weight_type == "int4_fullrange") { - return parse_activation(p, ctx); + if (p->weight_type == "int8" || p->weight_type == "int4_clip" || p->weight_type == "int4_fullrange") { + return parse_activation(p, ctx); } if (p->weight_type == "nf4" || p->weight_type == "fp4_e2m1_bnb" || p->weight_type == "fp4_e2m1") { + TORCH_CHECK(p->asym == false, "Qbits: only support sym alg in fp4/nf4 woq weight."); if constexpr (GemmCore::ISA != JblasAMX_INT8 && GemmCore::ISA != JblasAVX512_VNNI && GemmCore::ISA != JblasAVX_VNNI) return parse_activation(p, ctx); } if (p->weight_type == "fp8_e4m3" || p->weight_type == "fp8_e5m2") { + TORCH_CHECK(p->asym == false, "Qbits: only support sym alg in fp8 woq weight."); if constexpr (GemmCore::ISA != JblasAMX_INT8 && GemmCore::ISA != JblasAVX512_VNNI && GemmCore::ISA != JblasAVX_VNNI) return parse_activation(p, ctx); } @@ -268,19 +326,20 @@ void parse_weight(woq_config_param* p, woq_runtime_ctx* ctx) { template void parse_gemm_core_online(woq_config_param* p, woq_runtime_ctx* ctx) { set_nk(ctx, ctx->weight); - ctx->blocksize = ctx->blocksize == -1 ? ctx->k : ctx->blocksize; + p->blocksize = p->blocksize == -1 ? 
ctx->k : p->blocksize; if (p->compute_type == "int8") { - if (dispatcher_utils::check_amx() && ctx->blocksize % jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { + TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") + if (dispatcher_utils::check_amx() && p->blocksize % jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { return parse_weight>(p, ctx); } if (dispatcher_utils::check_avx512_vnni() && - ctx->blocksize % jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { + p->blocksize % jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { return parse_weight>(p, ctx); } - if (dispatcher_utils::check_avx_vnni() && ctx->blocksize % jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { + if (dispatcher_utils::check_avx_vnni() && p->blocksize % jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { return parse_weight>(p, ctx); } - TORCH_CHECK(false, "Qbits: Illegal config in int8 compute_type, blocksize:", ctx->blocksize, + TORCH_CHECK(false, "Qbits: Illegal config in int8 compute_type, blocksize:", p->blocksize, ", ISA support vnni:", dispatcher_utils::check_avx_vnni()); } if (p->compute_type == "fp32") { @@ -299,23 +358,25 @@ void parse_gemm_core_online(woq_config_param* p, woq_runtime_ctx* ctx) { TORCH_CHECK(false, "Qbits: device ISA must support AMX-BF16 when compute_type==bf16"); } TORCH_CHECK(false, "Qbits: unsupported jblas_config, compute_type:", p->compute_type, - ", weight_type:", p->weight_type + ", blocksize:", ctx->blocksize); + ", weight_type:", p->weight_type + ", blocksize:", p->blocksize); } template void parse_gemm_core_offline(woq_config_param* p, woq_runtime_ctx* ctx) { ctx->deseries_wei = jblas::storage::gemm::PackedWeightParser::deserialBuffer(ctx->weight->data_ptr()); - ctx->blocksize = dynamic_cast(ctx->deseries_wei)->mBlockSize; + p->blocksize = dynamic_cast(ctx->deseries_wei)->mBlockSize; auto NTile = 
jblas::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, jblas::gemm::CoreAttr::NTILE_MASK, jblas::gemm::CoreAttr::NTILE_SHIFT); auto CType = jblas::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, jblas::gemm::CoreAttr::COMP_MASK, jblas::gemm::CoreAttr::COMP_SHIFT); if (CType == uint32_t(jblas::gemm::CompType::COMP_INT8_US_INT32)) { + TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") if (NTile == jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::NTILE && dispatcher_utils::check_amx()) { return parse_weight>(p, ctx); } } if (CType == uint32_t(jblas::gemm::CompType::COMP_INT8_US_FP32)) { + TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") if (NTile == jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::NTILE && dispatcher_utils::check_avx512_vnni()) { return parse_weight>(p, ctx); } @@ -364,4 +425,5 @@ void set_woq_workspace(torch::Tensor* workspace) { woq_workspace = workspace->data_ptr(); workspace_size = workspace->element_size() * workspace->numel(); } + } // namespace woq diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp b/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp index 2aaf368499a..bb57e7bd4d3 100755 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp @@ -36,10 +36,11 @@ static dispatcher_utils::QBITS_DT get_qbits_dt(torch::Tensor* tensor) { template static void inline init_woq_config_param(woq::woq_config_param* p, woq::woq_runtime_ctx* ctx, const std::string& compute_type, const std::string& weight_type, - const std::string& scale_type) { + const std::string& scale_type, bool asym) { p->compute_type = compute_type; p->weight_type = weight_type; p->scale_type = scale_type; + p->asym = asym; switch (TASK) { case woq::WOQ_QUANTIZE: case woq::WOQ_DEQUANTIZE: @@ -54,14 +55,31 @@ static void inline 
init_woq_config_param(woq::woq_config_param* p, woq::woq_runt } } -static torch::Tensor woq_quantize(const torch::Tensor& fp32_weight, bool transpose, int64_t block_size, +static torch::Tensor woq_packq(const torch::Tensor& qweight, const torch::Tensor& scale, const torch::Tensor& zp, + const torch::Tensor& g_idx, const std::string& weight_type, + const std::string& scale_type, const std::string& compute_type, bool asym, + int64_t blocksize) { + torch::Tensor output; + woq::woq_packq_param p{compute_type, weight_type, scale_type, asym, static_cast(blocksize), g_idx.numel() != 0}; + woq::woq_packq_ctx ctx{const_cast(&qweight), + const_cast(&scale), + const_cast(&zp), + const_cast(&g_idx), + &output, + static_cast(qweight.sizes()[1]), + static_cast(qweight.sizes()[0])}; + woq::jblas_packq(&p, &ctx); + return output; +} + +static torch::Tensor woq_quantize(const torch::Tensor& fp32_weight, bool transpose, int64_t blocksize, const std::string& compute_type, const std::string& weight_type, - const std::string& scale_type) { + const std::string& scale_type, bool asym) { torch::Tensor output; woq::woq_config_param p; - woq::woq_runtime_ctx ctx{ - nullptr, const_cast(&fp32_weight), nullptr, &output, transpose, static_cast(block_size)}; - init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type); + woq::woq_runtime_ctx ctx{nullptr, const_cast(&fp32_weight), nullptr, &output, transpose}; + init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type, asym); + p.blocksize = static_cast(blocksize); woq::dispatch_woq_task(&p, &ctx, woq::WOQ_QUANTIZE); return output; } @@ -72,13 +90,14 @@ static void woq_dequantize(const torch::Tensor& compressed_weight, torch::Tensor woq::woq_config_param p; woq::woq_runtime_ctx ctx{nullptr, const_cast(&compressed_weight), nullptr, &dequantize_weight, transpose}; - init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type); + init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type, + false); // zp 
is packed to compressed-weight, it's ok to set false here. woq::dispatch_woq_task(&p, &ctx, woq::WOQ_DEQUANTIZE); } static void woq_linear(const torch::Tensor& activation, const torch::Tensor& weight, const torch::Tensor& bias, torch::Tensor& output, int64_t ldo, bool with_bias, const std::string& compute_type, - const std::string& weight_type, const std::string& scale_type) { + const std::string& weight_type, const std::string& scale_type, bool asym) { woq::woq_config_param p; torch::Tensor* rt_bias = with_bias ? const_cast(&bias) : &output; woq::woq_runtime_ctx ctx{ @@ -94,7 +113,7 @@ static void woq_linear(const torch::Tensor& activation, const torch::Tensor& wei ctx.n = static_cast(ldo); ctx.alpha = 1.f; ctx.beta = with_bias ? 1.f : 0.f; - init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type); + init_woq_config_param(&p, &ctx, compute_type, weight_type, scale_type, asym); woq::dispatch_woq_task(&p, &ctx, woq::WOQ_LINEAR); } @@ -127,6 +146,7 @@ TORCH_LIBRARY(jblasop, m) { m.def("woq_quantize", &woq_quantize); m.def("woq_linear", &woq_linear); m.def("woq_dequantize", &woq_dequantize); + m.def("woq_packq", &woq_packq); m.def("set_woq_workspace", &set_woq_workspace); m.def("matmul", &jblasop_gemm); } diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py new file mode 100644 index 00000000000..98cb106a0c1 --- /dev/null +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ut_utils import * + + +def convert_idx(g_idx, k, blocksize): + ret_idx = torch.zeros(k, dtype=int) + g_counter = torch.zeros(blocksize, dtype=int) + for i in range(k): + ret_idx[g_idx[i]*blocksize+g_counter[g_idx[i]]] = i + g_counter[g_idx[i]] += 1 + return ret_idx + + +@pytest.mark.parametrize("m", [256]) +@pytest.mark.parametrize("n", [1024]) +@pytest.mark.parametrize("k", [512]) +@pytest.mark.parametrize("blocksize", [128]) +@pytest.mark.parametrize("compute_type", ["fp32", "bf16", "int8"]) +@pytest.mark.parametrize("weight_type", ["int8", "int4_clip"]) +@pytest.mark.parametrize("scale_type", ["fp32"]) +@pytest.mark.parametrize("asym", [True, False]) +def test(m, k, n, weight_type, scale_type, compute_type, asym, blocksize, dump_tensor=False): + if compute_type == "int8" and asym == True: + pytest.skip() + torch.manual_seed(0) + raw_s8_wei = torch.randint(-128, 127, [k, n], dtype=torch.int8) + g_idx = torch.arange(k//blocksize, dtype=torch.int) + g_idx = g_idx.repeat(blocksize) + cvt_idx = convert_idx(g_idx, k, blocksize) + zp = torch.randint(-4, 4, [k//blocksize, n], dtype=torch.int8) + scale = torch.rand(k//blocksize, n, dtype=torch.float) + packw = torch.ops.jblasop.woq_packq( + raw_s8_wei, scale, zp, g_idx, weight_type, scale_type, compute_type, asym, blocksize) + revert_wei = torch.zeros(k, n, dtype=torch.float) + torch.ops.jblasop.woq_dequantize( + packw, revert_wei, False, compute_type, weight_type, scale_type) + ref_act = torch.rand(m, k, dtype=torch.float) + tar_act = ref_act.clone() + ref_act = torch.index_select(ref_act, 1, cvt_idx) + 
tar_dst = torch.zeros(m, n, dtype=torch.float) + torch.ops.jblasop.woq_linear( + tar_act, packw, torch.empty(0), tar_dst, n, False, compute_type, weight_type, scale_type, asym) + ref_dst = torch.matmul(ref_act, revert_wei) + if dump_tensor: + print(tar_dst) + print(ref_dst) + if compute_type == "fp32": + assert (abs(ref_dst - tar_dst).max() < 0.03) + elif compute_type == "bf16": + assert (abs(ref_dst - tar_dst).max() < 8) + else: + assert (abs(ref_dst - tar_dst).max() < 10) diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py index 0c3f63fca2c..b36b2c6edae 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py @@ -17,7 +17,7 @@ from ut_utils import * -cmpt_configs = {"int8": {"int8", "fp32"}, "int4_clip": {"int8", "fp32", "bf16"}, "int4_fullrange": { +cmpt_configs = {"int8": {"int8", "bf16", "fp32"}, "int4_clip": {"int8", "fp32", "bf16"}, "int4_fullrange": { "int8", "fp32", "bf16"}, "fp4_e2m1_bnb": {"fp32", "bf16"}, "fp4_e2m1": {"fp32", "bf16"}, "nf4": {"fp32", "bf16"}, "fp8_e5m2": {"fp32", "bf16"}, "fp8_e4m3": {"fp32", "bf16"} } @@ -25,6 +25,8 @@ scale_configs = {"int8": {"fp32"}, "int4_clip": {"fp32"}, "int4_fullrange": {"fp32"}, "fp4_e2m1_bnb": {"fp32"}, "fp4_e2m1": {"fp32"}, "nf4": {"fp32"}, "fp8_e5m2": {"fp32", "fp8_e8m0"}, "fp8_e4m3": {"fp32", "fp8_e8m0"}} +asym_configs = {"int8", "int4_clip", "int4_fullrange"} + @capture_args @pytest.mark.parametrize("m", [256]) @@ -34,13 +36,16 @@ @pytest.mark.parametrize("compute_type", ["int8", "bf16", "fp32"]) @pytest.mark.parametrize("weight_type", ["int8", "int4_clip", "int4_fullrange", "nf4", "fp4_e2m1_bnb", "fp4_e2m1", "fp8_e5m2", "fp8_e4m3"]) @pytest.mark.parametrize("scale_type", ["fp32", "fp8_e8m0"]) +@pytest.mark.parametrize("asym", [True, False]) 
@pytest.mark.parametrize("transpose", [True, False]) @pytest.mark.parametrize("add_bias", [True, False]) @pytest.mark.parametrize("src_dt", ["fp32", "bf16"]) @pytest.mark.parametrize("dst_dt", ["fp32", "bf16"]) -def test(m, n, k, blocksize, compute_type, weight_type, scale_type, transpose, add_bias, src_dt, dst_dt, dump_tensor_info=True): +def test(m, n, k, blocksize, compute_type, weight_type, scale_type, asym, transpose, add_bias, src_dt, dst_dt, dump_tensor_info=True): if compute_type not in cmpt_configs[weight_type] or scale_type not in scale_configs[weight_type]: pytest.skip() + if asym and (weight_type not in asym_configs or compute_type == "int8"): + pytest.skip() torch.manual_seed(0) ref_activation = torch.rand(m, k, dtype=torch.float) tar_activation = ref_activation.clone() @@ -54,7 +59,7 @@ def test(m, n, k, blocksize, compute_type, weight_type, scale_type, transpose, a if dump_tensor_info: print(raw_wei) compress_wei = torch.ops.jblasop.woq_quantize( - raw_wei, transpose, blocksize, compute_type, weight_type, scale_type) + raw_wei, transpose, blocksize, compute_type, weight_type, scale_type, asym) revert_wei = torch.zeros(wei_row, wei_col, dtype=torch.float) torch.ops.jblasop.woq_dequantize( compress_wei, revert_wei, transpose, compute_type, weight_type, scale_type) @@ -68,7 +73,7 @@ def test(m, n, k, blocksize, compute_type, weight_type, scale_type, transpose, a revert_wei = torch.transpose(revert_wei, 0, 1) ref_dst = torch.matmul(ref_activation, revert_wei) torch.ops.jblasop.woq_linear( - tar_activation, compress_wei, bias, tar_dst, n, add_bias, compute_type, weight_type, scale_type) + tar_activation, compress_wei, bias, tar_dst, n, add_bias, compute_type, weight_type, scale_type, asym) if dst_dt == "bf16": tar_dst = tar_dst.to(torch.float) if add_bias: diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/ut_utils.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/ut_utils.py index cf4f4a904c4..1b6fb5b7ede 100644 --- 
a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/ut_utils.py +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/ut_utils.py @@ -20,7 +20,14 @@ import time import pytest from functools import wraps -torch.ops.load_library("../build/libqbits.so") +import sys + +if sys.platform.startswith("linux"): + torch.ops.load_library("../build/libqbits.so") +elif sys.platform.startswith("win"): + torch.ops.load_library("../build/qbits.dll") +else: + sys.exit() def capture_args(f): @wraps(f) diff --git a/intel_extension_for_transformers/llm/quantization/autograd/functions.py b/intel_extension_for_transformers/llm/quantization/autograd/functions.py index df87636629d..aa202ad3486 100644 --- a/intel_extension_for_transformers/llm/quantization/autograd/functions.py +++ b/intel_extension_for_transformers/llm/quantization/autograd/functions.py @@ -50,8 +50,8 @@ def forward(ctx, A, B, out=None, bias=None, compute_dtype=None, weight_dtype=Non # 2. Matmul # output = torch.nn.functional.linear(A, B_dequant, bias) torch.ops.jblasop.woq_linear( - A, B.data, bias, out, out.shape[-1], bias is not None, compute_dtype, weight_dtype, scale_dtype - ) + A, B.data, bias, out, out.shape[-1], bias is not None, compute_dtype, weight_dtype, scale_dtype, + False) output = out # 3. 
Save state @@ -106,5 +106,5 @@ def matmul_kbit(A: Tensor, else: torch.ops.jblasop.woq_linear(A, B.data, bias, out, out.shape[-1], bias is not None, compute_dtype, weight_dtype, - scale_dtype) + scale_dtype, False) return out diff --git a/intel_extension_for_transformers/llm/quantization/nn/modules.py b/intel_extension_for_transformers/llm/quantization/nn/modules.py index 88223fcf275..9074d49b9d3 100644 --- a/intel_extension_for_transformers/llm/quantization/nn/modules.py +++ b/intel_extension_for_transformers/llm/quantization/nn/modules.py @@ -134,7 +134,7 @@ def forward(self, x: torch.Tensor): def set_weights_bias(self, weight_data, bias=None): shape = weight_data.shape weight = torch.ops.jblasop.woq_quantize( - weight_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype) + weight_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) weight.resize_(shape) self.weight = ParamsQBits(data=weight, requires_grad=False, @@ -222,7 +222,7 @@ def merge(self, safe_merge: bool = False) -> None: else: w_data += self.get_delta_weight(active_adapter) weight = torch.ops.jblasop.woq_quantize( - w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype) + w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) self.weight = ParamsQBits( data=weight, requires_grad=False, quant_state={"scheme": self.scheme}, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_dtype=self.weight_dtype, scale_dtype=self.scale_dtype @@ -241,7 +241,7 @@ def unmerge(self) -> None: if active_adapter in self.lora_A.keys(): w_data -= self.get_delta_weight(active_adapter) weight = torch.ops.jblasop.woq_quantize( - w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype) + w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) self.weight = ParamsQBits( data=weight, 
requires_grad=False, quant_state={"scheme": self.scheme}, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_dtype=self.weight_dtype, scale_dtype=self.scale_dtype diff --git a/tests/CI/test_weight_only.py b/tests/CI/test_weight_only.py index 0b0c884396e..ab16aa91bb6 100644 --- a/tests/CI/test_weight_only.py +++ b/tests/CI/test_weight_only.py @@ -86,7 +86,7 @@ def test_woq_config_post_init_runtime(self): def test_int8(self): raw_wei = torch.rand(2, 32, dtype=torch.float) compress_wei = torch.ops.jblasop.woq_quantize( - raw_wei, True, 32, "fp32", "int8", "fp32") + raw_wei, True, 32, "fp32", "int8", "fp32", False) revert_wei = torch.zeros(2, 32, dtype=torch.float) torch.ops.jblasop.woq_dequantize( compress_wei, revert_wei, True, "fp32", "int8", "fp32") @@ -107,7 +107,7 @@ def test_int8(self): def test_int4(self): raw_wei = torch.rand(2, 32, dtype=torch.float) compress_wei = torch.ops.jblasop.woq_quantize( - raw_wei, True, 32, "fp32", "int4_fullrange", "fp32") + raw_wei, True, 32, "fp32", "int4_fullrange", "fp32", False) revert_wei = torch.zeros(2, 32, dtype=torch.float) torch.ops.jblasop.woq_dequantize( compress_wei, revert_wei, True, "fp32", "int4_fullrange", "fp32") From 9cf6a7f6332f0017bd3ae5da1d5310f3b93e6815 Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Thu, 4 Jan 2024 11:50:19 +0800 Subject: [PATCH 022/101] add accelerate for tensorflow examples (#1108) Signed-off-by: Wenxin Zhang --- .../language-modeling/quantization/ptq/requirements.txt | 3 ++- .../tensorflow/multiple-choice/quantization/requirements.txt | 1 + .../token-classification/quantization/requirements.txt | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt b/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt index 89b51f1b55b..62aa53701f4 100644 --- a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt +++ 
b/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt @@ -3,4 +3,5 @@ sentencepiece != 0.1.92 protobuf intel-tensorflow transformers -scikit-learn \ No newline at end of file +scikit-learn +accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt b/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt index 32db3a60a90..ffa62da04e1 100644 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt +++ b/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt @@ -3,3 +3,4 @@ sentencepiece != 0.1.92 protobuf intel-tensorflow transformers +accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt b/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt index 5686e873667..6e419404871 100644 --- a/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt +++ b/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt @@ -4,3 +4,4 @@ seqeval protobuf intel-tensorflow transformers +accelerate \ No newline at end of file From 257c78c68a9028f7197282281b0f767fb700047b Mon Sep 17 00:00:00 2001 From: XuehaoSun Date: Thu, 4 Jan 2024 17:03:56 +0800 Subject: [PATCH 023/101] update Llama-2-13b-hf SQ recipe(#1109) Signed-off-by: Sun, Xuehao --- .../text-generation/quantization/llm_quantization_recipes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md index 34c63d8438f..e7d34c9a0ba 100644 --- a/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md +++ b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md 
@@ -156,7 +156,7 @@ python run_generation.py \ --trust_remote_code True \ --calib_len 1024 \ --fallback_add \ - --calib_shuffle False \ + --calib_padding \ --tasks lambada_openai \ --int8 --sq --accuracy \ --batch_size 1 \ From c35d2b61ecc04950ca7cf7a32f67e7b8f50fc37b Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Thu, 4 Jan 2024 20:43:36 +0800 Subject: [PATCH 024/101] [NeuralChat] Fix RAG example for retrieval plugin parameter change (#1111) Fix RAG example for retrieval plugin parameter change Signed-off-by: lvliang-intel --- .../neural_chat/examples/deployment/rag/askdoc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intel_extension_for_transformers/neural_chat/examples/deployment/rag/askdoc.yaml b/intel_extension_for_transformers/neural_chat/examples/deployment/rag/askdoc.yaml index 2863595eddd..9fe270d3b66 100644 --- a/intel_extension_for_transformers/neural_chat/examples/deployment/rag/askdoc.yaml +++ b/intel_extension_for_transformers/neural_chat/examples/deployment/rag/askdoc.yaml @@ -30,7 +30,7 @@ retrieval: enable: true args: input_path: "./askdoc_docs" - persist_dir: "./askdoc_persist" + persist_directory: "./askdoc_persist" response_template: "We cannot find suitable content to answer your query, please contact AskGM to find help. Mail: ask.gm.zizhu@intel.com." 
append: False From e01d330383e07663dddca2a395229ae5d7605fd0 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Fri, 5 Jan 2024 10:07:43 +0800 Subject: [PATCH 025/101] [LLM Runtime] Fix convert mistral script missing parameter issue (#1100) --- .../llm/runtime/graph/scripts/convert_mistral.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py index aeb029e5ab7..8ca762adaa2 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py @@ -180,7 +180,9 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': ffn_hidden_size = config["intermediate_size"] rms_norm_eps = config["rms_norm_eps"] rope_theta = config["rope_theta"] if "rope_theta" in config else 10000 - rope_scale = config["factor"] if "factor" in config else 1 + rope_scale = 1 + if config["rope_scaling"]: + rope_scale = config["rope_scaling"]["factor"] if "factor" in config["rope_scaling"] else 1 return Params( n_vocab=n_vocab, @@ -192,6 +194,7 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': ffn_hidden_size=ffn_hidden_size, rms_norm_eps=rms_norm_eps, rope_theta=rope_theta, + rope_scale=rope_scale ) # LLaMA v2 70B params.json @@ -246,7 +249,7 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) added_tokens: Dict[str, int] if fname_added_tokens is not None: - added_tokens = json.load(open(fname_added_tokens)) + added_tokens = json.load(open(fname_added_tokens, encoding='utf-8')) else: added_tokens = {} vocab_size: int = self.sentencepiece_tokenizer.vocab_size() From df26e1d0955b2c393ad94a55680678248d348faf Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Fri, 5 
Jan 2024 19:34:07 +0800 Subject: [PATCH 026/101] update prompt and ci scripts (#1113) --- .github/workflows/cpp-graph-test.yml | 14 +------------- .github/workflows/unit-test-llmruntime.yml | 1 - .../graph/scripts/ci/cpp_graph_prompts.json | 5 +++-- 3 files changed, 4 insertions(+), 16 deletions(-) diff --git a/.github/workflows/cpp-graph-test.yml b/.github/workflows/cpp-graph-test.yml index 6bb001b4714..96853f20ab5 100644 --- a/.github/workflows/cpp-graph-test.yml +++ b/.github/workflows/cpp-graph-test.yml @@ -53,19 +53,7 @@ jobs: - name: Env build run: | - bash ${{ github.workspace }}/.github/workflows/script/prepare_env_with_conda.sh "cpp-graph-test" "3.8" - - - name: Binary build - # cpp model does not requires itrex package - if: 0 == 1 - run: | - cd ${{ github.workspace }} - conda activate cpp-graph-test || source activate cpp-graph-test - pip install build --upgrade - pip install -r requirements.txt - python setup.py sdist bdist_wheel - pip install dist/intel_extension_for_transformers*.whl - pip list + bash ${{ github.workspace }}/.github/workflows/script/prepare_env_with_conda.sh "cpp-graph-test-itrex" "3.8" - name: BF16 Benchmark run: | diff --git a/.github/workflows/unit-test-llmruntime.yml b/.github/workflows/unit-test-llmruntime.yml index a08c8981543..c0c75532ab8 100644 --- a/.github/workflows/unit-test-llmruntime.yml +++ b/.github/workflows/unit-test-llmruntime.yml @@ -6,7 +6,6 @@ on: paths: - intel_extension_for_transformers/llm/runtime/graph/** - .github/workflows/unit-test-llmruntime.yml - - .github/workflows/script/unitTest/** - '!intel_extension_for_transformers/llm/runtime/graph/docs/**' - '!intel_extension_for_transformers/llm/runtime/graph/*.md' workflow_dispatch: diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json index 9d234a67752..63f52ce89f4 100644 --- 
a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json @@ -42,7 +42,7 @@ "llama-2-7b-chat": "llama", "mistral-7b": "llama", "chatglm2": "chinese1", - "baichuan-13b": "chinese1", + "baichuan-13b": "chinese3", "baichuan2-13b": "chinese1", "chatglm-6b": "chinese2" }, @@ -51,7 +51,8 @@ "llama": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. 
I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I have not seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. 
You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. 
Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. 
But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem that I had to face in the end was overscoping.", "gptj-6b": "It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? 
In a game, you need control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. 
Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in the vicinity of a plate, it starts conquering it for its team. It takes around 10 seconds for a plate to be conquered; less if more pasta from the same team are around. If pasta from other team are around, though, they get locked down in their attempt, unable to conquer the plate, until one of them die (think Battlefield's standard 'Conquest' mode). You get points every second for every plate you own. Over time, the concept also evolved to use an Italian bistro as its main scenario. Carlos, Carlos' Bistro's founder and owner Setup No major changes were made from my work setup. 
I used FDT and Starling creating an Adobe AIR (ActionScript) project, all tools or frameworks I already had some knowledge with. One big change for me was that I livestreamed my work through a twitch.tv account. This was a new thing for me. As recommended by Roushey, I used a program called XSplit and I got to say, it is pretty amazing. It made the livestream pretty effortless and the features are awesome, even for the free version. It was great to have some of my friends watch me, and then interact with them and random people through chat. It was also good knowing that I was also recording a local version of the files, so I could make a timelapse video later. Knowing the video was being recorded also made me a lot more self-conscious about my computer use, as if someone was watching over my shoulder. It made me realize that sometimes I spend too much time in seemingly inane tasks (I ended up wasting the longest time just to get some text alignment the way I wanted - it'll probably drive someone crazy if they watch it) and that I do way too many typos where writing code. I pretty much spend half of the time writing a line and the other half fixing the crazy characters in it. My own stream was probably boring to watch since I was coding for the most time. But livestreaming is one of the cool things to do as a spectator too. It was great seeing other people working - I had a few tabs opened on my second monitor all the time. It's actually a bit sad, because if I could, I could have spent the whole weekend just watching other people working! But I had to do my own work, so I'd only do it once in a while, when resting for a bit. Design Although I wanted some simple, low-fi, high-contrast kind of design, I ended up going with somewhat realistic (vector) art. I think it worked very well, fitting the mood of the game, but I also went overboard. 
For example: to know the state of a plate (who owns it, who's conquering it and how much time they have left before conquering it, which pasta units are in the queue, etc), you have to look at the plate's bill. The problem I realized when doing some tests is that people never look at the bill! They think it's some kind of prop, so they never actually read its details. Plus, if you're zoomed out too much, you can't actually read it, so it's hard to know what's going on with the game until you zoom in to the area of a specific plate. One other solution that didn't turn out to be as perfect as I thought was how to indicate who a plate base belongs to. In the game, that's indicated by the plate's decoration - its color denotes the team owner. But it's something that fits so well into the design that people never realized it, until they were told about it. In the end, the idea of going with a full physical metaphor is one that should be done with care. Things that are very important risk becoming background noise, unless the player knows its importance. Originally, I wanted to avoid any kind of heads-up display in my game. In the end, I ended up adding it at the bottom to indicate your credits and bases owned, as well as the hideous out-of-place-and-still-not-obvious 'Call Waiter' button. But in hindsight, I should have gone with a simple HUD from the start, especially one that indicated each team's colors and general state of the game without the need for zooming in and out. Development Development went fast. But not fast enough. Even though I worked around 32+ hours for this Ludum Dare, the biggest problem I had to face in the end was overscoping. I had too much planned, and could not get it all done. Content-wise, I had several kinds of pasta planned - Wikipedia is just amazing in that regard, split into several different groups, from small Pastina to huge Pasta al forno. 
But because of time constraints, I ended up scratching most of them, and ended up with 5 different types of small pasta - barely something to start when talking about the evolution of Pasta. Pastas used in the game. Unfortunately, the macs where never used Which is one of the saddest things about the project, really. It had the framework and the features to allow an endless number of elements in there, but I just did not have time to draw the rest of the assets needed (something I loved to do).", "chinese1": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,
我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是
如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?\"", - "chinese2": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单
的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是?\"" + "chinese2": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 
围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能
达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢但是工作?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是?\"", + "chinese3": "\"它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念 
围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?但是如何让它工作呢?工作呢?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能
达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。因此,在决定创作什么时,我最大的困惑不是我想创造什么,而是我不想要创造什么。我不想创建一个“智能设计”模拟器,并错误地称之为进化。这是一个问题,当然,其他参赛者也都要面对。从提交的条目来看,没有多少人设法解决这个问题。我想说,唯一真正的解决方案是通过使用人工选择,不知何故。到目前为止,我还没有看到任何条目在其核心游戏玩法中使用它。唉,这只是一个有趣的比赛,过了一段时间,我决定不那么严格地要求游戏理念,并允许自己选择我认为可行的任何内容。我最初的想法是创造一些东西,让人类试图进化到一个新的水平,但有某种敌人试图阻止他们这样做。我有点像人类灵魂在太空中飞向巨石或太空婴儿的图像(当然都是基于2001:太空漫游),但我想不出令人信服的(阅读:严肃的)机制。博格人是我的下一个灵感来源,因为他们的整个假设非常符合进化论的主题。但是如何让它工作呢?你是博格人,还是与博格人战斗?你是博格人,还是与博格人战斗?它完成了,并提交了。你可以在Android和网络上玩“美味生存”。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子,这可能会有点令人困惑。我想谈的有很多。我将浏览每个主题,而不是列出典型的正确/错误列表。概念围绕这个主题工作可能是我必须面对的最艰巨的任务之一。最初,我有一个想法,我想开发什么样的游戏,游戏玩法明智有很多敌人/演员的东西,简单的图形,可能设置在太空中,从自上而下的视图控制。我相信我可以围绕它适合任何主题。最后,游戏中像“进化”这样的主题的问题在于进化是无辅助的。随着时间的推移,它通过几个看似随机的突变发生,最合适的排列幸存下来。在我看来,这个基因汽车模拟器是面临挑战的物种实际进化的一个很好的例子。但这是游戏吗?在游戏中,您需要控制某些东西才能达到目标。这种控制违背了进化应该是什么样子。如果你允许用户选择如何进化某些东西,它就不再是进化了——它相当于智能设计,是创造论者发明的寓言,用来对抗进化论的想法。作为不可知论者和意大利面主义者,这不是以正确的方式摩擦我的东西。\"" } } } From 4d8d9a2f70fc60cc4521fe3d3b9784ed3d5a2516 Mon Sep 17 00:00:00 2001 From: Sihan Chen <39623753+Spycsh@users.noreply.github.com> Date: Fri, 5 Jan 2024 21:53:43 +0800 Subject: [PATCH 027/101] [NeuralChat] Fix tts crash with messy retrieval input and enhance normalizer (#1088) Fix tts crash with messy retrieval input and enhance normalizer --- .../neural_chat/pipeline/plugins/audio/tts.py | 2 +- .../plugins/audio/utils/english_normalizer.py | 7 +++---- .../ci/plugins/audio/test_english_normalizer.py | 2 +- .../neural_chat/tests/ci/plugins/audio/test_tts.py | 14 ++++++++++++++ 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts.py index 74e3f7df5bb..035710c434c 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/tts.py @@ -19,7 +19,6 @@ from 
datasets import load_dataset, Audio, Dataset, Features, ClassLabel import os import torch -from speechbrain.pretrained import EncoderClassifier from typing import Any, Dict, List, Union from transformers import SpeechT5HifiGan import soundfile as sf @@ -59,6 +58,7 @@ def __init__(self, output_audio_path="./response.wav", voice="default", stream_m self.stream_mode = stream_mode self.spk_model_name = "speechbrain/spkrec-xvect-voxceleb" try: + from speechbrain.pretrained import EncoderClassifier self.speaker_model = EncoderClassifier.from_hparams( source=self.spk_model_name, run_opts={"device": "cpu"}, diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/utils/english_normalizer.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/utils/english_normalizer.py index 77297b5613b..9b4e390b4ed 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/utils/english_normalizer.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/audio/utils/english_normalizer.py @@ -26,7 +26,7 @@ class EnglishNormalizer: def __init__(self): self.correct_dict = { - "A": "Eigh", + "A": "eigh", "B": "bee", "C": "cee", "D": "dee", @@ -34,7 +34,7 @@ def __init__(self): "F": "ef", "G": "jee", "H": "aitch", - "I": "I", + "I": "eye", "J": "jay", "K": "kay", "L": "el", @@ -58,8 +58,7 @@ def __init__(self): def correct_abbreviation(self, text): # TODO mixed abbreviation or proper noun like i7, ffmpeg, BTW should be supported - # words = text.split() # CVPR-15 will be upper but 1 and 5 will be splitted to two numbers - words = re.split(' |_|/', text) + words = re.split(r' |_|/|\*|\#', text) # ignore the characters that not break sentence results = [] for idx, word in enumerate(words): if word.startswith("-"): # bypass negative number diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_english_normalizer.py 
b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_english_normalizer.py index 809a8d48e40..6eb95ed0103 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_english_normalizer.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_english_normalizer.py @@ -52,7 +52,7 @@ def test_correct_conjunctions(self): text = "CVPR-15 ICML-21 PM2.5" text = self.normalizer.correct_abbreviation(text) result = self.normalizer.correct_number(text) - self.assertEqual(result, "cee vee pea ar fifteen I cee em el twenty-one pea em two point five.") + self.assertEqual(result, "cee vee pea ar fifteen eye cee em el twenty-one pea em two point five.") if __name__ == "__main__": unittest.main() diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_tts.py b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_tts.py index e6bb93f8e34..ff214c154f7 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_tts.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/audio/test_tts.py @@ -95,7 +95,11 @@ def test_tts_long_text(self): output_audio_path = os.path.join(os.getcwd(), "tmp_audio/2.wav") set_seed(555) output_audio_path = self.tts.text2speech(text, output_audio_path, voice="default", do_batch_tts=True, batch_length=120) + result = self.asr.audio2text(output_audio_path) self.assertTrue(os.path.exists(output_audio_path)) + self.assertEqual("intel extension for transformers is an innovative toolkit to accelerate transformer based " + \ + "models on intel platforms in particular effective on 4th intel xeon scalable processor " + \ + "sapphire rapids codenamed sapphire rapids", result) def test_create_speaker_embedding(self): driven_audio_path = \ @@ -117,5 +121,15 @@ def test_tts_remove_noise(self): result = self.asr.audio2text(output_audio_path) self.assertEqual(text.lower(), result.lower()) + def 
test_tts_messy_input(self): + text = "Please refer to the following responses to this inquiry:\n" + 244 * "* " + "*" + output_audio_path = os.path.join(os.getcwd(), "tmp_audio/6.wav") + set_seed(555) + output_audio_path = self.tts_noise_reducer.text2speech(text, output_audio_path, voice="default") + self.assertTrue(os.path.exists(output_audio_path)) + # verify accuracy + result = self.asr.audio2text(output_audio_path) + self.assertEqual("please refer to the following responses to this inquiry", result.lower()) + if __name__ == "__main__": unittest.main() From 607e5b6d8074ee9b519cc7efdf2d46d9e8fe9524 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Sat, 6 Jan 2024 07:10:50 +0800 Subject: [PATCH 028/101] Fix UT issue for plugins (#1116) Signed-off-by: lvliang-intel --- .../neural_chat/tests/ci/api/test_chatbot_build_api.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py index ba5a2bb5d70..81ed3189cac 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py @@ -85,6 +85,7 @@ def test_build_chatbot_with_audio_plugin(self): self.assertIsNotNone(response) print("output audio path: ", response) self.assertTrue(os.path.exists("./output_audio.wav")) + plugins.tts.enable = False def test_build_chatbot_with_safety_checker_plugin(self): plugins.safety_checker.enable = True @@ -95,6 +96,7 @@ def test_build_chatbot_with_safety_checker_plugin(self): response = chatbot.predict(query="蔡英文是谁?") print("response: ", response) self.assertTrue(response, "Your query contains sensitive words, please try another query.") + plugins.safety_checker.enable = False def test_build_chatbot_with_retrieval_plugin(self): plugins.retrieval.enable = True @@ -144,7 +146,7 @@ def 
test_build_chatbot_with_retrieval_plugin_bge_int8(self): response = chatbot.predict(query="What is Intel extension for transformers?") self.assertIsNotNone(response) plugins.retrieval.enable = False - + def test_build_chatbot_with_retrieval_plugin_using_local_file(self): def _run_retrieval(local_dir): @@ -157,13 +159,13 @@ def _run_retrieval(local_dir): self.assertIsNotNone(chatbot) response = chatbot.predict(query="What is Intel extension for transformers?") self.assertIsNotNone(response) + plugins.retrieval.enable = False # test local file _run_retrieval(local_dir="/tf_dataset2/inc-ut/gte-base") _run_retrieval(local_dir="/tf_dataset2/inc-ut/instructor-large") _run_retrieval(local_dir="/tf_dataset2/inc-ut/bge-base-en-v1.5") - if __name__ == '__main__': unittest.main() From 1d84fd83e19bb3eb66af1c98cfbd7f223265942d Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Sun, 7 Jan 2024 22:07:29 +0800 Subject: [PATCH 029/101] Fix langchain version (#1118) Signed-off-by: lvliang-intel --- intel_extension_for_transformers/neural_chat/requirements.txt | 2 +- .../neural_chat/requirements_cpu.txt | 2 +- .../neural_chat/requirements_hpu.txt | 2 +- .../neural_chat/requirements_pc.txt | 2 +- .../neural_chat/requirements_xpu.txt | 2 +- .../neural_chat/tests/requirements.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/requirements.txt b/intel_extension_for_transformers/neural_chat/requirements.txt index 0dd6aafa763..2897a38a511 100644 --- a/intel_extension_for_transformers/neural_chat/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/requirements.txt @@ -15,7 +15,7 @@ evaluate pydub python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx scikit-learn diff --git a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt index 50c4fa45cb1..da8bf2b1a29 100644 --- 
a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt @@ -12,7 +12,7 @@ evaluate pydub python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx scikit-learn diff --git a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt index 5ba7df70ab3..38169d8d44f 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt @@ -12,7 +12,7 @@ evaluate pydub python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx librosa diff --git a/intel_extension_for_transformers/neural_chat/requirements_pc.txt b/intel_extension_for_transformers/neural_chat/requirements_pc.txt index 5e9f45c7440..d124c27523d 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_pc.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_pc.txt @@ -11,7 +11,7 @@ evaluate pydub python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx scikit-learn diff --git a/intel_extension_for_transformers/neural_chat/requirements_xpu.txt b/intel_extension_for_transformers/neural_chat/requirements_xpu.txt index eb67e80b1cd..f7362190b90 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_xpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_xpu.txt @@ -9,7 +9,7 @@ evaluate pydub python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx scikit-learn diff --git a/intel_extension_for_transformers/neural_chat/tests/requirements.txt b/intel_extension_for_transformers/neural_chat/tests/requirements.txt index cddbbb39ddb..320a6c812ef 100644 --- a/intel_extension_for_transformers/neural_chat/tests/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/tests/requirements.txt @@ -15,7 +15,7 @@ evaluate pydub 
python-multipart PyPDF2 -langchain +langchain==0.0.354 langchain_core python-docx scikit-learn From f6b9e32d4cd60add63e41726a2e35534535491f0 Mon Sep 17 00:00:00 2001 From: yuwenzho Date: Sun, 7 Jan 2024 22:08:35 +0800 Subject: [PATCH 030/101] Enable Qdrant vectorstore (#1076) * enable qdrant vectorstore Signed-off-by: yuwenzho Co-authored-by: XuhuiRen --- .../langchain/vectorstores/__init__.py | 1 + .../langchain/vectorstores/qdrant.py | 290 ++++++++++++++++++ .../plugins/retrieval/retrieval_agent.py | 17 +- .../neural_chat/requirements.txt | 1 + .../neural_chat/requirements_cpu.txt | 3 +- .../neural_chat/requirements_hpu.txt | 1 + .../neural_chat/requirements_pc.txt | 1 + .../neural_chat/requirements_xpu.txt | 1 + .../tests/ci/api/test_chatbot_build_api.py | 1 + .../tests/ci/api/test_inference.py | 42 +++ .../neural_chat/tests/requirements.txt | 3 +- 11 files changed, 356 insertions(+), 5 deletions(-) create mode 100644 intel_extension_for_transformers/langchain/vectorstores/qdrant.py diff --git a/intel_extension_for_transformers/langchain/vectorstores/__init__.py b/intel_extension_for_transformers/langchain/vectorstores/__init__.py index b2767507201..c01bddf84c9 100644 --- a/intel_extension_for_transformers/langchain/vectorstores/__init__.py +++ b/intel_extension_for_transformers/langchain/vectorstores/__init__.py @@ -16,3 +16,4 @@ # limitations under the License. from .chroma import Chroma +from .qdrant import Qdrant diff --git a/intel_extension_for_transformers/langchain/vectorstores/qdrant.py b/intel_extension_for_transformers/langchain/vectorstores/qdrant.py new file mode 100644 index 00000000000..aebb1d1d8dd --- /dev/null +++ b/intel_extension_for_transformers/langchain/vectorstores/qdrant.py @@ -0,0 +1,290 @@ +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import logging +from typing import Any, Type, List, Optional, TYPE_CHECKING + +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain.vectorstores.qdrant import Qdrant as Qdrant_origin +from intel_extension_for_transformers.transformers.utils.utility import LazyImport + +logging.basicConfig( + format="%(asctime)s %(name)s:%(levelname)s:%(message)s", + datefmt="%d-%M-%Y %H:%M:%S", + level=logging.INFO +) + +if TYPE_CHECKING: + from qdrant_client.conversions import common_types + +_DEFAULT_PERSIST_DIR = './output' + +qdrant_client = LazyImport("qdrant_client") + +class Qdrant(Qdrant_origin): + + _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" + + @classmethod + def from_documents( + cls, + documents: List[Document], + embedding: Embeddings, + sign: Optional[str] = None, + location: Optional[str] = None, + url: Optional[str] = None, + api_key: Optional[str] = None, + host: Optional[str]= None, + persist_directory: Optional[str] = None, + collection_name: Optional[str] = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + force_recreate: Optional[bool] = False, + **kwargs: Any, + ): + """Create a Qdrant vectorstore from a list of documents. + + Args: + documents (List[Document]): List of documents to add to the vectorstore. + embedding (Optional[Embeddings]): A subclass of `Embeddings`, responsible for text vectorization. + sign (Optional[str], optional): sign for retrieval_type of 'child_parent'. Defaults to None. + location (Optional[str], optional): + If `:memory:` - use in-memory Qdrant instance. 
+ If `str` - use it as a `url` parameter. + If `None` - fallback to relying on `host` and `port` parameters. + Defaults to None. + url (Optional[str], optional): either host or str of "Optional[scheme], host, Optional[port], + Optional[prefix]". Defaults to None. + api_key (Optional[str], optional): API key for authentication in Qdrant Cloud. Defaults to None. + host (Optional[str], optional): Host name of Qdrant service. If url and host are None, set to + 'localhost'. Defaults to None. + persist_directory (Optional[str], optional): Path in which the vectors will be stored while using + local mode. Defaults to None. + collection_name (Optional[str], optional): Name of the Qdrant collection to be used. + Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME. + force_recreate (bool, optional): _description_. Defaults to False. + """ + if sum([param is not None for param in (location, url, host, persist_directory)]) == 0: + # One of 'location', 'url', 'host' or 'persist_directory' should be specified. + persist_directory = _DEFAULT_PERSIST_DIR + if sign == "child": + persist_directory = persist_directory + "_child" + texts = [d.page_content for d in documents] + metadatas = [d.metadata for d in documents] + return cls.from_texts( + texts, + embedding, + metadatas=metadatas, + location=location, + url=url, + api_key=api_key, + host=host, + path=persist_directory, + collection_name=collection_name, + force_recreate=force_recreate, + **kwargs) + + @classmethod + def build( + cls, + documents: List[Document], + embedding: Optional[Embeddings], + sign: Optional[str] = None, + location: Optional[str] = None, + url: Optional[str] = None, + api_key: Optional[str] = None, + host: Optional[str]= None, + persist_directory: Optional[str] = None, + collection_name: Optional[str] = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + force_recreate: Optional[bool] = False, + **kwargs: Any, + ): + """Build a Qdrant vectorstore. 
+ + Args: + documents (List[Document]): List of documents to add to the vectorstore. + embedding (Optional[Embeddings]): A subclass of `Embeddings`, responsible for text vectorization. + sign (Optional[str], optional): sign for retrieval_type of 'child_parent'. Defaults to None. + location (Optional[str], optional): + If `:memory:` - use in-memory Qdrant instance. + If `str` - use it as a `url` parameter. + If `None` - fallback to relying on `host` and `port` parameters. + Defaults to None. + url (Optional[str], optional): either host or str of "Optional[scheme], host, Optional[port], + Optional[prefix]". Defaults to None. + api_key (Optional[str], optional): API key for authentication in Qdrant Cloud. Defaults to None. + host (Optional[str], optional): Host name of Qdrant service. If url and host are None, set to + 'localhost'. Defaults to None. + persist_directory (Optional[str], optional): Path in which the vectors will be stored while using + local mode. Defaults to None. + collection_name (Optional[str], optional): Name of the Qdrant collection to be used. + Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME. + force_recreate (bool, optional): _description_. Defaults to False. + kwargs: + Current used: + port (Optional[int], optional): Port of the REST API interface. Defaults to 6333. + grpc_port (int, optional): Port of the gRPC interface. Defaults to 6334. + prefer_grpc (bool, optional): If true - use gPRC interface whenever possible in custom methods. + Defaults to False. + https (Optional[bool], optional): If true - use HTTPS(SSL) protocol. + prefix (Optional[str], optional): + If not None - add prefix to the REST URL path. + Example: service/v1 will result in + http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. + timeout (Optional[float], optional): + Timeout for REST and gRPC API requests. + + distance_func (str, optional): Distance function. One of: "Cosine" / "Euclid" / "Dot". + Defaults to "Cosine". 
+ content_payload_key (str, optional): A payload key used to store the content of the document. + Defaults to CONTENT_KEY. + metadata_payload_key (str, optional): A payload key used to store the metadata of the document. + Defaults to METADATA_KEY. + vector_name (Optional[str], optional): Name of the vector to be used internally in Qdrant. + Defaults to VECTOR_NAME. + shard_number (Optional[int], optional): Number of shards in collection. + replication_factor (Optional[int], optional): + Replication factor for collection. + Defines how many copies of each shard will be created. + Have effect only in distributed mode. + write_consistency_factor (Optional[int], optional): + Write consistency factor for collection. + Defines how many replicas should apply the operation for us to consider + it successful. Increasing this number will make the collection more + resilient to inconsistencies, but will also make it fail if not enough + replicas are available. + Does not have any performance impact. + Have effect only in distributed mode. + on_disk_payload (Optional[bool], optional): + If true - point`s payload will not be stored in memory. + It will be read from the disk every time it is requested. + This setting saves RAM by (slightly) increasing the response time. + Note: those payload values that are involved in filtering and are + indexed - remain in RAM. + hnsw_config (Optional[common_types.HnswConfigDiff], optional): Params for HNSW index. + optimizers_config (Optional[common_types.OptimizersConfigDiff], optional): Params for optimizer. + wal_config (Optional[common_types.WalConfigDiff], optional): Params for Write-Ahead-Log. + quantization_config (Optional[common_types.QuantizationConfig], optional): + Params for quantization, if None - quantization will be disable. + init_from (Optional[common_types.InitFrom], optional): + Use data stored in another collection to initialize this collection. 
+ on_disk (Optional[bool], optional): if True, vectors will be stored on disk. + If None, default value will be used. + """ + if sum([param is not None for param in (location, url, host, persist_directory)]) == 0: + # One of 'location', 'url', 'host' or 'persist_directory' should be specified. + persist_directory = _DEFAULT_PERSIST_DIR + if sign == "child": + persist_directory = persist_directory + "_child" + if persist_directory and os.path.exists(persist_directory): + if bool(os.listdir(persist_directory)): + logging.info("Load the existing database!") + texts = [d.page_content for d in documents] + qdrant_collection = cls.construct_instance( + texts=texts, + embedding=embedding, + location=location, + url=url, + api_key=api_key, + host=host, + path=persist_directory, + collection_name=collection_name, + force_recreate=force_recreate, + **kwargs + ) + return qdrant_collection + else: + logging.info("Create a new knowledge base...") + qdrant_collection = cls.from_documents( + documents=documents, + embedding=embedding, + location=location, + url=url, + api_key=api_key, + host=host, + persist_directory=persist_directory, + collection_name=collection_name, + force_recreate=force_recreate, + **kwargs, + ) + return qdrant_collection + + + @classmethod + def reload( + cls, + embedding: Optional[Embeddings], + location: Optional[str] = None, + url: Optional[str] = None, + api_key: Optional[str] = None, + host: Optional[str]= None, + persist_directory: Optional[str] = None, + collection_name: Optional[str] = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + force_recreate: bool = False, + **kwargs: Any, + ): + """Reload a Qdrant vectorstore. + + Args: + embedding (Optional[Embeddings]): A subclass of `Embeddings`, responsible for text vectorization. + location (Optional[str], optional): + If `:memory:` - use in-memory Qdrant instance. + If `str` - use it as a `url` parameter. + If `None` - fallback to relying on `host` and `port` parameters. + Defaults to None. 
+ url (Optional[str], optional): either host or str of "Optional[scheme], host, Optional[port], + Optional[prefix]". Defaults to None. + api_key (Optional[str], optional): API key for authentication in Qdrant Cloud. Defaults to None. + host (Optional[str], optional): Host name of Qdrant service. If url and host are None, set to + 'localhost'. Defaults to None. + persist_directory (Optional[str], optional): Path in which the vectors will be stored while using + local mode. Defaults to None. + collection_name (Optional[str], optional): Name of the Qdrant collection to be used. + Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME. + force_recreate (bool, optional): _description_. Defaults to False. + """ + if sum([param is not None for param in (location, url, host, persist_directory)]) == 0: + # One of 'location', 'url', 'host' or 'persist_directory' should be specified. + persist_directory = _DEFAULT_PERSIST_DIR + + # for a single quick embedding to get vector size + tmp_texts = ["foo"] + + qdrant_collection = cls.construct_instance( + texts=tmp_texts, + embedding=embedding, + location=location, + url=url, + api_key=api_key, + host=host, + path=persist_directory, + collection_name=collection_name, + force_recreate=force_recreate, + **kwargs + ) + return qdrant_collection + + + def is_local( + self, + ): + """Determine whether a client is local.""" + if hasattr(self.client, "_client") and \ + isinstance(self.client._client, qdrant_client.local.qdrant_local.QdrantLocal): + return True + else: + return False \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/retrieval_agent.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/retrieval_agent.py index 784e63ca6e7..7b956aad793 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/retrieval_agent.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/retrieval_agent.py @@ -26,7 +26,7 
@@ HuggingFaceInstructEmbeddings, HuggingFaceBgeEmbeddings from langchain.embeddings import GooglePalmEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter -from intel_extension_for_transformers.langchain.vectorstores import Chroma +from intel_extension_for_transformers.langchain.vectorstores import Chroma, Qdrant import uuid from langchain_core.documents import Document import logging @@ -133,10 +133,12 @@ def __init__(self, logging.info("The format of parsed documents is transferred.") if self.vector_database == "Chroma": - self.database = Chroma() + self.database = Chroma + elif self.vector_database == "Qdrant": + self.database = Qdrant # elif self.vector_database == "PGVector": # self.database = PGVector() - + if self.retrieval_type == 'default': # Using vector store retriever if append: knowledge_base = self.database.from_documents(documents=langchain_documents, embedding=self.embeddings, @@ -145,6 +147,9 @@ def __init__(self, knowledge_base = self.database.build(documents=langchain_documents, embedding=self.embeddings, **kwargs) self.retriever = RetrieverAdapter(retrieval_type=self.retrieval_type, document_store=knowledge_base, \ **kwargs).retriever + if self.vector_database == "Qdrant" and knowledge_base.is_local(): + # one local storage folder cannot be accessed by multiple instances of Qdrant client simultaneously. + knowledge_base.client.close() elif self.retrieval_type == "child_parent": # Using child-parent store retriever child_documents = self.splitter.split_documents(langchain_documents) if append: @@ -158,6 +163,12 @@ def __init__(self, sign='child', **kwargs) self.retriever = RetrieverAdapter(retrieval_type=self.retrieval_type, document_store=knowledge_base, \ child_document_store=child_knowledge_base, **kwargs).retriever + if self.vector_database == "Qdrant" : + # one local storage folder cannot be accessed by multiple instances of Qdrant client simultaneously. 
+ if knowledge_base.is_local(): + knowledge_base.client.close() + if child_knowledge_base.is_local(): + child_knowledge_base.client.close() logging.info("The retriever is successfully built.") def reload_localdb(self, local_persist_dir, **kwargs): diff --git a/intel_extension_for_transformers/neural_chat/requirements.txt b/intel_extension_for_transformers/neural_chat/requirements.txt index 2897a38a511..c79462d9cbc 100644 --- a/intel_extension_for_transformers/neural_chat/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/requirements.txt @@ -71,3 +71,4 @@ urllib3 langid diffusers==0.12.1 transformers_stream_generator +qdrant-client \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt index da8bf2b1a29..4dbc3e8656c 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt @@ -48,4 +48,5 @@ einops cchardet zhconv urllib3 -langid \ No newline at end of file +langid +qdrant-client \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt index 38169d8d44f..2d59275f85c 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt @@ -43,3 +43,4 @@ einops zhconv urllib3 langid +qdrant-client \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/requirements_pc.txt b/intel_extension_for_transformers/neural_chat/requirements_pc.txt index d124c27523d..a197cdc50b5 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_pc.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_pc.txt @@ -46,3 +46,4 @@ langid pymysql deepface exifread +qdrant-client \ No newline at end of file diff --git 
a/intel_extension_for_transformers/neural_chat/requirements_xpu.txt b/intel_extension_for_transformers/neural_chat/requirements_xpu.txt index f7362190b90..0d3576466a6 100644 --- a/intel_extension_for_transformers/neural_chat/requirements_xpu.txt +++ b/intel_extension_for_transformers/neural_chat/requirements_xpu.txt @@ -39,3 +39,4 @@ exifread zhconv urllib3 langid +qdrant-client \ No newline at end of file diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py index 81ed3189cac..c4e196625fe 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py @@ -150,6 +150,7 @@ def test_build_chatbot_with_retrieval_plugin_bge_int8(self): def test_build_chatbot_with_retrieval_plugin_using_local_file(self): def _run_retrieval(local_dir): + plugins.tts.enable = False plugins.retrieval.enable = True plugins.retrieval.args["input_path"] = "../../../README.md" plugins.retrieval.args["embedding_model"] = local_dir diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_inference.py b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_inference.py index 6d7e6941f6c..8023b9904f7 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_inference.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_inference.py @@ -63,6 +63,22 @@ def test_retrieval(self): print(response) self.assertIsNotNone(response) plugins.retrieval.enable = False + + def test_retrieval_with_qdrant(self): + plugins.retrieval.enable = True + input_path="/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/assets/docs/" + if os.path.exists(input_path): + plugins.retrieval.args["input_path"] = input_path + else: + plugins.retrieval.args["input_path"] = "../assets/docs/" + 
plugins.retrieval.args["vector_database"] = "Qdrant" + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.") + print(response) + self.assertIsNotNone(response) + plugins.retrieval.enable = False def test_retrieval_append(self): plugins.retrieval.enable = True @@ -87,6 +103,30 @@ def test_retrieval_append(self): plugins.retrieval.args["persist_directory"] = "./output" plugins.retrieval.enable = False + def test_retrieval_append_with_qdrant(self): + plugins.retrieval.enable = True + plugins.retrieval.args["append"] = True + plugins.retrieval.args["input_path"] = "../assets/docs/" + plugins.retrieval.args["persist_directory"] = "./check_append" + plugins.retrieval.args["vector_database"] = "Qdrant" + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.") + print(response) + self.assertIsNotNone(response) + + plugins.retrieval.args["append"] = False + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.") + print(response) + self.assertIsNotNone(response) + plugins.retrieval.args["append"] = True + plugins.retrieval.args["persist_directory"] = "./output" + plugins.retrieval.enable = False + @unittest.skipIf(get_device_type() != 'cpu', "Only run this test on CPU") def test_voice_chat(self): plugins.tts.enable = True @@ -128,6 +168,8 @@ def test_text_chat_stream(self): suite.addTest(UnitTest('test_quantization')) suite.addTest(UnitTest('test_text_chat_stream')) suite.addTest(UnitTest('test_voice_chat')) + suite.addTest(UnitTest('test_retrieval_with_qdrant')) + suite.addTest(UnitTest('test_retrieval_append_with_qdrant')) runner = unittest.TextTestRunner() 
diff --git a/intel_extension_for_transformers/neural_chat/tests/requirements.txt b/intel_extension_for_transformers/neural_chat/tests/requirements.txt index 320a6c812ef..fb72c247ed1 100644 --- a/intel_extension_for_transformers/neural_chat/tests/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/tests/requirements.txt @@ -69,4 +69,5 @@ langid optimum-intel==1.11.0 zhconv diffusers -transformers_stream_generator \ No newline at end of file +transformers_stream_generator +qdrant-client \ No newline at end of file From 7fcb3850d86152583c817be0bd2916a5b7c81369 Mon Sep 17 00:00:00 2001 From: XuhuiRen <44249229+XuhuiRen@users.noreply.github.com> Date: Sun, 7 Jan 2024 22:17:21 +0800 Subject: [PATCH 031/101] fix (#1080) Signed-off-by: XuhuiRen --- .../pipeline/plugins/prompt/prompt_template.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py b/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py index 6e01432c29b..14a8f24adc5 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/prompt/prompt_template.py @@ -22,14 +22,14 @@ def generate_qa_prompt(query, context=None, history=None): if context and history: conv = PromptTemplate("rag_with_context_memory") - conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[1], context) conv.append_message(conv.roles[2], history) + conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[3], None) elif context: conv = PromptTemplate("rag_with_context_memory") - conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[1], context) + conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[3], None) else: conv = PromptTemplate("rag_without_context") @@ -40,14 +40,14 @@ def 
generate_qa_prompt(query, context=None, history=None): def generate_qa_enterprise(query, context=None, history=None): if context and history: conv = PromptTemplate("rag_with_threshold") - conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[1], context) conv.append_message(conv.roles[2], history) + conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[3], None) else: conv = PromptTemplate("rag_with_threshold") - conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[1], context) + conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[3], None) return conv.get_prompt() @@ -55,8 +55,8 @@ def generate_qa_enterprise(query, context=None, history=None): def generate_prompt(query, history=None): if history: conv = PromptTemplate("rag_without_context_memory") - conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[1], history) + conv.append_message(conv.roles[0], query) conv.append_message(conv.roles[2], None) else: conv = PromptTemplate("rag_without_context") From 9dc1b7f77bfee447f24bd83f2953e0f5879c52b9 Mon Sep 17 00:00:00 2001 From: XuhuiRen <44249229+XuhuiRen@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:26:16 +0800 Subject: [PATCH 032/101] add (#1119) Signed-off-by: XuhuiRen --- .../pipeline/plugins/retrieval/README.md | 192 +++++++++++++++--- 1 file changed, 167 insertions(+), 25 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/README.md b/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/README.md index 49fcfbaac16..0452bf2f429 100644 --- a/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/README.md +++ b/intel_extension_for_transformers/neural_chat/pipeline/plugins/retrieval/README.md @@ -3,15 +3,15 @@
# Introduction -Large Language Models (LLMs) have demonstrated remarkable performance in various Natural Language Processing (NLP) tasks. Compared to earlier pretrained models, LLMs can produce better results on tasks without fine-tuning, reducing the cost of use. The popularity of applications like ChatGPT has attracted many users seeking to address everyday problems. However, some users have encountered a challenge known as "model hallucination," where LLMs generate incorrect or nonexistent information, raising concerns about content accuracy. +Large language models (LLMs) have shown exceptional performance in various natural language processing tasks, establishing them as essential tools for understanding and generating language. These models absorb extensive world knowledge from their vast training datasets, enabling them to produce fluent and coherent responses to user queries without external data resources. However, despite the remarkable breadth of knowledge large language models gain during training, they face limitations in accessing up-to-date information and certain domain-specific data. This limitation can lead to a significant concern: the tendency of large language models to 'hallucinate' or create fictitious content in their responses. -To improve the accuracy of generated content, two approaches can be considered: expanding the training data or utilizing an external database. Expanding the training data is impractical due to the time and effort required to train a high-performance LLM. It's challenging to collect and maintain an extensive, up-to-date knowledge corpus. Therefore, we propose an economically efficient alternative: leveraging relevant documents from a local database during content generation. These retrieved documents will be integrated into the input prompt of the LLM to enhance the accuracy and reliability of the generated results. 
+This hallucination problem primarily arises from two factors: (1) LLMs are predominantly trained using data from the Internet, which limits their exposure to specific, domain-focused information; and (2) LLMs mainly rely on the training corpus for information extraction. These models remain unaware of events occurring post-training, which can be particularly problematic for topics that change daily. Two methods are recognized as effective for the model hallucination problem: [Finetuning the LLM on task-specific datasets](https://arxiv.org/abs/2311.08401) and [Retrieval-Augmented Generation (RAG)](https://arxiv.org/abs/2212.10560). However, finetuning an LLM is impractical for most users because it requires high-quality datasets, labor-intensive data annotation, and substantial computational resources. Also, it is challenging to collect and maintain an extensive, up-to-date knowledge corpus. Therefore, we propose an economically efficient alternative based on RAG. It retrieves relevant documents from a local database to serve as the reference to enhance the accuracy and reliability of the generated results.

-The Neural Chat API offers an easy way to create and utilize chatbot models while integrating local documents. Our API simplifies the process of automatically handling and storing local documents in a document store. We provide support for two retrieval methods:
-1. Dense Retrieval: This method is based on document embeddings, enhancing the accuracy of retrieval. Learn more about [here](https://medium.com/@aikho/deep-learning-in-information-retrieval-part-ii-dense-retrieval-1f9fecb47de9).
-2. Sparse Retrieval: Using TF-IDF, this method efficiently retrieves relevant information. Explore this approach in detail [here](https://medium.com/itnext/deep-learning-in-information-retrieval-part-i-introduction-and-sparse-retrieval-12de0423a0b9).
+Inspired by the prevalent chatbot frameworks [langchain](https://github.com/langchain-ai/langchain), [Llama-Index](https://github.com/run-llama/llama_index) and [haystack](https://github.com/deepset-ai/haystack), our NeuralChat API offers an easy way to create and utilize chatbot models while integrating RAG. Our API provides an easy-to-use extension for langchain users as well as convenient deployment code for the general user. Without too much learning effort, the user can build their own RAG-based chatbot with their documents. The details about our langchain extension feature can be seen [here](#langchain-extension).

-We have already provided support for a wide range of pre-trained embedding models featured on the [HuggingFace text embedding leaderboard](https://huggingface.co/spaces/mteb/leaderboard). Users can conveniently choose an embedding model in two ways: they can either specify the model by its name on HuggingFace or download a model and save it under the default name. Below is a list of some supported embedding models available in our plugin. Users can select their preferred embedding model based on various factors such as model size, embedding dimensions, maximum sequence length, and average ranking score.
+Currently, we concentrate on [dense retrieval](https://medium.com/@aikho/deep-learning-in-information-retrieval-part-ii-dense-retrieval-1f9fecb47de9) to construct the RAG pipeline. The dense retrieval will return the documents that share a similar semantic expression with the candidate queries instead of a keyword expression, which is more suitable for the long-context application scenario.
+
+The embedding model is a crucial factor influencing the retrieval accuracy. We have already provided support for a wide range of open-released pre-trained embedding models featured on the [HuggingFace text embedding leaderboard](https://huggingface.co/spaces/mteb/leaderboard).
Users can conveniently choose an embedding model in two ways: they can either specify the model by its name on HuggingFace or download a model and save it under the default name. Below is a list of some supported embedding models available in our plugin. Users can select their preferred embedding model based on various factors such as model size, embedding dimensions, maximum sequence length, and average ranking score. | Model | Model Size (GB) |Embedding Dimensions |Max Sequence Length |Average Ranking Score | | :----: | :----: | :----: | :----: |:----: | | [bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1.34 |1024 |512 |64.23| @@ -25,14 +25,16 @@ We have already provided support for a wide range of pre-trained embedding model In addition, our plugin seamlessly integrates the online embedding model, Google Palm2 embedding. To set up this feature, please follow the [Google official guideline](https://developers.generativeai.google/tutorials/embeddings_quickstart) to obtain your API key. Once you have your API key, you can activate the Palm2 embedding service by setting the `embedding_model` parameter to 'Google'. -The workflow of this plugin consists of three main operations: document indexing, intent detection, and retrieval. The `Agent_QA` initializes itself using the provided `input_path` to construct a local database. During a conversation, the user's query is first passed to the `IntentDetector` to determine whether the user intends to engage in chitchat or seek answers to specific questions. If the `IntentDetector` determines that the user's query requires an answer, the retriever is activated to search the database using the user's query. The documents retrieved from the database serve as reference context in the input prompt, assisting in generating responses using the Large Language Models (LLMs). 
+
+> The recent code refactorization of `sentence-transformers` will impact the operation behaviour of the embedding models, so please check and install the latest `sentence-transformers` from source!
+
+This plugin streamlines three key processes: parsing documents, identifying user intentions, and fetching relevant information. Initially, the `Agent_QA` sets itself up by building a local database from the data at input_path. In the midst of a conversation, when a user poses a question, it first goes through the `IntentDetector`. This step is crucial to figure out if the user is just making casual conversation or looking for specific information. If the IntentDetector concludes that the user is seeking an answer, it triggers the `retrieval` process. This involves scouring the database with the user's question to find pertinent information. The information thus obtained forms the basis for crafting responses with the help of Large Language Models (LLMs).

-We have already provided support for popular file format on retrieval. When using xlsx, csv, json/jsonl, predefined structure should be used.
+To ensure a smooth experience, we've made sure this plugin is compatible with common file formats like xlsx, csv, and json/jsonl. It's important to note that these files need to follow a specific structure for optimal functioning.

| File Type | Predefined Structure |
| :----: | :----: |
| xlsx | ['Questions', 'Answers']
['question', 'answer', 'link']
['context', 'link'] | | csv | ['question', 'correct_answer'] | -| json/jsonl | {'doc':xxx, 'doc_id':xxx}| +| json/jsonl | {'content':xxx, 'link':xxx}| | txt | / | | html | / | | markdown | / | @@ -44,10 +46,10 @@ The most convenient way to use is this plugin is via our `build_chatbot` api as We support multiple file formats for retrieval, including unstructured file formats such as pdf, docx, html, txt, and markdown, as well as structured file formats like jsonl and xlsx. For structured file formats, they must adhere to predefined structures. -In the case of jsonl files, they should be formatted as dictionaries, such as: {"doc": xxx, "doc_id": xxx}. The support for xlsx files is specifically designed for Question-Answer (QA) tasks. Users can input QA pairs for retrieval. Therefore, the table's header should include items labeled as "Question" and "Answer". The reference files could be found [here](https://github.com/intel/intel-extension-for-transformers/tree/main/intel_extension_for_transformers/neural_chat/assets/docs). +In the case of jsonl files, they should be formatted as dictionaries, such as: {'content':xxx, 'link':xxx}. The support for xlsx files is specifically designed for Question-Answer (QA) tasks. Users can input QA pairs for retrieval. Therefore, the table's header should include items labeled as "Question" and "Answer". The reference files could be found [here](https://github.com/intel/intel-extension-for-transformers/tree/main/intel_extension_for_transformers/neural_chat/assets/docs). ## Import the module and set the retrieval config: -The user can download the [Intel 2022 Annual Report](https://d1io3yog0oux5.cloudfront.net/_897efe2d574a132883f198f2b119aa39/intel/db/888/8941/file/412439%281%29_12_Intel_AR_WR.pdf) for a quick test. +> The user can download the [Intel 2022 Annual Report](https://d1io3yog0oux5.cloudfront.net/_897efe2d574a132883f198f2b119aa39/intel/db/888/8941/file/412439%281%29_12_Intel_AR_WR.pdf) for a quick test. 
```python
from intel_extension_for_transformers.neural_chat import PipelineConfig
@@ -68,30 +70,170 @@ response = chatbot.predict("What is IDM 2.0?")
Checkout the full example [retrieval_chat.py](../../../examples/retrieval/retrieval_chat.py) and have a try!

# Parameters
-The user can costomize the retrieval parameters to meet the personal demmads for better catering the local files. The user can set the specific parameter by plugins.retrieval.args["xxx"]. Below the description of each available parameters,
+Users have the flexibility to tailor the retrieval configuration to meet their individual needs and adapt to their local files. To customize a particular aspect of the retrieval plugin, you can adjust its settings as follows:
+```python
+plugins.retrieval.args["xxx"]=xxx
+```
+Below are the descriptions for the available parameters in `agent_QA`,
+
+| Parameters | Type | Description| Options|
+| ---- | ---- | --| --|
+| vector_database | str | The vector database for constructing the knowledge base. |"Chroma", "Qdrant"|
+| input_path | str | The path of the file/folder/link of the content to formulate the knowledge base |-|
+| embedding_model | str | The name or path for the text embedding model |-|
+| response_template | str | Default response when there are no available relevant documents for RAG |-|
+| mode | str | The RAG behavior for different use case. Please check [here](#rag-mode) |"accuracy", "general"|
+| retrieval_type | str | The type of the retriever. Please check [here](#retrievers) for more details | "default", "child_parent"|
+| process | bool | Whether to split the long documents into small chucks.
The size of each chuck is defined by `max_chuck_size` and `min_chuck_size`|True, False| +| max_chuck_size | int | The max token length for a single chuck in the knowledge base |-| +| min_chuck_size | int | The min token length for a single chuck in the knowledge base |-| +| append | bool | Whether the new knowledge will be append to the existing knowledge base or directly load the existing knowledge base |True, False| + +More retriever- and vectorstore-related parameters please check [here](#langchain-extension) + +# RAG Mode +Our system offers two distinct modes for the Retrieval-Augmented Generation (RAG) feature, catering to different user expectations: "accuracy" and "general." These modes are designed to accommodate various application scenarios. + +In "general" mode, the system primarily utilizes the output of the `IntentDetector` to determine the appropriate response prompt. If the predicted intent of the user's query is "chitchat," the system engages in a casual conversation. For other intents, it crafts a response augmented by retrieval results. This mode leverages the Large Language Model's (LLM's) inherent capabilities to predict user intent and generate relevant responses. However, it may occasionally misinterpret user intent, leading to reliance on the LLM's inherent knowledge for response generation, which could result in inaccuracies or model hallucination issues. + +Conversely, "accuracy" mode combines the `IntentDetector`'s output with retrieval results to enhance the accuracy of intent prediction. We implement a retrieval threshold to balance free generation with reliance on relevant documents. In this mode, the system will first search for relevant content to support the response. Casual conversation ("chitchat") only occurs if there are no relevant documents and the intent is determined as such. This approach helps mitigate model hallucination problems but may limit the LLM's free generation capacity. 
+ +Users are encouraged to choose the RAG mode that best suits their specific needs and application scenario. + +# Langchain Extension +To fully leverage the capabilities of our mutual chatbot platform, we have developed a comprehensive range of langchain-based extension APIs. These enhancements include advanced retrievers, embedding models, and vector stores, all designed to expand the functionality of the original langchain API. Our goal with these additions is to enrich user experience and provide a more robust and versatile chatbot platform. + +## Vector Stores + +### Chroma +[Chroma](https://docs.trychroma.com/getting-started) stands out as an AI-native, open-source vector database, placing a strong emphasis on boosting developer productivity and satisfaction. It's available under the Apache 2.0 license. Initially, the original Chroma API within langchain was designed to accept settings only once, at the chatbot's startup. This approach lacked flexibility, as it didn't allow users to modify settings post-initialization. To address this limitation, we've revamped the Chroma API. Our updated version introduces enhanced vector store operations, enabling users to adjust and fine-tune their settings even after the chatbot has been initialized, offering a more adaptable and user-friendly experience. + +The user can select Chroma as the vectorstore for RAG with: +```python +plugins.retrieval.args["vector_database"]="Chroma" +``` +Our Chroma API is easy to use and can be generalized to langchain platform. For a quick Chroma configuration, the user can directly set the parameters following the same step for [agent_QA](#parameters). Some of parameters for Chroma share the same value with agent_QA. The extra parameters for Chroma are: +| Parameters | Type | Description| Options| +| ---- | ---- | --| --| +| collection_name | str | The collection name for the local Chroma database instance. |-| +| persist_directory | str | The path for saving the knowledge base. 
|-| +| collection_metadata | dict | Collection configurations. Can set the retrieval distance type and indexing structure. |-| + +For the langchain users, it can be easily imported and used by replacing the origin Chroma API in langchain. +```python +from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline +from langchain.chains import RetrievalQA +from langchain_core.vectorstores import VectorStoreRetriever +from intel_extension_for_transformers.langchain.vectorstores import Chroma +retriever = VectorStoreRetriever(vectorstore=Chroma(...)) +retrievalQA = RetrievalQA.from_llm(llm=HuggingFacePipeline(...), retriever=retriever) +``` +More independent langchain-based examples can be found [here](https://python.langchain.com/docs/integrations/vectorstores/chroma). + +### Qdrant +[Qdrant](https://qdrant.tech/documentation/) is a state-of-the-art vector similarity search engine, designed for production-ready services. It features an easy-to-use API that enables the storage, search, and management of points - vectors that come with additional payload data. Qdrant stands out for its advanced filtering capabilities, making it ideal for a wide range of uses such as neural network or semantic-based matching, faceted search, and other similar applications. + +Originally, the Qdrant API within langchain was set up to allow configuration only once, at the time of the chatbot's initialization. This setup limited flexibility, as it didn't permit users to modify settings after the initial setup. Recognizing this limitation, we have redeveloped the Qdrant API. Our enhanced version offers expanded vector store operations, providing users with the ability to adjust and refine their settings post-initialization, thereby delivering a more adaptable and user-friendly experience. 
+ +The user can select Qdrant as the vectorstore for RAG with: +```python +plugins.retrieval.args["vector_database"]="Qdrant" +``` +Our Qdrant API is easy to use and can be generalized to langchain platform. For a quick Qdrant configuration, the user can directly set the parameters following the same step for [agent_QA](#parameters). Some of parameters for Qdrant share the same value with agent_QA. The extra parameters for Qdrant are: +| Parameters | Type | Description| Options| +| ---- | ---- | --| --| +| collection_name | str | The collection name for the local Qdrant database instance. |-| +| location | str | If `:memory:` - use in-memory Qdrant instance. If `str` - use it as a `url` parameter. If `None` - fallback to relying on `host` and `port` parameters. |-| +| url | str | Either host or str of "Optional[scheme], host, Optional[port], Optional[prefix]" |-| +| host | str | Host name of Qdrant service. If url and host are None, set to 'localhost'. |-| +| persist_directory | str | Path in which the vectors will be stored while using local mode.|-| + +For the langchain users, it can be easily imported and used by replacing the origin Qdrant API in langchain. +```python +from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline +from langchain.chains import RetrievalQA +from langchain_core.vectorstores import VectorStoreRetriever +from intel_extension_for_transformers.langchain.vectorstores import Qdrant +retriever = VectorStoreRetriever(vectorstore=Qdrant(...)) +retrievalQA = RetrievalQA.from_llm(llm=HuggingFacePipeline(...), retriever=retriever) +``` +More independent langchain-based examples can be found [here](https://python.langchain.com/docs/integrations/vectorstores/qdrant). + +## Retrievers +Retrievers play a crucial role for RAG. They are responsible for implementing the basic retrieval configuration and accessing the vectorstore using the specified retrieval method and settings. 
Currently, we offer two types of retrievers: `VectorStoreRetriever` and `ChildParentRetriever`. + +We've chosen VectorStoreRetriever as the default retriever. This decision aligns the retrieval process seamlessly with langchain’s functionality. The VectorStoreRetriever is designed to efficiently handle vectorstore operations, ensuring optimal retrieval performance. Meanwhile, the ChildParentRetriever offers a special solution for the long-context scenario. +Our approach ensures that users have access to versatile and effective retrieval tools, tailored to a variety of requirements and preferences within the system. + +### VectorStoreRetriever +We've maintained most of the retrieval behaviors consistent with langchain, but we've also introduced additional content processing steps. These enhancements are specifically designed to better meet our needs for source-based retrieval. The user can select this retriever by: ```python -persist_dir [str]: The local path to save the processed database. Default to "./output". +plugins.retrieval.args["retrieval_type"]="default" +``` + +The basic parameters for `VectorStoreRetriever` are: +| Parameters | Type | Description| Options| +| ---- | ---- | --| --| +| search_type | str | Type of search to perform. |"mmr", "similarity_score_threshold", and "similarity"| +| search_kwargs | dict | Keyword arguments to pass to the search function.|-| -process [bool]: Select to process the too long document into small chucks. Default to "True". +The user can set the parameters for the retriever by: +```python +plugins.retrieval.args["search_type"]=xxx +plugins.retrieval.args["search_kwargs"]=xxx +``` -input_path [str]: The user local path to a file folder or a specific file path. The code itself will check the path is a folder or a file. If it is a folder, the code will process all the files in the given folder. If it is a file, the code will prcess this single file. 
+If "search_type"="similarity": +>search_kwargs={"k"=xxx} -embedding_model [str]: the user specific document embedding model for dense retrieval. The user could selecte a specific embedding model from "https://huggingface.co/spaces/mteb/leaderboard". Default to "BAAI/bge-base-en-v1.5". +"k" is the number of the returned most similar documents. -max_length [int]: The max context length in the processed chucks. Should be combined with "process". Default to "512". +If "search_type"="mmr": +>search_kwargs={"k"=xxx, "fetch_k"=xxx, "lamabda_mult"=xxx} -retrieval_type [str]: Select a method for retrieval from "dense" or "sparse". Default to "dense". +"k" is the number of the returned most similar documents. "fetch_k" is the number of Documents to fetch to pass to MMR algorithm. "Lamabda_mult" is a number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. -document_store [str]: Considering the sparse retrieval needs to load the data into memory. We provide "InMemoryDocumentStore" and "ElasticsearchDocumentStore" for manage the memory efficiency for sparse retrieval. Default to "None" for using dense retrieval. - -top_k [int]: The number of the retrieved documents. Default to "1". +If "search_type"="similarity_score_threshold": +>search_kwargs={"k"=xxx, "score_threshold"=xxx} -search_type [str]: Select a ranking method for dense retrieval from "mmr", "similarity" and "similarity_score_threshold". "similarity" will return the most similar docs to the input query. "mmr" will return ranking the docs using the maximal marginal relevance method. "similarity_score_threshold" will return the mosy similar docs that also meet the threshold. Deault to "mmr". +"k" is the number of the returned most similar documents. "score_threshold" is the similar score threshold for the retrieved documents. -search_kwargs [dict]: Used by dense retrieval. 
Should be in the same format like {"k":1, "fetch_k":5}. "k" is the amount of documents to return. "score_threshold" is the minimal relevance threshold for "similarity_score_threshold" search. "lambda_mult" is the diversity of results returned by "mmr". "fetch_k" determines the amount of documents to pass to the "mmr" algorithm. Default to {"k":1, "fetch_k":5}. +### ChildParentRetriever +We've specifically designed this retriever to address challenges in long-context retrieval scenarios. Commonly, in many applications, the documents being retrieved are lengthier than the user's query. This discrepancy leads to an imbalance in context information between the query and the documents, often resulting in reduced retrieval accuracy. The reason is that the documents typically contain a richer semantic expression compared to the brief user query. -append [bool]: Decide to append the local database or not. If append=True, the uploaded files will be continuously written into the database. If append=False, the existing database will be loaded. +An ideal solution would be to segment the user-uploaded documents for the RAG knowledgebase into suitably sized chunks. However, this approach is not always feasible due to the lack of consistent guidelines for automatically and accurately dividing the context. Too short a division can result in partial, contextually incomplete answers to user queries. Conversely, excessively long segments can significantly lower retrieval accuracy. -index_name [str]: The index name for ElasticsearchDocumentStore. +To navigate this challenge, we've developed a unique solution involving the `ChildParentRetriever` to optimize the RAG process. Our strategy involves initially splitting the user-uploaded files into larger chunks, termed 'parent chunks', to preserve the integrity of each concept. Then, these parent chunks are further divided into smaller 'child chunks'. Both child and parent chunks are interconnected using a unique identification ID. 
This approach enhances the likelihood and precision of matching the user query with a relevant, concise context chunk. When a highly relevant child chunk is identified, we use the ID to trace back to its parent chunk. The context from this parent chunk is then utilized in the RAG process, thereby improving the overall effectiveness and accuracy of retrieval. + +The user can select this retriever by: +```python +plugins.retrieval.args["retrieval_type"]="child_parent" +``` + +Most parameters for `ChildParentRetriever` will be automatically set by `agent_QA`. The user only needs to decide the `search_type` and `search_kwargs`. +| Parameters | Type | Description| Options| +| ---- | ---- | --| --| +| search_type | str | Type of search to perform. |"mmr", and "similarity"| +| search_kwargs | dict | Keyword arguments to pass to the search function.|-| + +The user can set the parameters for the retriever by: +```python +plugins.retrieval.args["search_type"]=xxx +plugins.retrieval.args["search_kwargs"]=xxx ``` +If "search_type"="similarity": +>search_kwargs={"k"=xxx} + +If "search_type"="mmr": +>search_kwargs={"k"=xxx, "fetch_k"=xxx, "lambda_mult"=xxx} + +"k" is the number of the returned most similar documents. "fetch_k" is the number of Documents to fetch to pass to MMR algorithm. "lambda_mult" is a number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. + +This new retriever is also available for langchain users. 
Below is a toy example that using our `ChildParentRetriever` in the langchain framework: +```python +from intel_extension_for_transformers.langchain.retrievers import ChildParentRetriever +from langchain.vectorstores import Chroma +retriever = ChildParentRetriever(vectorstore=Chroma(documents=child_documents), parentstore=Chroma(documents=parent_documents), search_type=xxx, search_kwargs={...}) +docs=retriever.get_relevant_documents("Intel") +``` \ No newline at end of file From 867b497b652d77df441d89bc30cd80cc8400a62b Mon Sep 17 00:00:00 2001 From: yuwenzho Date: Tue, 9 Jan 2024 15:34:17 +0800 Subject: [PATCH 033/101] update embedding (#1121) Signed-off-by: yuwenzho --- .../optimized_instructor_embedding.py | 12 ++++++++---- .../optimized_sentence_transformers.py | 18 ++++++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/intel_extension_for_transformers/langchain/embeddings/optimized_instructor_embedding.py b/intel_extension_for_transformers/langchain/embeddings/optimized_instructor_embedding.py index 48bc64ef4a0..20078ded4f2 100644 --- a/intel_extension_for_transformers/langchain/embeddings/optimized_instructor_embedding.py +++ b/intel_extension_for_transformers/langchain/embeddings/optimized_instructor_embedding.py @@ -56,12 +56,14 @@ def __init__(self, *args, **kwargs): def _load_auto_model(self, model_name_or_path, token: Optional[Union[bool, str]], - cache_folder: Optional[str]): # pragma: no cover + cache_folder: Optional[str], + trust_remote_code: bool = False): # pragma: no cover """Creates a simple Transformer + Mean Pooling model and returns the modules.""" logger.warning("No sentence-transformers model found with name {}." 
\ "Creating a new one with MEAN pooling.".format(model_name_or_path)) transformer_model = OptimzedTransformer( - model_name_or_path, cache_dir=cache_folder, model_args={"token": token}) + model_name_or_path, cache_dir=cache_folder, model_args={"token": token, + "trust_remote_code": trust_remote_code}) pooling_model = sentence_transformers.models.Pooling( transformer_model.get_word_embedding_dimension(), 'mean') return [transformer_model, pooling_model] @@ -69,7 +71,8 @@ def _load_auto_model(self, def _load_sbert_model(self, model_name_or_path: str, token: Optional[Union[bool, str]], - cache_folder: Optional[str]): + cache_folder: Optional[str], + trust_remote_code: bool = False): """Loads a full sentence-transformers model.""" # Check if the config_sentence_transformers.json file exists (exists since v2 of the framework) config_sentence_transformers_json_path = sentence_transformers.util.load_file_path( @@ -121,8 +124,9 @@ def _load_sbert_model(self, break if "model_args" in kwargs: kwargs["model_args"]["token"] = token + kwargs["model_args"]["trust_remote_code"] = trust_remote_code else: - kwargs["model_args"] = {"token": token} + kwargs["model_args"] = {"token": token, "trust_remote_code": trust_remote_code} module = OptimizedInstructorTransformer(model_name_or_path, cache_dir=cache_folder, **kwargs) elif module_config['idx']==1: module_class = InstructorEmbedding.INSTRUCTOR_Pooling diff --git a/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py b/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py index c5f19d89074..5c3cb2a6fc7 100644 --- a/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py +++ b/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py @@ -55,20 +55,29 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _load_auto_model( - self, model_name_or_path: str, token: 
Optional[Union[bool, str]], cache_folder: Optional[str]): + self, + model_name_or_path: str, + token: Optional[Union[bool, str]], + cache_folder: Optional[str], + trust_remote_code: bool = False): """ Creates a simple Transformer + Mean Pooling model and returns the modules """ logger.warning("No sentence-transformers model found with name {}." \ "Creating a new one with MEAN pooling.".format(model_name_or_path)) transformer_model = OptimzedTransformer( - model_name_or_path, cache_dir=cache_folder, model_args={"token": token}) + model_name_or_path, cache_dir=cache_folder, model_args={"token": token, + "trust_remote_code": trust_remote_code}) pooling_model = sentence_transformers.models.Pooling( transformer_model.get_word_embedding_dimension(), 'mean') return [transformer_model, pooling_model] def _load_sbert_model( - self, model_name_or_path: str, token: Optional[Union[bool, str]], cache_folder: Optional[str]): + self, + model_name_or_path: str, + token: Optional[Union[bool, str]], + cache_folder: Optional[str], + trust_remote_code: bool = False): """ Loads a full sentence-transformers model """ @@ -124,8 +133,9 @@ def _load_sbert_model( break if "model_args" in kwargs: kwargs["model_args"]["token"] = token + kwargs["model_args"]["trust_remote_code"] = trust_remote_code else: - kwargs["model_args"] = {"token": token} + kwargs["model_args"] = {"token": token, "trust_remote_code": trust_remote_code} module = sentence_transformers.models.Transformer( model_name_or_path, cache_dir=cache_folder, **kwargs) else: From f46a79ee5bb0d976bb4abdf1ce46775045ee560f Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Tue, 9 Jan 2024 20:13:24 +0800 Subject: [PATCH 034/101] add paper documents (#1122) * Add the instructions to reproduce the accuracy & perf for NeurIPS'23 paper Signed-off-by: Wenxin Zhang --- .../papers/efficient_LLM_inference_on_cpus.md | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 docs/papers/efficient_LLM_inference_on_cpus.md diff 
--git a/docs/papers/efficient_LLM_inference_on_cpus.md b/docs/papers/efficient_LLM_inference_on_cpus.md new file mode 100644 index 00000000000..6f3b8497f10 --- /dev/null +++ b/docs/papers/efficient_LLM_inference_on_cpus.md @@ -0,0 +1,129 @@ +# Efficient LLM Inference on CPUs + +In this tutorial, we will demonstrate how to reproduce data in paper [Efficient LLM Inference on CPUs](https://arxiv.org/pdf/2311.00502.pdf). + + +## System Summary + +Test by Intel on 09/19/2023. 1-node, 1x Intel(R) Xeon(R) Platinum 8480+ @3.8GHz, 56 cores/socket, HT On, Turbo On, Total Memory 256GB (16x16GB DDR5 4800 MT/s [4800 MT/s]), BIOS 3A14.TEL2P1, microcode 0x2b0001b0, CentOS Stream 8, gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-10), DL Models, Frameworks/Backends: PyTorch/ONNXRT/LLM Runtime/GGML, Datatype: FP32/INT8/BF16/FP8. Using 1 socket, 56 cores/instance, 1 instance and batch size 1. + +Performance varies by use, configuration and other factors. For more complete information about performance and benchmark results, visit www.intel.com/benchmarks + + +## Run Performance Step by Step + +### Prepare Intel Extension for Transformers + +Build from source + +```shell +git clone https://github.com/intel/intel-extension-for-transformers.git +cd intel-extension-for-transformers/intel_extension_for_transformers/llm/runtime/graph +git submodule update --init --recursive +mkdir build +cd build +cmake .. -G Ninja +ninja +``` + +### FP32 Inference (Baseline) + +>**Note**: Please download the corresponding AI model from [huggingface hub](https://huggingface.co/models) before executing the following command. + + +#### 1. Convert Model + +Convert the Huggingface model. + +Please make sure you have downloaded the model into a local path + +```shell +cd intel-extension-for-transformers/intel_extension_for_transformers/llm/runtime/graph +pip install -r requirements.txt +python scripts/convert.py local_model_path --outtype f32 --outfile model_f32.bin +``` + +#### 2. 
Inference + +Please replace `build/bin/run_` with your model name, and fill in `-p ` with your prompt. We provide several [prompts](../../intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json) for different input lengths. For more details about parameters and their meanings, please go to [argument description](../../intel_extension_for_transformers/llm/runtime/graph/README.md#2-inference-llm) + +When running inference, we recommend using `numactl` to control CPU cores in instance. In this paper, we use 56 cores/socket Intel(R) Xeon(R) Platinum 8480+ server, and we recommend setting `cores-per-instance=48` (best performance from our practice). And you can try to find out the best settings on your server. + +```shell +OMP_NUM_THREADS=48 numactl -m 0 -C 0- ./build/bin/run_ -m model_f32.bin -p -n 32 -t 48 +``` + +### INT4 Inference + +>**Note**: Please download the corresponding AI model from [huggingface hub](https://huggingface.co/models) before executing the following command. For converting models, please see above [Convert Model](#1-convert-model) + +#### 1. Quantization + +Quantize the converted FP32 model with INT4 as weight datatype, INT8 as compute datatype and 128 as group size. + +Please select `group_size` as either 32 or 128. For more details about parameters and their meanings, please go to [argument description](../../intel_extension_for_transformers/llm/runtime/graph/README.md#1-convert-and-quantize-llm) + +```shell +./build/bin/quant_llama --model_file model_f32.bin --out_file model_q4j128.bin --weight_dtype int4 --group_size 128 --compute_dtype int8 --nthread 24 +``` + +#### 2. Inference + +Please replace `build/bin/run_` with your model name, and fill in `-p ` with your prompt. We provide several [prompts](../../intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json) for different input lengths. 
For more details about parameters and their meanings, please go to [argument description](../../intel_extension_for_transformers/llm/runtime/graph/README.md#2-inference-llm) + +When running inference, we recommend using `numactl` to control CPU cores in instance. In this paper, we use 56 cores/socket Intel(R) Xeon(R) Platinum 8480+ server, and we recommend setting `cores-per-instance=48` (best performance from our practice). And you can try to find out the best settings on your server. + +```shell +OMP_NUM_THREADS=48 numactl -m 0 -C 0-47 ./build/bin/run_ -m model_q4j128.bin -p -n 32 -t 48 +``` + + +## Run Accuracy Step by Step + +### Prepare Environment + +```shell +# Install Intel Extension for Transformers +pip install intel-extension-for-transformers +# Install requirements for running accuracy +git clone https://github.com/intel/intel-extension-for-transformers.git +cd examples/huggingface/pytorch/text-generation/quantization +pip install -r requirements.txt +``` + +>**Note**: If an `ImportError: /lib64/libstdc++.so.6: version ``GLIBCXX_3.4.29`` not found` error is raised when importing intel-extension-for-pytorch, it is due to the high gcc library requirement; here is the solution to find the correct version. +> ```bash +> find $CONDA_PREFIX | grep libstdc++.so.6 +> export LD_PRELOAD=:${LD_PRELOAD} +> ``` + +### FP32 Accuracy (Baseline) + +There are four tasks/datasets that can be selected to run accuracy: "lambada_openai", "piqa", "hellaswag" and "winogrande", and you can choose one or more to get the final results. Here we take [Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) as an example. + +```shell +python run_generation.py \ + --model meta-llama/Llama-2-7b-hf \ + --accuracy \ + --batch_size 56 \ + --tasks "lambada_openai", "piqa", "hellaswag", "winogrande" +``` + +### INT4 Accuracy + +Quantize the model with INT4 as weight datatype and 128 as group size. 
+ +Please select `woq_group_size` between 32 or 128, and set `woq_weight_dtype` to `int4_clip` + +```shell +python run_generation.py \ + --model meta-llama/Llama-2-7b-hf \ + --output_dir saved_results \ + --woq \ + --woq_weight_dtype int4_clip \ + --woq_group_size 128 \ + --accuracy \ + --batch_size 56 \ + --tasks "lambada_openai", "piqa", "hellaswag", "winogrande" +``` + From ccd87595b78c2f4af6212af1416d6ef996580872 Mon Sep 17 00:00:00 2001 From: Haihao Shen Date: Tue, 9 Jan 2024 20:14:36 +0800 Subject: [PATCH 035/101] Update efficient_LLM_inference_on_cpus.md Signed-off-by: Haihao Shen --- docs/papers/efficient_LLM_inference_on_cpus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/papers/efficient_LLM_inference_on_cpus.md b/docs/papers/efficient_LLM_inference_on_cpus.md index 6f3b8497f10..ce7794a4371 100644 --- a/docs/papers/efficient_LLM_inference_on_cpus.md +++ b/docs/papers/efficient_LLM_inference_on_cpus.md @@ -1,6 +1,6 @@ # Efficient LLM Inference on CPUs -In this tutorial, we will demonstrate how to reproduce data in paper [Efficient LLM Inference on CPUs](https://arxiv.org/pdf/2311.00502.pdf). +In this tutorial, we will demonstrate how to reproduce data in NeurIPS'23 paper [Efficient LLM Inference on CPUs](https://arxiv.org/pdf/2311.00502.pdf). 
## System Summary From 938a3f069bfd72574aecd1407434876ec8674bd6 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Wed, 10 Jan 2024 14:05:35 +0800 Subject: [PATCH 036/101] [NeuralChat] Add ut (#1083) --- .../retrievers/child_parent_retriever.py | 39 ++-- .../neural_chat/assets/docs/sample.docx | Bin 0 -> 12197 bytes .../neural_chat/assets/docs/sample_1.xlsx | Bin 0 -> 9206 bytes .../neural_chat/assets/docs/sample_2.xlsx | Bin 0 -> 9186 bytes .../plugins/retrieval/retrieval_agent.py | 27 ++- .../plugins/retrieval/retriever_adapter.py | 5 +- .../neural_chat/tests/ci/api/test_rag.py | 188 +++++++++++++++++- .../ci/plugins/retrieval/test_retrieval.py | 2 +- .../plugins/security/test_satety_checker.py | 35 ++++ 9 files changed, 270 insertions(+), 26 deletions(-) create mode 100644 intel_extension_for_transformers/neural_chat/assets/docs/sample.docx create mode 100644 intel_extension_for_transformers/neural_chat/assets/docs/sample_1.xlsx create mode 100644 intel_extension_for_transformers/neural_chat/assets/docs/sample_2.xlsx create mode 100644 intel_extension_for_transformers/neural_chat/tests/ci/plugins/security/test_satety_checker.py diff --git a/intel_extension_for_transformers/langchain/retrievers/child_parent_retriever.py b/intel_extension_for_transformers/langchain/retrievers/child_parent_retriever.py index df5d1873ea3..05af66a69f6 100644 --- a/intel_extension_for_transformers/langchain/retrievers/child_parent_retriever.py +++ b/intel_extension_for_transformers/langchain/retrievers/child_parent_retriever.py @@ -16,10 +16,14 @@ # limitations under the License. 
"""The wrapper for Child-Parent retriever based on langchain""" -from langchain.retrievers import MultiVectorRetriever from langchain_core.vectorstores import VectorStore -from langchain.callbacks.manager import CallbackManagerForRetrieverRun +from langchain_core.retrievers import BaseRetriever +from langchain_core.pydantic_v1 import Field from enum import Enum +from typing import List +from langchain_core.documents import Document +from langchain.callbacks.manager import CallbackManagerForRetrieverRun + class SearchType(str, Enum): """Enumerator of the types of search to perform.""" @@ -30,15 +34,17 @@ class SearchType(str, Enum): """Maximal Marginal Relevance reranking of similarity search.""" -class ChildParentRetriever(MultiVectorRetriever): +class ChildParentRetriever(BaseRetriever): """Retrieve from a set of multiple embeddings for the same document.""" - vectorstore: VectorStore - """The underlying vectorstore to use to store small chunks - and their embedding vectors""" parentstore: VectorStore - - def get_context(self, query:str, *, run_manager: CallbackManagerForRetrieverRun): + id_key: str = "doc_id" + search_kwargs: dict = Field(default_factory=dict) + """Keyword arguments to pass to the search function.""" + search_type: SearchType = SearchType.similarity + """Type of search to perform (similarity / mmr)""" + + def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) -> List[Document]: """Get documents relevant to a query. 
Args: query: String to find relevant documents for @@ -52,15 +58,20 @@ def get_context(self, query:str, *, run_manager: CallbackManagerForRetrieverRun) ) else: sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs) - + ids = [] for d in sub_docs: - if d.metadata['doc_id'] not in ids: - ids.append(d.metadata['doc_id']) + if d.metadata["identify_id"] not in ids: + ids.append(d.metadata['identify_id']) retrieved_documents = self.parentstore.get(ids) + return retrieved_documents + + def get_context(self, query): context = '' links = [] - for doc in retrieved_documents: - context = context + doc.page_content + " " - links.append(doc.metadata['source']) + retrieved_documents = self.get_relevant_documents(query) + for doc in retrieved_documents['documents']: + context = context + doc + " " + for meta in retrieved_documents['metadatas']: + links.append(meta['source']) return context.strip(), links diff --git a/intel_extension_for_transformers/neural_chat/assets/docs/sample.docx b/intel_extension_for_transformers/neural_chat/assets/docs/sample.docx new file mode 100644 index 0000000000000000000000000000000000000000..aaeb4634d78aa92d84b3b0a786a6e46415b40ad0 GIT binary patch literal 12197 zcmeHtbz5A?_H`q{A-KCGIE@7e?(VL^-JJxt;O-V6I0SchcXtTxZoz(?xifcWa_4=& zz`f^>dir$LTBrK--nDntUUHHUkmvv?01N;CAOWm>owU>d0|1C$0RU(K7;sHN8*4`+ zYe!urH(MhIZF*NLOX6%u@V8$8;GpsUyZ(#cKy}=ZbtfaT@NN8$*U@##`g_?$)Zn2! z33Lkk(AXcrRi0w{nx9+Iz~z;}qTwt_DVT3pnN@oICKgjHVNq&aC=Ri>6MSNI%-+!~ z%x%$m8RAfG#4)b&53qS#vNX5gNU?!2lGSv@j3~m%rsbshVW`~!BBL*`;b(D;Y0uq4UDQnz@8Gmk3@{MX%!m-tWrzV@UCqiJT8LqkYX@4obM z-&uOFx`O}!US1#oa({D4!f4#OQ;?m>fIJQ!xm9cPQ{M9JEVy? 
zt;cDf;U*{}hP1%W-s^N}J~X=m;scs$;7D!{X*3-LdI~RqVGN@ zjkKd}Qw!Z}=fj(kQjJX;kXPko*eTE5CV4V{Ne0f$nu}?Egf8M1ZVe=89L4d3Gv=*G zWTuOX>|CwX?p8X8t!D~1r@E%awPa+VNQ>4lfx|s(o;YyNY0K{k4xok2f(>SP_++ew zs`0!VvV+6Rc1ogNt-E1^m!{2s2?|XA=^sIR@28MJo?{3P0HAlP; zG4}~<;agV@WjPT2(YNF4N!})ndrOA6_*I-&p*YR8sokg-5~fbUPX9xJa}wHh?U*$n z4JFnxh6QNyp^-m;B-r(E1I2U(lIQa`$}nIeiX^nWaI~UHnULDS+Vqdv>+*31-5My# zrsP)+E=lQ9Mbf#&joshmbQ8iUB5_q{1K1_9Hpz>dAWkJA^tAYqh+eI?PnoX(FF6To zUTSKMq64Akd!CqFxahcElJ*)1D$P)8M6j@)Yt`L4vwZQoS_B+Ww8{)F3jv<+$a1si zCh|Z^3W%v4M@^Z@b2a5OQksNV7N2X*oA%pLEr6q8_8oPIZV!g0w!WB@qxN1x|2GG7 zrOnYtQjvud2j~s;0gu`7n6U{JX-Tfdp#hAN3qEZ^`zH<)9sL;o{4Ex(V}B*XSYJ_F zE@UP4LFNyca&K?qy($%XY#hQZBkhcBix>O%EZ1=Q1NB7E)%oS2LMuMLgUv|$g!wGq zJBmc;ci;ZHml3ZMk^TqJNo!6U>asysmTn>8NYQ}8!RB#`cSl8uLZ{dTm01(EOW`V1 z+-J7-I)qy^cBA_#aJI)dS3pj?UrbPe1#VpbDFjmi=O{xW52i7C_d#&35i#I2oboI@^wnChG8<}2-aFca0z<4+9m zj+@I36$uC6^071z?TCD`dD09{R*71yY?t<}`oeU88R){`R%dBCQ{1wP)fS_J{01Vh~&hxJb^H&%8Biew059<&em&n36^$|PwL3IG@&1^_TX#_*ffIGP$+88Q6U0)H;M zN9vNHc%sOy&=*934MXlDdwM0TlVTSZb8M?MevvH-7I~!^W)mxmHH?8DGVNg@)1m|s z8n%%I5P;oVF+>LC;%P+D2ZZ9xbl2UH+J3N2zRJ9Z>*iqVf*qE|j*RxFe?4Rn5y5XGHB=*!D<6R(uJwi=SkMAYjzV!pij#0L$^FE) zp-UQoi!I+;req)}V9+;J^aB+hMeYQjWYJD99r&K7{WkYdj+|V|aXIAKVdy>9palsv zyrF}=;8R_kCR|uPD_vh#SIw8*d!3x7*)lLC>rJQ;euHtf!zlm@ZikE_ecq@s-C4ps z{#z>z$J^8=_nG$HtGT)}ZCww9W~f|g8+cxPsANQkFV*`t8&5h-bMLM#d5kuaJ=V|VRtQ9~PuY4mx6JgNYuv)`ovygNRq~6V@iI+8$FX`DM+!O?u?vje5 ziqadq`igkNBU1#Wu{V*25uX{q)dLTtsTnL+2zoi#+)uc9%NZ#z!%2V}TuRc@he9S} zT)C<9s?-|k;-DuqF*2j=)78UJU!*S252uEk+GV3Sf47ErydIZhQw-^CUJvIZu{Ou9 z(S2>t+r@KUkN3I4lGZ1Md5h$-)=>hS~EJ0|5FEJG@U zkLXDPwQu`BGj~u{_pn4R3o}(lFYBH~>)DcYOp)Xb4@VM`mMWzo&Uooxz;g#i5pdEfSiW&mmwCJt z6?g=eZM?JWOwPzWw&^Q_TuXI&F)vo3EmJZvks2goda5rq0mL;pT5i?vs!G@7NYqsCYIH3TozAed3WnJB2ldu&yo?3`8(l}zXB{f}vcWH2qK3Razc zQ}eG1oikZAvGpYS23`RgHX{8nG&5K?uG+R;UZ?iGd^@Yq{p+_0KHblwwH$oYYQ*qK z5Rl4&bSu7}H=d=02;R`K)eTAy3#K+?HOpUhle<~Gs+NDaBPzS`f!myUElW2)Rp@fm z9;?JDJYZPWO4Vtl!V0!&aIufY>cJvwtzR!nJ6@HtQ<8VTHpe2odtVa6QEWI$jnmXG 
zT3}fDU6>bKgL`j6<9la551PP!tCn0djpG8@`cY&MW5Ih3)2ApiOn!%kG#_}5Nvj~{ z`&ZEup8m$=Qp#n>A-M-~?97y3Y$v-^R?K}zH@VqlM2lx;9Hf!?nDBx+Em)C^Qlv!j zMvlN%HCj#re()cLc*l4Up*HkLp*q0_@0g#_kzb9<1PCv6X6H_0RgUXQmbGth5nqcf z-ztb8TO&^zZ09R^Zm%fbIz|IWtmbH!ENH&vZ<_fD(JxD(HZX>*U(Dn480pqAC*oek zQPrBUXcBeJQ3qRYL8T;d?_!T?tw6Jqu^iT;-3>sFxI+E#A5{7rDBedfkekIzUNu}F zU$^tMu_vXrP8BGh_Qob#=g%se??NF~&)cURgh# zn?%iN9$)NKbkkrrYV@8CGVmf**{B_eVsAA9+-O)IEvb+GmLNLex)wtqFJywEvHJGj zFr(Ayqx74)*CUP&s>WUo$z5_B``vXXO99hD^k$e^#0Rt0yOEJD1G{QQZfhUn^G-&z z=5)3jvo*hOVF^TN)F$yu18e1mM5N90)sVLmG=q#M2<#T?I*g}_;PsVN4_0SGt(;w7 zleWE{IY^VpD(o!7jPRYI8DK%%n^1OcU`Xe9LvYLAbL!4N@>(K3f%mPOQ<;MK{fc|x z;luSodzs3QmPJ9vkg)GXncukmi5?s)KG~G@)vd1d6)QX}f9^5LJ6x~CoIO1$uKDhA ziv%{rWjY;r?A|o@w&rj}Z{YA&BiDI+FaAgPmj$95h_)F*9>;Vt#GA(Jb_)cJToG0n z37wqmy}HHRJk?^S7UMeBv&PqnwSnnnt*SdS&CbvN7zAPK{3wJW0RV1=2WDngBFCF8ToH0fVcK)0J7=5Yec1;BeN6ZxRm?%1sF&7k}PqzdL z+1o4&q-N2H{wd53?D&MEcU^tlIev5rAuv3*A|^iX+PSb^ZKO8b9(%H;xk2QjFd$># zkF`Lkto9A9c|DYeV$9+$PXpf8hp6-1QkNdi-zw|q1x_ay0HMp#V@MD)?KhdpiBjB? 
z;8n<^nFNA)?(~qQS|UV-TSPAEbFi6WB(sDHrNw9mW47uac1gD^Zma|oNNuop7Eksh zrAVMVl!eW==jX>N!PGcnjw6vOjHd68MLK9An$RlQMVcwO(c~nf4RY6gv7KO1S8edQ z$v96upgy{YRx2U%z#Sd1Tn;S3becd;;h(Pc0uR|gedgLG=~;5k6D6F)a%OpIFqDB& z$_hP@rp+oYw$YCk!B7yzz!7<@{Yoj?2ytlfbhG8SQIa|sa()d+nO zY#(9$2Ab{tC02;(^8&@HW>cY`3oQ3{c^J0&9@Yd7E_72y{`Gq(fLjRDj2MJ)iMdqa zF*`>OjPr62UNH6*oJj@n6}`ORHTR$bow@BIDf_(Jy!;ME%-cD&Jk~*5U#)^5pdzsGpTy@hLcvp|@WgezL@C>inN!m8 z&YncTIV;IuwZ5Z}z4rNzflo;h>Dm!08$Jn%ZFTx10Kqd3uw%T-KpgWR7_N%F4>for zle&RWSgfMTskNiPoBvMc!MF1n%EUEi^JCP$ zAZaz`(T<#|+@Wrhc(K=u(kSC7{5Ga;z_{q#Iw@SX^1QM6I3aAp6uBEp^5`f-!hmG| zOO^I;p6bK}1vyT7S!a)AzY~)><=l!{BXjvY!fe}D&Ez1)4_4h6`pM*`nFC z+B21~V%?rhb|Xf;1CFpfmUw+D+3peT5yHv*`r| zV8Uw-dwyBQ$82sAwt-JS?zTCy++4-v(41bP9A(?LQZz}$)qI60(RtqnWRoenZ ze?vo$_SJSmFQDIMWICf7_(JQ~LmEuEsfS8FzzAnN*JX&G{yMRWSZlP?G);^4`Is>8 zVa4m72$WX;XX5?HiF8l_I_FSu003V9k$5{ex>*|iw&FCWOFJyFqXwR8*1Y)SP91eb zZ@=cS3uoSDA6R6y>qX;8Wfl@Zoh8nqD4fsGZ3BpjN2Pw5{}9O#EL=$B?Rw5O@fhX# zbW9aLA)A3EV1(s2k=n)IU8P1rnyQP?y5_~>`u=lBj1g=s^(K~|i4KD2$Coj$#~-db zabq!9(NKmxZn(M#m_`j;L`b2o9f~T=@{$?*?@Xv=)eCiZF7*a4f@Us=RrX`z^{KzU z%?uI^3fklkA83-Bwxra5mhUN!Gsz7n+R-x49t;Uk!?aj5z*@{1j5YeEuY>NMuc&^m zKv{`s>c3iRBDARbaEp{@IjTj&RY?G9R|<~^y&nz&W2@0IC7A7*!AK? zh~dkqnc(w9#|?lBb7({!j(1na(5Y3}2IJ2aP9dIfuo}em?!LQ}+P0)-6)z?<+aQH! zdpUS|_p-oyAG>1htaqmKkwpGnl*fQZ9dpOXGjWdLQ1zy5jG88N?>r5o0{OPluQ6U} z52}ozN&w;NeHzBL5lPa+PQH7po0lHXJT?wiZq>b?Soa50T#t3PytU_nt)BIQMWx4@ zh4KY^qaQU@(v_C2tG!izr)YIvSblAo(5kv4d4<*o`t28f1G$L!>1mQclWpp`hK}Y+ zf9im(fp|jjXQgjREJTE}28vIoVe=Q;G)qGip#jI}sqtDSArp7wSA1}>Nf9z@rMjOq zah~Rjl$^*g<(}F)kbF!zefAug3M*4Q6USN$?tI?FDzw=F%dPgq*KAJgv+UCw=%G$6 zQ{6ZH^S^75!;YoBHinNrBZANHU~sKmdEPZi^*BJR&@lROx>;3`E>M=gx&C*BDt$t(wkneQF>Rr#9%g0c)|VcZr?mIo;*Ji z_O4`(cGW*rxnR3^RrWd*=c+j7SS7@_XtOC64J!?hRqY*18zdVqP$veOAh~nBSr7-8 zieET@u(Qz^K0172?oMHObk#}G2P#yVQtsq!?XTEnJZ&k^xpl_~UBdWq%n2u(2np;s zj+$`ost;4WVI!DU@|CZZ$cJ=r7}znS&7kP+s-uh{4;b;Ey}s;B5<-?bi0f%MSYz$! 
zzv#k}zapZ62<9pIx+PRn#-cb}`C|?WbE=J{Jn1>!r{{_R0e_?*upkd5TuGV?w>(9J z#d_xPD{kG-53J#)%jOy{c1>8h)l{q6PGOCiO-(U_T8Vork-!op}njI2!ZW^Q^>_4&C1i4>wpwm;QLYhZH`jg-00O!p24FXXW}t z=UkjPVC>^8tvDv7GF3TIoiII6jYg?D<6GOdq_*G{XoYyDzV#{@F{J{M`L0-f6qkdF zX{|Q#>3yxyb=}NRiO^yTuK2iQ;dkg1mQPR3TMhI>gzm9CG*mNWvU{InnN(-A96;3{ zihaX-*?s#CI6aG80-;;Mrymi4dkJ(OPsh^@NpmKSr}zi`!}e5==WgqCvp?3b80Fz$ z!RTd^U34B(Izj@5jK3Fv6)Os=MMEdrE+EG;a`nB;h>d_JCZuDfm!1+d^G8=hS zvY>;jXhr+7;I_`|T=gE4oJAIcVy9It~l$`U4CD_5Prp)WzSzAHhrrOjWl_X0*{ zKz+5%Av!8pPftlg4LcqIQzX|XAR2D`1&IK9_J!?dqmvFNQVnVQK?$_uJCk*c@AHu# zVTR91F~JmJ9it61&miy$yR*#0>TnFgh8Abt#V23!p`>wZRN@%Q435eq&t&#R`-*W) zugZMs7(7^%S`QnwgI`0gWvR+e9qn~Sr9OucTB!?ZB9t5_fs(GdL}+Wdb<$$-(0LR~ z+|p$GMtvAdf|Bd@D3(`cZYcJkA}n#GZe`DL@iH`tVbHX4KJC~AxY^Ybedz1)#?s4A z+JHxg_i+;4c?hjSVvZo-wPILg6Z8^J3)-hjUe~6W`CzVNY}h6I)5zs-F)pNIoFbiU zYtCWSJS)MLmtCR>yeu;QRkin3h38zcrzKXHR=#+tR;gmC{drWbvuvTxC8*lH6%87I znf)CcuTSqT?>2$-7mAF|7mAz@gOR4p^Y5M&lgQpvnl0Y2xyX7q+h9$kGwo*5D5ux0 zHLOoRWqreIu^ogXWVMfI(yd<{V0yMYO0$0aJnGgLxtHes_*o{oA1^{e+*4wAc}vt$ zM!^%3Q?r8%oL;lTRjbzboMZt`L7-AOQrVkm!i{5H^h-D#ifPV`8r2=j+Z^9B;v50E z#R6m>HfkYI57r0voKDv?dA(1cI3!;4g^*sn70QuW6^Yz75JVHYyF@8J@(s7Xh zm}>cB_lh8SJ+9FLMjj9XRsXMjB;Zy`pZP#T@QVbXSIhtYRgL>J<8lEbp@|YU z5eNbcr2hJ^sr~+--{$k5Q#5D5ykVpc>%+n54Z_Ay5D0^#`uE)a1^Ib9SU}9x|L5$j zE~{FF_M1jL{do`fTMXJBKs5u!wPn!;?P)ug=|)vgm0J@Op10S?tURu+mWSci$iR4k z1;y!%p#%E4RaBqGv->{f2Q(h~mfb}9nK6NRi#&kmBr9r~B8OS(g*Exx3z{kur4hbCl*@i%IRk03k2tcUt! 
z*3ArKE{FD}A~so8mv!M_tfnSN((YlcrcljmWctRjwS_Oy8jE({ID%3hzWx24^?>V~ zakvM0Jq5v3yQY$0@qhTfgkbC0Bccb5#8(8oGgp)F_ukVsTg#Eo2QXG+SLqSlf90a6f zEr^*=5Cm5-7i$NN_zPc+xYbzpkyyCGJZv9bXByu!*aB=fg~=PnFB3Z`*@q@Fiqc@P^fAo|pfJobZ>jA;0$$z71+yz?*g-!f};t@0^ zm-0`F|1bX^EEoTN_|He=Y7&sbw1S(*R5pk0wKANfV1&k8zu0%4s*l1`cwLZZugowM zwBYlcJU+(jEoI(LWinhRCEM`15UNTP*?5=w-1xm4*fA-RD&H4ZdU024S#Yjz)zYjn zh0s$x7pTLSThrb)D|$v+91&Ey!86rmJG#%-Sc*P|)beU1oIG|_&RoJNo%KvK?yS37 zz|D3FqaM5Cy!$2t{@`Blz#Km#o43zvT`2@Xc$f)a?4H6^0hhbh_FZAM@?gq*|D|B7 zs?y!X(X_2{z2{--VClB`yg_iTFF|D zmwuFee|q`HO6DmXm5dCi#}ibHLH{GWcQMje{L7(2+LW#ZNOb`?yX|Nd9B9fLM5j{G zL+I1#{jdc6d4DF5q9G=<_+;vIvC!d8|2F33c9Wjua(i@P3k@9SssS5OG1_}h4ZCfi z=V8g55e-&&8{TL_kp6~GzoEV*`JFn%q3Y*oS%uD!{Na5qCE#pw+*cagkU%xR-4Rqj z#prxJvUE72CHF5AGt?I+h%-@_)ohl zRDwk}1(3>O9HfOp{p0-`g9K1Ydis_|ztuG2q^*}2kppgDxkLLrsKpu5d_&<7OM`hU zXDI)buZ~4nQ%G2W*ldVL;Pn)|rLe@hDal`Os7p&t z^h`|$Ac17c%&;BtlfG*8Glz1>PiBg`aunwJ_c5x=CtcW^=uX&As397q>_#pj){wEY zyFn?+d`gpX&1}#bm#7*!j`9}5$-wcd0*wq}EkfYL9V9vX%wR!00Y9_0FyX6xlpEFT zA4o|E5Z!OQ<0@Aed#ernqZW40^s(P?+q+I|KB@z`y{Rnak+Qp?bp;b8X{my#9)#FR z3Iutv%FO>E{A%qmN~A#J$5v9Mgr-wV8)Y%wg19)5epa>^6%C0U%5*nywAb$S`v zoHSk!rMvd)Ao5kzCaiNyw`&-hi%^!Pm}EAhLzFX#^)?1d^CFSbl&!%c-v}Mu>mLcG zCq>qRfn_A+K*_YM2Vn2HM@EiH@E6XcQ`b*!OH>sf6JA>O(XDOW=e-lt|dV_Z~9Pj+jmHvwWqANpr)(l2X=oD1J7Z;*^EWN#XD~B z>+Qw4*qrya=SY8?seoNQcQW(h=F)zAfpK~-GgjvC5V2(SJmHbw#CU1)kNbOp@|t}+ z&qcIN)8@dP5Fgkpi2JDQ2rdFdm-A)Bh1T&ND*;nJ zC`Q!YeywglB~~AT)Dz8P&g1K({n2SU%lR4LRgzG)Qr4(d-IVQJ=|3@VtH_tI?qEvR z^70I3{$mqZ0aWIb%B=+LR@xkRZu{8w%Cn~FJ)otvsq#uc#Hfe)aOkF(;Of2DI#x-<>4RCz6ZQraRG=lv5rvCg`(y?)g{OVGy`@B!hjU!b_{>q)~p`nldz_#5)tTV#2 zz9^R7Fmg++EolkQQMEY4T2h$;t3t(w@em=#>0#yggnE1XwQT95#<#`@k+7u#PTSBO z&8lfclU~B{vFUB~bg~&&>~Zt%WT+Q&){|0kl62)h0h1JZ5-X&csN4WoEY{wLL-_fI zFQjd+X0ggGg^-yhp6^Cj%TfHHPs0;PC-m<_6FP)dbB~UPtY*U@@N~f>%0(JpbEdn3 zSt4wxFRhB`MwJpD5P!Nnw&E*K=vVB|c1mXbX0uep}B-exk7$k7!_v)5jI2ql@913T zD1Hwi55fzm5c99&QZR5jQ1bN8dgg!qDSx*Apl+6v{5!$l)t7%l0f1Ogmi8Ad=C8nC 
zWodsxt3b+${}8GD3jez>=T9&Ia0dH3{Qn`-`IYHcvBsZVl%UGhKT0=#rTDef^(O@l z+V2#9Eqwh7|Mk@RPk1KQAMk&lVgCyLH7WiR>_PTB_?Hyc7B$UpeI@UxCcyr_vW5&;eRqM`?fF{U0Ps B)bIcR literal 0 HcmV?d00001 diff --git a/intel_extension_for_transformers/neural_chat/assets/docs/sample_1.xlsx b/intel_extension_for_transformers/neural_chat/assets/docs/sample_1.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..251aaa34f6d96b6982581c6c41df3cd87cd2272f GIT binary patch literal 9206 zcmeHNg+-68oH&FMmnU2uB91d=w|4Kp#>2{I;1Z{pa zExe_k2CO1bfneWU$Fj24BA&oN;W+N<7{P>%HPFlOaFtCCIg-|Ep4ineSCvLb9ccQz z!RaDJp%ubZ&ogOkhPcQX64!EnVR#9(sr50X5-(Y0VZ>1ps3*>rrOjL4+p~=+Ch|!2 zqLH7-%7{%bQ!Ic6`f(bdY8zQiT*~b*;?pHcl%qJf|DJl`lKwLRMb{F!d1Gd5oVhXy ztv%~f4xAv%8nU@w?p0-+81Q$vs=Owyp6`gb_v}6b{aaL(4FPJ z4PmeC(XV=ey9db|e%v>x0Kn}n5*Vp|N!l}c4<^@^ zSmxsTf^3EUrAIR32T~QdBkbB_7zDW#A;eG80zf@Vx{Ib~WeAfJ>N}+&<#ms^5UOyk38$8&pP%4Rr4MUzVhgLjL})#x8Dzqq$*g1XQ+RIl_>)ZEy(T zz%Ao$$KeKcvNZ*RZGYsg5=}L5Iw!90>*`zoE9DL`E2w-C9UbVcL&R*EPq>EQGBS&Q znmtTI^Y(HW7nbL-F-x;=GVFb`ySrN7xcD`#B3pvnFbLgfx-U!m1A$hF#L=a%scSq9 z5fC4C01f$;Chxp(d$VIR*zMVIJ`3=d2w~DJ!eUkE0S<*XFVnNNSAC&nyq9Nc(f?c( z)|dD^$uik&t~H53D+`qdm#SlVkUrA7Yn3@a*hUO_C=Bs}+alL)?EBdxM?t);B3`r1 z6hMVBsrm>X6Zdz4)Utc*eYAn)ZuMr{$Qe)dr$aid-4Lf{v;-qjQoIY<0kbx;ue)RI zF;^1^w}&D7Ty!J&&YjNKI3+>1a#}YCgQ8HA%RwuWx0<%}{$G6XOCif^SvQj~i=*uL zGVy`$5Bgnlxy{X`ie9`YRFZJUP;brcMZ--h7%=JP-WT#6If$DS?ekzTMy#PSkBERW z39SSp7VZ@KX=?GEFny&qT}{3zOwHQg@eiV=Ad#bp3(mkxB1`I%;JvrjZ@6_RKZM!% z3bP0~ha6@do6)95Bbag}`YoJ}WsDv5rRr+( zKHDl>C0Sihpo-v3}^v4d+N<>J20Q|V2)wr6qxV^bWU_)Jlev}ZPZ1qnI)5E z+ZSFC<8rpJ-=Yi5LTU+3WvFL2y#y&%x-($eZw9xHca|B7yKTx8%|b^>MmsZ~)K`0D zTE{NLY`A#E%pB|YQh3Dk?CQtl9F*s5<)r)Ui-HVGPqVHgt&)Lwn}%`j*teN_jTHD8 zP#Z+V)~w#-n#tD5Tg-pMLBP|*ltZ}xH^Q+%20%xE;(Ov9;!GXd_l(L9 z2!nimlh&lU_t{zEasb#i_nmd{Xz#Z#{k@Y*@H_p_(5cg);DCe#0N}wxCkgzDKSC$O z%EH1K!trzC`Vm7j67;}x)P%iDj61^Ai)P;alM(7JrA5c~AU6+&GXWWdRfJ*b@z%@x z%Bva!k~tuLV}YCNk$|=oH?-2e`y~l^Kr8(2M6pMTz;q>XzZ-Y5A>fP5$Mo8H0Z_`0 
zv8Ks&mBE$@J#8d+XiyJP^-1dxDdY|6kbybK$}yK)3ljUzwNu|VAm#cQsDd{pUlB4h zNO`53Pq2T9rdHj^~M}Dj~yxlHid9TiXvL#BIqLHxupDz z&CKO`N>Gaz4@2fVBe8&ee=-dEj!av#TAF>*d{er{r_{I&`%Y*$SsLPP#?_bOw#&FE z(^>nj-dbLp*w;faeC9>q?ZS2&bbf-;qIV1#6I;1|aP^Tu0I~E2Q#x`P=RKNfe74TE z5aQKP^~%a47x8gj-l2~kFxu{Ov+{?i;lmy|ey=4ikkODc)neOyZzKBNnAfHbTkcWo zy#C<#(-D3ymB3+HCAJrGta8;=zyS_zrb8d+~dSz zz5D>z{O!hBp3=Y~`JiRyaQX+Kqw7h8Foh&3?r6<|x3`GzKD6APHf$VBsDkr?qo*4X zuMDn$sfOU~?JI*v#+F+dEp~Yo*6~`H0WO~!l|J{>03-u3qGv452T_ktlkLaI0KpOBcD_jPmR@1I^akFE8w3OZ#JZ zWq1~&F#=#B$GNJq3P$&Qwj_bryrZKjH>trv{MUF-cwnVz#{%htdDZl`I&a6^hP@%f zV~-ygG&^(T4dW>bn#j$S8Wz;wSMITsfa%V)@KGebEbBy(X9%lAu3MmA+s$2w6>SW2 zl?pQ3_V;=3raBiLl)9HV=#4cLW@HS>Xs-e)n9(isze-{R3Zb@fu}v{2Wl=nW)hk0y z9ld;n=*_mVauwX%zBa3T=Eu|LkFxfpHZbdyFkuYU3}05D{5T;fQN1G}O5iHebsDs# z+Jz!w)kJi>p6tevJf`d*)lIB7e|VF=q&OZP1!x_s2y=Niav1 z#gTlkti}t{Ag`Ed+i@nXiEa#yRitYs4g>Y!aPhr&CuKv9uxAft%JXbLjSI7*!%5!I z={;`nR~+HAQ1*ig*3Iavm^6bvzJtD>9&c4isR&m@5v)F+myFuzqZGz^L^x8KA&0*) zt#U?_$mHa}X%e`{bBG;(8i$r>r*vu$jdp&`hFmA6?;qXJ)F54dL+V6coA9TqcrC#g?8I;bimSUa)vya4Bfs3m)JHsZ1FR`-J?3pKqom%44 zpE;(139#l_r3ZTC($1p7zEMj+be6Rz`JHnEJoPvW z4H3#CbTq}{HlU`is>$0=P ztXgCB74QUev3bXmpbWz==CC)PW<*DiHSZ{!Ea9XaP}qbHqHw(75fCAd%UJWXA{~i4lqV9QJeBj%3xt?(#CFYYI zP-K*|E9Sk|*1^Yx+c|h zCEiHEaaAHg+u81;Yo+gG#isatIu;*GYq6V08$Zc!WvP}{b&*!>+A5-x)x6;ed}-bc z?0jBzsFg4)$c|nui>$X&oB60D-hD2OYFk;mG=o}eOJ4JAvorJt?tQyZ_oHP(cX1T$ z!XOSDAXM7OH727UwVC!*s#PY4#WEgN>(#d6)?B_8nkzrPm(tv*4dlC4Zkaf^_N>{{ zW@&NQK~Q9+jVSg#8M6SOsX3?ld8+M@zcyN#)SV0FiX2q*F$A)#Y#u7q)LtJ(N{Tt} zqSc5OFK(jdYPIk%d9*lGP23A~D0~nUy_ogjV(8Oyz!n9te5x?ia1@*jwINAv&Z6CT ztd|?2UF^~GcC6^lJN60wf#UDSM0>=J5he4b@ z>?|NZD!#Xx2C?ZvxIXVDZjH}h@s-mk(Pr83M>^!z7FFvNzPq2=1@fadt9`3|z4wrO zy8L?ggubFTGYvBQA@#_D1EtY2?<>ecpb?$Rv}l4zI~2>Wo()PQQ<_`!MuH{zGlA{F z;MI9mbq!;OLrjz|GL1(1=bW0MkhoeAZn-(k1x^&k8h#jP`X!Pzd8gH+5F=2QB1C}} zsO=tB9{=?$y3CvlQC#=L2NREOiLQfeap>* zjw9N^#h!zjeavM@;pE9YpDqC?^O5+MeCY^${GQnQ)3xw;;;wS9-GR`<>Q4i;1rx6- zNn;QS5{x1FM~s^Fj|%qPSybjmzGdDkmrDI&T^P*YC$S+W2v*am{M6!S7+^wyj@ 
zl0H%Ou|H{yuh*sVH(*3U8qxvAq4PT>6f@rNJ!eWq4`vf0ZT2l#2_{m z7G)fe+GpofdYhl!`3Vs(!Bp1)#!>tkR+67mW7lr5mIo?-=?JnWVyAN|U2 zJIHE0Y`jT%Hw}12cWJk{SuL_xn|bSTE59EI`@l!ret3T`y0)bHCig&-5#ow8MLMJq zx4&ooruBx+D1S=jOnDJ$g%m?uH8n7RK**+3H1LaqzdUt+NbBAfW4H+2vHCha0^V;) zej2A}-Erh;Shr|OcF5XpoZFcK6HZGnOd6cHORnrxp735YLxKqtkixhiXL;=YIh!M| z)>TTq41H9dQU{U{63H6oKfkEyP z%Tz8QmS?wHJCVn`jbVYB^#&-B3dzV@=1rKe?n&1=mQN-WhCXRezTfV5U4romxj}Xy z1S&)x*u##c413Z{d(^WVYrr&Qd&`})RsIbp6MH9<4(kT94jV^8mh2n%I(@r$3AAY@ zDw$_MbU8w7Dk8L}Gdk>~)jb-;g%S`JLX6rrYVkbzxI;A2+k5}kR?+HQJ{AWv^9G=4r${4jNtVwzTd|Dv^sUx?({y$gYOUYi_a+Ec#I zC$n{YO|{7`e0No zJ=h0VqGiJ)_J03bRa*da`_JL_3WJ}0^t;`Da2J2Hqra0GznapYR7Ox-hdjK657~q6 z;W~lGkEvc`X#RABIhHhSZCe8JapoG8|DWuLAJ2uab@)YG}C zSs=`6+76MLy6Mt-s%{2u?*_mj6z@`31h6Y9@?R0=vsE45Zu5>Tw$r3RkW2h@n0nvh zi?Cx&c1v8p$6snLv)!tYuu_T`pkq(#P1`3^=B*A+qx03zGjYok#B(wK5Tx$ZXx;G4D7hO8|+2m~M z{J^sgHIfJH{epX)f*j#my|iwvV%BN{BVIUY0G;J5^`H#+wsya|o=e zGCbR$5LQo~b)M36clo(b^PGcWV^LY$P8mrS-AWWE%0wcw2ddx;*W+wrJjy3^EFTK_ z^_q5C0KR_m{LlfrmFB756Wq^B_Xl@ujvSX`uw^dD9(-l5d9?vH5=0dgsq7ZAFCCzed)FmS`+{)h*! 
zFL3|rk0y?e|8qyUAO3M?es#pOfrn1^c(fKA)HVB#Wx-?a&uuF3up6$PIQ^YEntV?XJ3OP%@$gdJKwJSLDEniN`h>X~% zH8!fu$ze~*sI#$+4&_7J>TyS=Uqaoa{7_yU{erY+(|R&6Hx<*_&)r`8&6zzeYKHs~ zgD^o@1?t>HRE>(Pu4wmF6dNELW|tabCU`n*dn(QG4C%po4~;RR$!Cfe>wQQ!HvA0Z z!d-)c2Ys9e$^vr%U?IrxW5YC+3r4SpP3G_8gVm2agDt&fo7`>D4W~SwYRr8yFE~e0 z-g=5X@dV6$Nn^h7vOM%+^019l_5@OceUJQ$$n|!gb(WATJCaYcNN@iQGO{nBKoP|SHp5w z^llCIOH`TkchS2Q*~m)3tTi+1JkMThJ$tLj!z17WkN_wE0Dux;vX^dY@BjdSAOHY(02DYq zage=>xxI^_x~GG=vp%zjoh?N+0vuf?01kHl|BnCS2oxs_D0Q%6OP@6CQVXjyJ=R}BvDZcxRD3hZsw zFd*S!>FU{`X?a8hb<)+@&%`GYVIwj$jIl@sym74QQ00_Z5tS<@wj>}F4)~fr+oOrI z)Ln;BsGHB%JJ+_Xq`8R0+g~t_{dE*?!rBVxZE*aRNfsfB%6p#B%^*jGQdX=Dnw$QlZ1dblvOgw)W?L#D_{TwV})QmESvv0-TOdFkWTLKho#qHL^q9(y zaXF_aLqeI4haT*g*V|Xi+p4^4O6p^uJBqx9AbD|ze}=PJf7~jD-PcMvaoi?PC(lx(y`*HAh~3f|n3UB;$RD=^0dBq1rOcpz%F~(uxu%LUiU!ReeC^Bt$rhe2%c2 z7C4i>v1}?+O7lH*lbB2!bw9qi5kAQsq2nxhlvSo^N)Dy(np*d~bx`zXfM(#k_*4C$ zgmo6^ww!ryZ@7AVFz~HJZy0cC>??e@ocem136aMW7KfK+r&vxIsBc7ekyEm>DI#M% zvP7)Cw+s_CVua-Q!e?r8{OO<3`3O)@q99A*hM7^mlE{&wDMyKE_bi{jYE(T2+i!sB z+rEdI%|6`#smlz}++p!ke^LrU%;GtDdwp&NNTRZ>;#O-9xZb=#I`Wm~hWO??%6p(~ zcBSW05mNejZN}`h^&TQy1V}e;BIz%YUNwVay_-2OUXmEjwJ{((ZDjrSL3OjajF?}H zS^e+fo$FSdguncZ*_u|9>g$s)>~!3X9$7bq?HWo{C`727gF9_5&gaM0Au`zN zc-RMsLe<43XC=3WFqHq32Z6!~$;Yry>S0_U2B18E@!*eq@mDtdCwDx6m2xmP{dXV5 zs*3WRu&Dw4HjLFh)r|n>iwg_Yj>a|?N?#>#?inSA-^mgQeZ9W+lspSODAaR*u*dD3 z>)|Rq-f0^{ek3k}7oP3D03vAccn}`NWVeJ2ERBeVyR)@jw2S`mtutPoFtnF31E2TX z1sx4h*!x^6iIcjqP)_S1R#$dw=aDD`6X)OoS>ci0KI!{XRj-aM31thH}*S!$Q$>$foW66~;Pgkvv%;myl1W3q@G3G`T+z z>b#zv(x9=w;v#n44{Vt`VjMWxw^-}znOuTB>3>4z4gblTQy5&v5dnb5usi;oI-M=e z&0U;Xex2BU&Y@}XbFrzc*rEFjr%xD{km32B_7?71Pc{|R+nvv{iI0E+na7J#`HO8C4HfS!6 zEM~E9{H72JM;7y)lhZFiTV>w#wlt|kV7#&xIj?TPZCB>;0+k6<;zZeydoQbh(CD!j(wb&edTb+gwM$pT|Y1p zw&O36f8yX57|MY|GKx%&vW&8o6~KNLExK|Ic6C)cW|_|nNae-R3HbCFin#G5s`UD* zQSoZWa<+*WC((+DJGM|!>nEa-ZMp%tT_U6G0`Md^>;fb=X8CBe3Iw4J1Xm&qD`#-K zREq`>6Ls&H(VdWd0MsFwDCjQybU;S%Ph#7bYhBXrYIVrx91_Y=OmA1|h@<8KE 
z%-7H?shGnh0=(!3=e^Yo387~n3|=p!(LA>ZRzw?vm-7DIB`r2qK#UBe*L=prMRnbid8(U!iXmR*&+X;ii@SWojJ>| z@fS;Wbo8R;r11S%PM)JQ2{g-=H+7QOeo>m;c}BVYVa?I@bxB_=rxeFx3~C@$@H9t7 zM&1z3cUv5Y$vHBTeETLOnEM7thyz-Za>|=JkXuP(qy1sjeaOdoXq4xXexnOZ?huX= zpRw#*i9vqtL#1w8F{sX56BlX1+tLn1xo6?!2sH~d-}Z7AUO%r7casP<-3jn*cUPH< z34XJmFyMnR7;b3foYq3CuWj9h1M#8O&q*^`DsjdFmmMG z-PaqHSGaLraz|TvQRtg?h#Av{X+$i`lZ{RAiB;~3J;!qs>^u+NR_R0(v}_Qe2{sj%J7~MN-AXIf3=zV4|+MN@1*CUKXJ#)daAcXDHn}!{>T}kW#UXEHIwNX zlr51-q*Ugx#^DzJ$=t;qg>AK7xf}S^mOf3ObCb#hvkwv-Ek++*3VD1~UWpT|PV(tX z%dQJiRZM-@YX#~?!f@SQEHDfWk1VxspGa zujb23W#dNOv5(;Pnek@jEUNVGF}Q$j?FSrve=R{j2WFj!Faj69TJH`S8@$EHd|^jFG33+~mn!X$0>Z$!C)t9^PtZ1@uKGkBiEsk!M z;kN$C88MYOa*PR9wcAIxDvl~G0~|;-NK9<|Gxduah-EP`CHhCIEGai;MqNTe#ZAA& zLJh*{EP(ZPacg^D;JW7AY10lVpjthAHR7|H8+BQKslrg5g_j;{fx&~aNNo*~xXq-s zdRGr87oCD8sa#B4-MSR&O7`dxgpJ7p^0c&9rFxQbm|pDoMKxKOqn6FDU&`a~7Fj@abIl1qp>GfDak7l>A&RI~5X^!F1WOva#isQk zHBy~RG)o0Dyo!TXd$+8*H(M_j<8Q^3HH|QfDrNEowl)*8s?{x^*mHRef_DB`)H!hg>CEw77+oquInk z=hzvr+#WkzXkcmTi-Ci-Ijk;9x_JZ;ULf7W;(H0SK+c(HvA zjqU=M8^mNkHC9s=Gbl?tGLiKot zOSHgToN39qZMUzdT_!2D1~~_F0X<+=8eKPASoiJYQwzD(lBKn{M$Y=lswQx)#55Pn zml}s@Get|!ScEv1D0~{~XhVnn!H_t@&NAA){;=c9ss8HxiS%-!*a!LXM#uRl&kG&o zRaP9eGOFK=;1L)_u=b6sqT4}2#sZxdv-8b5VoB&P79DCza-mV8pyUd8*iX^@H*J4)R=?#a4dU{r+|8%iF#XX6Ur;%&!NshgphMpl{KzRB$w4WFsT zPPw=6;##}KXmVP{wH$J0muSpesdQ+CBGJ8q4SFN;gPc!t`OcSCB2_AFkH79!?g8zW zSI3%p>z>u8KZcK|!Q=^Mr8o~V+&w^*<~gT4F(3I;DoARFEDeH!@7_Jz+BQ6L^|}ez zUlXFph;u*;ab|T8leW8{h0H(6#FZ(}+P}V{iVORog2*yXBijVcTqrg+s2r#m=}oW1 zXbD{80T)TvK5aCB_$)aZ*EwyK{77W2TKw+3vBmNXdBow{fd=0L>B)B_Qy+ER8Ll<` zq>$L?HOG}^!Zx}v-u3ubM7rbdeR7=kzWq>w6}NJHkuIP$Iwoq-9v_Y#%Ic8d(^zN1;taP7%MaTh>GD# zcId6O(vl3&1WmoM=F+gV520#1p{4FsM|HY=2f0K4w|=T!oC)^|Ml45IYmNUW!7PoP z%+1tXoUH6${o>h6l|h((j?w(|@S*0O9e*&kn3dZbfTIbrM*zgju5t?aNxr6rc7YS( zc6|QIzg&krl+2ISFssqZ2D}%?VSG?cH;h-UtcsA6Eyv~t2S7@IU?RVgg zmV6kcXn_~Lqb9?b=x_R&lbeOBg&K_X+PAw<0!8_Q2ag3hpERcP|3G&2OSyq{m;dZN z&4D?6m#`Rx!(#RHk68T_D*o&}|4=S|cc8y?i(p8b9IRgt-3RYuJAuYeDX8qu%h9|K 
zsJ7udUET&ii-+{h?#>BNM52RhQZ7^W-(6nc&>$T?2=Qqb-J4ZH6N!KJ+6@BA+xRAm zR!B_y1h<`nhoX)g1iI2fsXxNydne0^Xay;q`jL-Mg#1D+m7RjYnf`^AeUyeys-&KZ zyT1F!eo!ds$2Y6InB}B->G*j}6~}iwoWqN)lqt>##s1oKJs)rdnK352#BSPgml{iL zwm*qkDn|BGGpDqvxjsY8$JV5e$Z>yXJNm^l^PQJXd1+T_&L`~filDK2;*Od@`L`EA zdGaA(ei^Q_E&7&2Sf}yN$l+;WF>1r2W(DiqabQA^8OT({3FP3+VhVCH|8uGCfBFs# zQa;cZ@*S-BAxnsN;(f=n)J&;QU7qCcSiz4*j@Llu?b^7RWG!m^L9_O*;)hIqeCJ=& ze6$b`y|40RJ%>ov1*RmBiue=|JG_JNR^`6~;t?spfIu9*th40qHz)5t8XR=Zkn3 zHXpyp>7sRcEiU!?g+xF?jw-8hKtsRTaHe#dSIb-xzM)_Z17oc#L(6UqoTvZiq{!`E zI*%ptS@B4J0l}K0(GF20;m6xHX^Ay!D{O|DjL%$1p|UdFeCh07Jtjw^NVRW4L1$R& z!0wh#UQ{d6Zg)P}$BK_-nZsX+OszNHl78WV#q^(WG?qohj)1`t2Ub~N{}qnL4i5iA zBP?Wp9%)cf&>Sm9$UgiPImQAB-+~H8V6lme)=8Q118DUt#az>@a_aEnxSi?O)uR(% zytZQ2ygjZYWEV*J4NoAWT`Cdl+4zVEBsP!E5_SP^=Wbd^`ZXJVUw;a9etm=+beS#4xk{{B64Vp@&0 zO-vXU@^-fe0?iWA7TM>r${0tYsx7O@pqw}8F8&^NTGbTJhe{0SCY+RsofbX!E^-zg- zE)c};Jj7#=qI^Z`{iMOHJuXD;v?Jt|k4%Gy4T{0Er?C3mlv(~IqSCf7=7bQ4{hHEj z;cZ#i)#PyttKjX|laA9PuNQvIm-??bXqIlvZ~uSJegMY=%g_J5`Qbm0>p$mz*$SZ| z|91y}U#9(!;m>n2j1zxZw7qY5f9dMCX(LQ(y0?&Z-}vu(|8G+OpcD0%@&BRz-}iH0 z1pe*m92T(uOA@~C<-R`l+Y1$JQ4Z$izFKzQ!Tsjsw}Wp4za0E-RqmVKFS~x5N)!EI zdcO#}@8Nzi^4o(mOz-=94F0t=x$p4rIr_Id0HA~f0Qg77zHk2bIrXpRf@FU&|8s^_ Xkw=8V<>wXwbigRAD7dEh`Re}xJW None: - if os.path.exists("output"): - shutil.rmtree("output") + if os.path.exists("test_for_accuracy"): + shutil.rmtree("test_for_accuracy", ignore_errors=True) return super().tearDown() def test_retrieval_accuracy(self): plugins.retrieval.enable = True - plugins.retrieval.args["input_path"] = "../assets/docs/sample.txt" + plugins.retrieval.args["input_path"] = "../assets/docs/" plugins.retrieval.args["persist_directory"] = "./test_for_accuracy" + plugins.retrieval.args["retrieval_type"] = 'default' config = PipelineConfig(model_name_or_path="facebook/opt-125m", plugins=plugins) chatbot = build_chatbot(config) @@ -45,6 +48,185 @@ def test_retrieval_accuracy(self): self.assertIsNotNone(response) plugins.retrieval.enable = False +class 
TestChatbotBuilder_txt(unittest.TestCase): + def setUp(self): + if os.path.exists("test_txt"): + shutil.rmtree("test_txt", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_txt"): + shutil.rmtree("test_txt", ignore_errors=True) + return super().tearDown() + + def test_retrieval_txt(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample.txt" + plugins.retrieval.args["persist_directory"] = "./test_txt" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("How many cores does the Intel Xeon Platinum 8480+ Processor have in total?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_docx(unittest.TestCase): + def setUp(self): + if os.path.exists("test_docx"): + shutil.rmtree("test_docx", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_docx"): + shutil.rmtree("test_docx", ignore_errors=True) + return super().tearDown() + + def test_retrieval_docx(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample.docx" + plugins.retrieval.args["persist_directory"] = "./test_docx" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("How many cores does the Intel Xeon Platinum 8480+ Processor have in total?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_xlsx(unittest.TestCase): + def setUp(self): + if os.path.exists("test_xlsx"): + 
shutil.rmtree("test_xlsx", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_xlsx"): + shutil.rmtree("test_xlsx", ignore_errors=True) + return super().tearDown() + + def test_retrieval_xlsx(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample.xlsx" + plugins.retrieval.args["persist_directory"] = "./test_xlsx" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Who is the CEO of Intel?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_xlsx_1(unittest.TestCase): + def setUp(self): + if os.path.exists("test_xlsx_1"): + shutil.rmtree("test_xlsx_1", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_xlsx_1"): + shutil.rmtree("test_xlsx_1", ignore_errors=True) + return super().tearDown() + + def test_retrieval_xlsx_1(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample_1.xlsx" + plugins.retrieval.args["persist_directory"] = "./test_xlsx_1" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Who is the CEO of Intel?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_xlsx_2(unittest.TestCase): + def setUp(self): + if os.path.exists("test_xlsx_2"): + shutil.rmtree("test_xlsx_2", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_xlsx_2"): + shutil.rmtree("test_xlsx_2", 
ignore_errors=True) + return super().tearDown() + + def test_retrieval_xlsx_2(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample_2.xlsx" + plugins.retrieval.args["persist_directory"] = "./test_xlsx_2" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("Who is the CEO of Intel?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_jsonl(unittest.TestCase): + def setUp(self): + if os.path.exists("test_jsonl"): + shutil.rmtree("test_jsonl", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_jsonl"): + shutil.rmtree("test_jsonl", ignore_errors=True) + return super().tearDown() + + def test_retrieval_jsonl(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample.jsonl" + plugins.retrieval.args["persist_directory"] = "./test_jsonl" + plugins.retrieval.args["retrieval_type"] = 'default' + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("What does this blog talk about?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + self.assertIsNotNone(response) + plugins.retrieval.enable = False + +class TestChatbotBuilder_child_parent(unittest.TestCase): + def setUp(self): + if os.path.exists("test_rag"): + shutil.rmtree("test_rag", ignore_errors=True) + if os.path.exists("test_rag_child"): + shutil.rmtree("test_rag_child", ignore_errors=True) + return super().setUp() + + def tearDown(self) -> None: + if os.path.exists("test_rag"): + shutil.rmtree("test_rag", ignore_errors=True) + if os.path.exists("test_rag_child"): + 
shutil.rmtree("test_rag_child", ignore_errors=True) + return super().tearDown() + + def test_retrieval_child_parent(self): + plugins.retrieval.enable = True + plugins.retrieval.args["input_path"] = "../assets/docs/sample.txt" + plugins.retrieval.args["persist_directory"] = "./test_rag" + plugins.retrieval.args["retrieval_type"] = "child_parent" + config = PipelineConfig(model_name_or_path="facebook/opt-125m", + plugins=plugins) + chatbot = build_chatbot(config) + response = chatbot.predict("How many cores does the Intel Xeon Platinum 8480+ Processor have in total?") + print(response) + plugins.retrieval.args["persist_directory"] = "./output" + plugins.retrieval.args["retrieval_type"] = 'default' + self.assertIsNotNone(response) + plugins.retrieval.enable = False if __name__ == '__main__': unittest.main() diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/retrieval/test_retrieval.py b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/retrieval/test_retrieval.py index 0e0a3db902f..16132c7b560 100644 --- a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/retrieval/test_retrieval.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/retrieval/test_retrieval.py @@ -34,4 +34,4 @@ def test_html_loader(self): self.assertIsNotNone(vectordb) if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/plugins/security/test_satety_checker.py b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/security/test_satety_checker.py new file mode 100644 index 00000000000..6e818f1a7cc --- /dev/null +++ b/intel_extension_for_transformers/neural_chat/tests/ci/plugins/security/test_satety_checker.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from intel_extension_for_transformers.neural_chat.pipeline.plugins.security.safety_checker import SafetyChecker + +class TestSafetyChecker(unittest.TestCase): + def setUp(self): + return super().setUp() + + def tearDown(self) -> None: + return super().tearDown() + + def test_safety_checker(self): + safety_checker = SafetyChecker() + response = safety_checker.post_llm_inference_actions(response='ADMIN?') + self.assertTrue(response, "******") + + +if __name__ == '__main__': + unittest.main() From fdefb9d60f28b55f93f5ca48345da85d909aa7ad Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Wed, 10 Jan 2024 17:13:54 +0800 Subject: [PATCH 037/101] fix nightly ut (#1125) --- .../nightly/finetuning}/test_finetuning_data.py | 17 +++++++++++++++++ tests/CI/test_quantization_qa_ipex.py | 5 ++++- 2 files changed, 21 insertions(+), 1 deletion(-) rename {tests/Nightly => intel_extension_for_transformers/neural_chat/tests/nightly/finetuning}/test_finetuning_data.py (88%) diff --git a/tests/Nightly/test_finetuning_data.py b/intel_extension_for_transformers/neural_chat/tests/nightly/finetuning/test_finetuning_data.py similarity index 88% rename from tests/Nightly/test_finetuning_data.py rename to intel_extension_for_transformers/neural_chat/tests/nightly/finetuning/test_finetuning_data.py index 158e73a73b0..b860c302e54 100644 --- a/tests/Nightly/test_finetuning_data.py +++ b/intel_extension_for_transformers/neural_chat/tests/nightly/finetuning/test_finetuning_data.py @@ -1,3 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 
-*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest diff --git a/tests/CI/test_quantization_qa_ipex.py b/tests/CI/test_quantization_qa_ipex.py index 50aa4bc024c..fe33823d435 100644 --- a/tests/CI/test_quantization_qa_ipex.py +++ b/tests/CI/test_quantization_qa_ipex.py @@ -8,8 +8,11 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "" # example test for question-answering quantization with IPEX only for now +EXAMPLE_PATH="../../examples/huggingface/pytorch/" +if not os.path.exists(EXAMPLE_PATH): + EXAMPLE_PATH="../examples/huggingface/pytorch/" SRC_DIRS = [ - os.path.join("../../examples/huggingface/pytorch/", dirname) + os.path.join(EXAMPLE_PATH, dirname) for dirname in [ "question-answering/quantization/", ] From f780a280d8144ee91ccaccd6f01a5cd93cbeadc2 Mon Sep 17 00:00:00 2001 From: Anas Ahouzi <112881240+aahouzi@users.noreply.github.com> Date: Thu, 11 Jan 2024 02:38:22 +0100 Subject: [PATCH 038/101] Fix missing documentation in LLM runtime, and a broken link + some typos (#1128) * Fix broken link * Fix script typos --- .../llm/runtime/graph/README.md | 3 ++- .../llm/runtime/graph/scripts/run.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/intel_extension_for_transformers/llm/runtime/graph/README.md b/intel_extension_for_transformers/llm/runtime/graph/README.md index 4591e929f0c..042adc6dd5f 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/README.md +++ 
b/intel_extension_for_transformers/llm/runtime/graph/README.md @@ -408,10 +408,11 @@ while True: outputs = model.generate(inputs, streamer=streamer, interactive=True, ignore_prompt=True, do_sample=True) ``` -## How to use: Python script +## How to use: Straightforward Python script Install from binary ```shell pip install intel-extension-for-transformers +pip install -r requirements.txt # under graph folder ``` Build from source diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/run.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/run.py index 78d4f8d8c5a..6271fba05c9 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/run.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/run.py @@ -167,7 +167,7 @@ def main(args_in: Optional[List[str]] = None) -> None: convert_cmd.extend(["--outfile", Path(work_path, "ne_{}_f32.bin".format(model_type))]) convert_cmd.extend(["--outtype", "f32"]) convert_cmd.append(args.model) - print("convert model ...") + print("Convert model ...") subprocess.run(convert_cmd) # 2. quantize @@ -185,7 +185,7 @@ def main(args_in: Optional[List[str]] = None) -> None: if args.use_ggml: quant_cmd.extend(["--use_ggml"]) quant_cmd.extend(["--build_dir", args.build_dir]) - print("quantize model ...") + print("Quantize model ...") subprocess.run(quant_cmd) # 3. 
inference @@ -204,7 +204,7 @@ def main(args_in: Optional[List[str]] = None) -> None: infer_cmd.extend(["--build_dir", args.build_dir]) if args.shift_roped_k: infer_cmd.extend(["--shift-roped-k"]) - print("inferce model ...") + print("Inference model ...") subprocess.run(infer_cmd) From 9f716b4020101d051d9b51b31a5c2d589e34879e Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Thu, 11 Jan 2024 13:12:52 +0800 Subject: [PATCH 039/101] try remove paddle speed limitation (#1130) --- .../workflows/script/unitTest/run_unit_test_neuralchat.sh | 5 ----- .github/workflows/unit-test-neuralchat.yml | 1 + .../neural_chat/tests/requirements.txt | 5 +++-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh b/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh index de11201142b..f4088565696 100644 --- a/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh +++ b/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh @@ -87,11 +87,6 @@ function main() { wget http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb dpkg -i libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb python -m pip install --upgrade --force-reinstall torch - pip install paddlepaddle==2.4.2 paddlenlp==2.5.2 paddlespeech==1.4.1 paddle2onnx==1.0.6 - pip install git+https://github.com/UKPLab/sentence-transformers.git - pip install git+https://github.com/Muennighoff/sentence-transformers.git@sgpt_poolings_specb - pip install --upgrade git+https://github.com/UKPLab/sentence-transformers.git - pip install -U sentence-transformers cd ${WORKING_DIR} || exit 1 echo "test on ${test_name}" if [[ $test_name == "PR-test" ]]; then diff --git a/.github/workflows/unit-test-neuralchat.yml b/.github/workflows/unit-test-neuralchat.yml index 6ebcd4b20c7..9ad31ba86eb 100644 --- a/.github/workflows/unit-test-neuralchat.yml +++ b/.github/workflows/unit-test-neuralchat.yml @@ -12,6 +12,7 @@ on: - 
'intel_extension_for_transformers/llm/quantization/**' - 'intel_extension_for_transformers/llm/runtime/graph/**' - 'intel_extension_for_transformers/transformers/**' + - 'intel_extension_for_transformers/langchain/**' - '!intel_extension_for_transformers/neural_chat/docs/**' - '!intel_extension_for_transformers/neural_chat/examples/**' - '!intel_extension_for_transformers/neural_chat/assets/**' diff --git a/intel_extension_for_transformers/neural_chat/tests/requirements.txt b/intel_extension_for_transformers/neural_chat/tests/requirements.txt index fb72c247ed1..6baf28346ff 100644 --- a/intel_extension_for_transformers/neural_chat/tests/requirements.txt +++ b/intel_extension_for_transformers/neural_chat/tests/requirements.txt @@ -1,12 +1,14 @@ transformers>=4.35.2 peft==0.6.2 fschat==0.2.32 -torch==2.1.0 +torch torchaudio==2.1.0 torchvision==0.16.0 intel_extension_for_pytorch num2words speechbrain +onnxruntime>=1.16.3 +onnx>=1.15.0 paddlepaddle paddlespeech shortuuid @@ -40,7 +42,6 @@ tiktoken==0.4.0 git+https://github.com/EleutherAI/lm-evaluation-harness.git@cc9778fbe4fa1a709be2abed9deb6180fd40e7e2 spacy neural-compressor -onnxruntime pymysql deepface exifread From c5ab7db95451bb2ed091d93d0a1bf80fd479741e Mon Sep 17 00:00:00 2001 From: VincyZhang Date: Thu, 11 Jan 2024 16:10:53 +0800 Subject: [PATCH 040/101] Fix neuralchat ut requirement (#1131) --- .github/workflows/script/unitTest/run_unit_test_neuralchat.sh | 1 + .github/workflows/unit-test-neuralchat.yml | 1 + .github/workflows/unit-test-optimize.yml | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh b/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh index f4088565696..d6329c4c713 100644 --- a/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh +++ b/.github/workflows/script/unitTest/run_unit_test_neuralchat.sh @@ -87,6 +87,7 @@ function main() { wget 
http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb dpkg -i libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb python -m pip install --upgrade --force-reinstall torch + pip install paddlepaddle==2.4.2 paddlenlp==2.5.2 paddlespeech==1.4.1 paddle2onnx==1.0.6 cd ${WORKING_DIR} || exit 1 echo "test on ${test_name}" if [[ $test_name == "PR-test" ]]; then diff --git a/.github/workflows/unit-test-neuralchat.yml b/.github/workflows/unit-test-neuralchat.yml index 9ad31ba86eb..91f6875a455 100644 --- a/.github/workflows/unit-test-neuralchat.yml +++ b/.github/workflows/unit-test-neuralchat.yml @@ -5,6 +5,7 @@ on: branches: [main] paths: - '.github/workflows/unit-test-neuralchat.yml' + - '.github/workflows/script/unitTest/run_unit_test_neuralchat.sh' - 'intel_extension_for_transformers/neural_chat/**' - 'requirements.txt' - 'setup.py' diff --git a/.github/workflows/unit-test-optimize.yml b/.github/workflows/unit-test-optimize.yml index 7be5e709149..e702ec68e07 100644 --- a/.github/workflows/unit-test-optimize.yml +++ b/.github/workflows/unit-test-optimize.yml @@ -14,7 +14,7 @@ on: - intel_extension_for_transformers/llm/operator/** - tests/** - .github/workflows/unit-test-optimize.yml - - .github/workflows/script/unitTest/** + - '.github/workflows/script/unitTest/run_unit_test_optimize.sh' - '!intel_extension_for_transformers/llm/runtime/graph/*.md' workflow_dispatch: From 4e6834a4afbf2a3e458212f96c9b9db65dfe7f3b Mon Sep 17 00:00:00 2001 From: lkk <33276950+lkk12014402@users.noreply.github.com> Date: Fri, 12 Jan 2024 09:28:24 +0800 Subject: [PATCH 041/101] remove SharedDDP as it is deprecated (#1103) * remove SharedDDP as it is deprecated --- .../script/unitTest/run_unit_test_optimize.sh | 2 +- .../optimized_sentence_transformers.py | 2 +- .../finetune_on_Intel_Xeon_CPU.ipynb | 2 - .../instruction/finetune_on_Nvidia_GPU.ipynb | 4 -- .../finetuning/multi_modal/llava_trainer.py | 72 ++++++++----------- .../modeling/llava_models/llava_mistral.py | 4 
+- .../transformers/modeling/modeling_auto.py | 8 +++ .../modeling/modeling_base_seq2seq.py | 6 +- .../transformers/trainer.py | 18 ++--- .../transformers/utils/utility.py | 1 + tests/CI/test_weight_only.py | 2 +- 11 files changed, 49 insertions(+), 72 deletions(-) diff --git a/.github/workflows/script/unitTest/run_unit_test_optimize.sh b/.github/workflows/script/unitTest/run_unit_test_optimize.sh index 276ccb0a67e..70772e569f7 100644 --- a/.github/workflows/script/unitTest/run_unit_test_optimize.sh +++ b/.github/workflows/script/unitTest/run_unit_test_optimize.sh @@ -21,7 +21,7 @@ function pytest() { mkdir -p ${coverage_log_dir} pip install --no-cache-dir protobuf==3.20.0 ## install transformers==4.34.1, to work with SharedDPO API - pip install transformers==4.34.1 + pip install transformers cd /intel-extension-for-transformers/tests/CI || exit 1 JOB_NAME=unit_test ut_log_name=${LOG_DIR}/${JOB_NAME}.log diff --git a/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py b/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py index 5c3cb2a6fc7..d56edb34b48 100644 --- a/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py +++ b/intel_extension_for_transformers/langchain/embeddings/optimized_sentence_transformers.py @@ -144,4 +144,4 @@ def _load_sbert_model( module = module_class.load(module_path) modules[module_config['name']] = module - return modules \ No newline at end of file + return modules diff --git a/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Intel_Xeon_CPU.ipynb b/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Intel_Xeon_CPU.ipynb index 0c14cf3d936..f125b2ede1c 100644 --- a/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Intel_Xeon_CPU.ipynb +++ 
b/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Intel_Xeon_CPU.ipynb @@ -384,7 +384,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", @@ -1526,7 +1525,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", diff --git a/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Nvidia_GPU.ipynb b/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Nvidia_GPU.ipynb index 13c64d58d91..515ff2e6c5d 100644 --- a/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Nvidia_GPU.ipynb +++ b/intel_extension_for_transformers/neural_chat/examples/finetuning/instruction/finetune_on_Nvidia_GPU.ipynb @@ -251,7 +251,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", @@ -740,7 +739,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", @@ -1322,7 +1320,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", @@ -1807,7 +1804,6 @@ "save_strategy=no,\n", "save_total_limit=2,\n", "seed=42,\n", - "sharded_ddp=[],\n", "skip_memory_metrics=True,\n", "tf32=None,\n", "torch_compile=False,\n", diff --git a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/llava_trainer.py b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/llava_trainer.py index a5bcc53ab65..c163af45f53 100644 --- 
a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/llava_trainer.py +++ b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/llava_trainer.py @@ -26,7 +26,6 @@ get_parameter_names, has_length, ALL_LAYERNORM_LAYERS, - ShardedDDPOption, logger, ) from typing import List, Optional @@ -176,7 +175,7 @@ def create_optimizer(self): """ if is_sagemaker_mp_enabled(): return super().create_optimizer() - if self.sharded_ddp == ShardedDDPOption.SIMPLE: + if self.is_fsdp_enabled: return super().create_optimizer() opt_model = self.model @@ -237,27 +236,20 @@ def create_optimizer(self): optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) - if self.sharded_ddp == ShardedDDPOption.SIMPLE: - self.optimizer = OSS( - params=optimizer_grouped_parameters, - optim=optimizer_cls, - **optimizer_kwargs, - ) - else: - self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) - if optimizer_cls.__name__ == "Adam8bit": - import bitsandbytes + self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + if optimizer_cls.__name__ == "Adam8bit": + import bitsandbytes - manager = bitsandbytes.optim.GlobalOptimManager.get_instance() + manager = bitsandbytes.optim.GlobalOptimManager.get_instance() - skipped = 0 - for module in opt_model.modules(): - if isinstance(module, nn.Embedding): - skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) - logger.info(f"skipped {module}: {skipped/2**20}M params") - manager.register_module_override(module, "weight", {"optim_bits": 32}) - logger.debug(f"bitsandbytes: will optimize {module} in fp32") - logger.info(f"skipped: {skipped/2**20}M params") + skipped = 0 + for module in opt_model.modules(): + if isinstance(module, nn.Embedding): + skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) + logger.info(f"skipped {module}: {skipped/2**20}M params") + 
manager.register_module_override(module, "weight", {"optim_bits": 32}) + logger.debug(f"bitsandbytes: will optimize {module} in fp32") + logger.info(f"skipped: {skipped/2**20}M params") return self.optimizer @@ -297,7 +289,6 @@ def _save(self, output_dir: Optional[str] = None, state_dict=None): get_parameter_names, has_length, ALL_LAYERNORM_LAYERS, - ShardedDDPOption, logger, ) from typing import List, Optional @@ -328,7 +319,7 @@ def create_optimizer(self): """ if is_sagemaker_mp_enabled(): return super().create_optimizer() - if self.sharded_ddp == ShardedDDPOption.SIMPLE: + if self.is_fsdp_enabled: return super().create_optimizer() opt_model = self.model @@ -401,27 +392,20 @@ def create_optimizer(self): # optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) - if self.sharded_ddp == ShardedDDPOption.SIMPLE: - self.optimizer = OSS( - params=optimizer_grouped_parameters, - optim=optimizer_cls, - **optimizer_kwargs, - ) - else: - self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) - if optimizer_cls.__name__ == "Adam8bit": - import bitsandbytes - - manager = bitsandbytes.optim.GlobalOptimManager.get_instance() - - skipped = 0 - for module in opt_model.modules(): - if isinstance(module, nn.Embedding): - skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) - logger.info(f"skipped {module}: {skipped/2**20}M params") - manager.register_module_override(module, "weight", {"optim_bits": 32}) - logger.debug(f"bitsandbytes: will optimize {module} in fp32") - logger.info(f"skipped: {skipped/2**20}M params") + self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + if optimizer_cls.__name__ == "Adam8bit": + import bitsandbytes + + manager = bitsandbytes.optim.GlobalOptimManager.get_instance() + + skipped = 0 + for module in opt_model.modules(): + if isinstance(module, nn.Embedding): + skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) + 
logger.info(f"skipped {module}: {skipped/2**20}M params") + manager.register_module_override(module, "weight", {"optim_bits": 32}) + logger.debug(f"bitsandbytes: will optimize {module} in fp32") + logger.info(f"skipped: {skipped/2**20}M params") return self.optimizer diff --git a/intel_extension_for_transformers/transformers/modeling/llava_models/llava_mistral.py b/intel_extension_for_transformers/transformers/modeling/llava_models/llava_mistral.py index d5427a5a612..7125e200bed 100644 --- a/intel_extension_for_transformers/transformers/modeling/llava_models/llava_mistral.py +++ b/intel_extension_for_transformers/transformers/modeling/llava_models/llava_mistral.py @@ -31,7 +31,7 @@ class LlavaConfig(MistralConfig): - model_type = "llava" + model_type = "llava_custom" class LlavaMistralModel(LlavaMetaModel, MistralModel): @@ -110,5 +110,5 @@ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_ _inputs['images'] = images return _inputs -AutoConfig.register("llava", LlavaConfig) +AutoConfig.register("llava_custom", LlavaConfig) AutoModelForCausalLM.register(LlavaConfig, LlavaMistralForCausalLM) diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py index ae3b0de2d00..b1719b73c2d 100644 --- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py +++ b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py @@ -53,6 +53,7 @@ QUANT_CONFIG, WEIGHTS_NAME, WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, ) from intel_extension_for_transformers.llm.quantization.utils import replace_linear from transformers.configuration_utils import PretrainedConfig @@ -727,6 +728,13 @@ def load_low_bit(cls, pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) ) is_sharded = True + elif os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) + ): + 
# Load from a safetensors checkpoint + archive_file = os.path.join( + pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) + ) elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_base_seq2seq.py b/intel_extension_for_transformers/transformers/modeling/modeling_base_seq2seq.py index 75acfab6483..17ec9c7ffdb 100644 --- a/intel_extension_for_transformers/transformers/modeling/modeling_base_seq2seq.py +++ b/intel_extension_for_transformers/transformers/modeling/modeling_base_seq2seq.py @@ -25,9 +25,7 @@ from neural_compressor.utils.pytorch import load from transformers import AutoModel, PretrainedConfig from transformers.file_utils import add_start_docstrings -from transformers.modeling_utils import no_init_weights from transformers.models.auto.auto_factory import _get_model_class -from transformers.utils.generic import ContextManagers from optimum.exporters import TasksManager from optimum.intel.neural_compressor import INCConfig @@ -268,9 +266,7 @@ def _from_pretrained( decoder = model else: model_class = _get_model_class(config, cls.auto_model_class._model_mapping) - init_contexts = [no_init_weights(_enable=True)] - with ContextManagers(init_contexts): - model = model_class(config) + model = model_class(config) # Load the model from local directory if os.path.isdir(model_id): diff --git a/intel_extension_for_transformers/transformers/trainer.py b/intel_extension_for_transformers/transformers/trainer.py index c61ccfe4c6d..251d3cf168f 100644 --- a/intel_extension_for_transformers/transformers/trainer.py +++ b/intel_extension_for_transformers/transformers/trainer.py @@ -49,7 +49,7 @@ from transformers import __version__, Seq2SeqTrainer, Trainer, PreTrainedModel from transformers.configuration_utils import PretrainedConfig from transformers.debug_utils import DebugOption, 
DebugUnderflowOverflow -from transformers.file_utils import ( +from transformers.utils import ( CONFIG_NAME, WEIGHTS_NAME, is_torch_tpu_available, @@ -67,7 +67,6 @@ ) from transformers.trainer_utils import ( HPSearchBackend, - ShardedDDPOption, TrainOutput, EvalLoopOutput, EvalPrediction, @@ -762,7 +761,8 @@ def train( else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa - delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE + # delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled + delay_optimizer_creation = is_sagemaker_mp_enabled() if not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) @@ -1176,9 +1176,7 @@ def training_step( else: loss.backward() else: - if self.do_grad_scaling: - self.scaler.scale(loss).backward() - elif self.use_apex: + if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif NEW_DEEPSPEED_FLAG: @@ -1265,9 +1263,7 @@ def training_step_length_adaptive( else: loss.backward() else: - if self.do_grad_scaling: - self.scaler.scale(loss).backward() - elif self.use_apex: + if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif NEW_DEEPSPEED_FLAG: @@ -1360,9 +1356,7 @@ def training_step_length_adaptive( else: loss.backward() else: - if self.do_grad_scaling: - self.scaler.scale(loss).backward() - elif self.use_apex: + if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif NEW_DEEPSPEED_FLAG: diff --git a/intel_extension_for_transformers/transformers/utils/utility.py b/intel_extension_for_transformers/transformers/utils/utility.py index d35f4330151..f55dbf98724 100644 --- a/intel_extension_for_transformers/transformers/utils/utility.py +++ b/intel_extension_for_transformers/transformers/utils/utility.py @@ -34,6 +34,7 @@ WEIGHTS_INDEX_NAME = 
"pytorch_model.bin.index.json" QUANT_CONFIG = "quantization_config.json" SPARSITY_CONFIG = "sparsity_config.json" +SAFE_WEIGHTS_NAME = "model.safetensors" torch = LazyImport("torch") diff --git a/tests/CI/test_weight_only.py b/tests/CI/test_weight_only.py index ab16aa91bb6..de26a7bfe7d 100644 --- a/tests/CI/test_weight_only.py +++ b/tests/CI/test_weight_only.py @@ -153,7 +153,7 @@ def test_auto_model_saving_loading(self): if isinstance(module, QuantizedLinearQBits): module_list.append(name) self.assertTrue(len(module_list) > 0) - model.save_pretrained(self.workspace) + model.save_pretrained(self.workspace, safe_serialization=False) loaded_model = AutoModelForCausalLM.from_pretrained(self.workspace) for name, module in loaded_model.named_modules(): if isinstance(module, QuantizedLinearQBits): From 8b4d98b4604f160d3748f7bcbfb65aecb002f8a4 Mon Sep 17 00:00:00 2001 From: "Wang, Zhe" Date: Fri, 12 Jan 2024 13:59:51 +0800 Subject: [PATCH 042/101] Qbits use bestla as 3rd lib in cmake (#1126) * update qbits lib * update usage * Update intel_extension_for_transformers/llm/operator/csrc/dispatcher/neural_speed.cmake Co-authored-by: Meng, Hengyu Signed-off-by: Wang, Zhe --------- Signed-off-by: Wang, Zhe Co-authored-by: Meng, Hengyu --- .../llm/operator/csrc/CMakeLists.txt | 4 +- .../operator/csrc/dispatcher/CMakeLists.txt | 19 +- ...jblas_customop.hpp => bestla_customop.hpp} | 26 +- ...patcher.hpp => bestla_gemm_dispatcher.hpp} | 8 +- ...r.hpp => bestla_weightonly_dispatcher.hpp} | 26 +- .../dispatcher/include/dispatcher_utils.hpp | 20 +- .../csrc/dispatcher/neural_speed.cmake | 9 + ...patcher.cpp => bestla_gemm_dispatcher.cpp} | 54 ++-- ...s_packq_impl.cpp => bestla_packq_impl.cpp} | 36 +-- ...r.cpp => bestla_weightonly_dispatcher.cpp} | 247 ++++++++---------- .../llm/operator/csrc/include/dropout.hpp | 14 +- .../llm/operator/csrc/qbits.cpp | 22 +- .../llm/operator/csrc/qbits_ut/test_matmul.py | 2 +- .../llm/operator/csrc/qbits_ut/test_packq.py | 6 +- 
.../operator/csrc/qbits_ut/test_weightonly.py | 12 +- .../llm/operator/csrc/src/dropout.cpp | 36 +-- .../llm/quantization/autograd/functions.py | 8 +- .../llm/quantization/nn/modules.py | 10 +- tests/CI/test_weight_only.py | 8 +- 19 files changed, 279 insertions(+), 288 deletions(-) rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/{jblas_customop.hpp => bestla_customop.hpp} (66%) rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/{jblas_gemm_dispatcher.hpp => bestla_gemm_dispatcher.hpp} (83%) rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/{jblas_weightonly_dispatcher.hpp => bestla_weightonly_dispatcher.hpp} (77%) create mode 100644 intel_extension_for_transformers/llm/operator/csrc/dispatcher/neural_speed.cmake rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/{jblas_gemm_dispatcher.cpp => bestla_gemm_dispatcher.cpp} (58%) rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/{jblas_packq_impl.cpp => bestla_packq_impl.cpp} (50%) rename intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/{jblas_weightonly_dispatcher.cpp => bestla_weightonly_dispatcher.cpp} (54%) diff --git a/intel_extension_for_transformers/llm/operator/csrc/CMakeLists.txt b/intel_extension_for_transformers/llm/operator/csrc/CMakeLists.txt index deb0e5d16e8..a723e49a0b6 100755 --- a/intel_extension_for_transformers/llm/operator/csrc/CMakeLists.txt +++ b/intel_extension_for_transformers/llm/operator/csrc/CMakeLists.txt @@ -32,7 +32,7 @@ find_package(Torch REQUIRED PATHS ${torch_path} NO_DEFAULT_PATH) - +include(FindOpenMP) add_subdirectory(dispatcher) file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) @@ -43,4 +43,4 @@ add_library(qbits SHARED ${qbits_src}) target_compile_features(qbits PRIVATE cxx_std_14) # Link against LibTorch -target_link_libraries(qbits jblas_dispatcher) +target_link_libraries(qbits PRIVATE bestla_dispatcher) diff --git 
a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/CMakeLists.txt b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/CMakeLists.txt index 9641c4952e7..240526ca0e1 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/CMakeLists.txt +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/CMakeLists.txt @@ -12,9 +12,9 @@ ## See the License for the specific language governing permissions and ## limitations under the License. cmake_minimum_required(VERSION 3.1 FATAL_ERROR) -project(jblas_dispatcher LANGUAGES C CXX) +project(bestla_dispatcher LANGUAGES C CXX) if(NOT WIN32) -set(CMAKE_CXX_FLAGS "-fconcepts") +set(CMAKE_CXX_FLAGS "-fPIC -fconcepts") endif() file(GLOB SOURCES ${PROJECT_SOURCE_DIR}/src/*.cpp @@ -23,16 +23,17 @@ file(GLOB HEADERS ${PROJECT_SOURCE_DIR}/include/*.hpp ) -add_subdirectory(../../../library/jblas jblas_out) +include(FetchContent) +include(neural_speed.cmake) -add_library(jblas_dispatcher STATIC ${HEADERS} ${SOURCES}) +add_library(bestla_dispatcher STATIC ${HEADERS} ${SOURCES}) if(WIN32) # MSVC does not allow sth like -fconcepts -set_target_properties(jblas_dispatcher PROPERTIES C_STANDARD 20 C_STANDARD_REQUIRED ON C_EXTENSIONS OFF) -set_target_properties(jblas_dispatcher PROPERTIES CXX_STANDARD 20 CXX_STANDARD_REQUIRED ON CXX_EXTENSIONS OFF) +set_target_properties(bestla_dispatcher PROPERTIES C_STANDARD 20 C_STANDARD_REQUIRED ON C_EXTENSIONS OFF) +set_target_properties(bestla_dispatcher PROPERTIES CXX_STANDARD 20 CXX_STANDARD_REQUIRED ON CXX_EXTENSIONS OFF) endif() -set_target_properties(jblas_dispatcher PROPERTIES POSITION_INDEPENDENT_CODE ON) -set_target_properties(jblas_dispatcher PROPERTIES LINKER_LANGUAGE CXX) -target_link_libraries(jblas_dispatcher "${TORCH_LIBRARIES}" jblas::jblas) +set_target_properties(bestla_dispatcher PROPERTIES POSITION_INDEPENDENTBTLA_CODE ON) +set_target_properties(bestla_dispatcher PROPERTIES LINKER_LANGUAGE CXX) +target_link_libraries(bestla_dispatcher 
OpenMP::OpenMP_CXX OpenMP::OpenMP_C "${TORCH_LIBRARIES}" bestla::bestla) set_property(TARGET torch_cpu PROPERTY INTERFACE_COMPILE_OPTIONS "") diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_customop.hpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_customop.hpp similarity index 66% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_customop.hpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_customop.hpp index f2b859b7df4..10958560712 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_customop.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_customop.hpp @@ -15,31 +15,31 @@ #include #include #include -#include "jblas/jit_blas.h" -#include "jblas/kernel_wrapper.h" +#include "bestla/bestla.h" +#include "bestla/kernel_wrapper.h" -template -inline JBLAS_CODE alphabeta_dt_cvt_process(float* tmp_dst, const int cachestep, const int M_offset, const int N_offset, +template +inline BTLA_CODE alphabeta_dt_cvt_process(float* tmp_dst, const int cachestep, const int M_offset, const int N_offset, const int M, const int N, const Param& _param) { auto DOffset = M_offset * _param.ldd + N_offset; auto dptr = reinterpret_cast(_param.D) + DOffset; - jblas::kernel::wrapper::AlphaBetaF32F32::template forward(_param.alpha, tmp_dst, cachestep, _param.beta, dptr, + bestla::kernel::wrapper::AlphaBetaF32F32::template forward(_param.alpha, tmp_dst, cachestep, _param.beta, dptr, _param.ldd, tmp_dst, cachestep, M, N); auto COffset = M_offset * _param.ldc + N_offset; auto cptr = reinterpret_cast(_param.C) + COffset; if constexpr (std::is_same_v) { - return jblas::kernel::wrapper::Memcpy2D::template forward(tmp_dst, cptr, M, N, cachestep, + return bestla::kernel::wrapper::Memcpy2D::template forward(tmp_dst, cptr, M, N, cachestep, _param.ldc, NULL); } - if constexpr 
(std::is_same_v) { - return jblas::kernel::wrapper::Memcpy2DFp32CvtBf16::template forward( + if constexpr (std::is_same_v) { + return bestla::kernel::wrapper::Memcpy2DFp32CvtBf16::template forward( (void*)tmp_dst, (void*)cptr, M, N, cachestep * sizeof(float), _param.ldc * sizeof(DST_T), false); } assert(false); } -template +template class AlphaBetaProcess { public: struct Param { @@ -47,13 +47,13 @@ class AlphaBetaProcess { int ldc, ldd; float alpha, beta; }; - JBLAS_CODE forward(float* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, + BTLA_CODE forward(float* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, const int N, const Param& _param, void* tmpcache = nullptr, size_t cachesize = -1) { return alphabeta_dt_cvt_process(cacheptr, cachestep, M_offset, N_offset, M, N, _param); } }; -template +template using AlphaBetaProcessStoreFp32 = AlphaBetaProcess; -template -using AlphaBetaProcessStoreBf16 = AlphaBetaProcess; +template +using AlphaBetaProcessStoreBf16 = AlphaBetaProcess; diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_gemm_dispatcher.hpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_gemm_dispatcher.hpp similarity index 83% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_gemm_dispatcher.hpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_gemm_dispatcher.hpp index 9b45820588d..c4f985116a7 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_gemm_dispatcher.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_gemm_dispatcher.hpp @@ -14,12 +14,12 @@ #pragma once #include -namespace jblas_gemm { -struct jblas_gemm_runtime_ctx { +namespace bestla_gemm { +struct bestla_gemm_runtime_ctx { torch::Tensor *matA, *matB, *matC; bool matB_trans; int m, n, k; }; -void 
dispatch_jblas_gemm(jblas_gemm_runtime_ctx* ctx); -} // namespace jblas_gemm +void dispatch_bestla_gemm(bestla_gemm_runtime_ctx* ctx); +} // namespace bestla_gemm diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_weightonly_dispatcher.hpp similarity index 77% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_weightonly_dispatcher.hpp index e03d09a377d..3f816b23f13 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/jblas_weightonly_dispatcher.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/bestla_weightonly_dispatcher.hpp @@ -14,7 +14,7 @@ #pragma once #include #include -#include "jblas/jit_blas_storage.h" +#include "bestla/bestla_storage.h" #include "../include/dispatcher_utils.hpp" #include #include @@ -54,21 +54,21 @@ struct woq_runtime_ctx { bool transpose; int m, n, k, lda, ldo; float alpha, beta; - jblas::storage::gemm::IWeightBase* deseries_wei; + bestla::storage::gemm::IWeightBase* deseries_wei; }; -static std::map wei2jblasdt_map{{"int8", JBLAS_DTYPE::S8}, - {"int4_clip", JBLAS_DTYPE::S4_CLIP}, - {"int4_fullrange", JBLAS_DTYPE::S4_FULLRANGE}, - {"nf4", JBLAS_DTYPE::F4_NF4}, - {"fp4_e2m1_bnb", JBLAS_DTYPE::F4_BNB}, - {"fp4_e2m1", JBLAS_DTYPE::F4_E2M1}, - {"fp8_e4m3", JBLAS_DTYPE::F8_E4M3}, - {"fp8_e5m2", JBLAS_DTYPE::F8_E5M2}}; -static std::map scale2jblasdt_map{{"fp32", JBLAS_DTYPE::F32}, - {"fp8_e8m0", JBLAS_DTYPE::F8_E8M0}}; +static std::map wei2bestladt_map{{"int8", BTLA_DTYPE::S8}, + {"int4_clip", BTLA_DTYPE::S4_CLIP}, + {"int4_fullrange", BTLA_DTYPE::S4_FULLRANGE}, + {"nf4", BTLA_DTYPE::F4_NF4}, + {"fp4_e2m1_bnb", BTLA_DTYPE::F4_BNB}, + {"fp4_e2m1", BTLA_DTYPE::F4_E2M1}, + {"fp8_e4m3", 
BTLA_DTYPE::F8_E4M3}, + {"fp8_e5m2", BTLA_DTYPE::F8_E5M2}}; +static std::map scale2bestladt_map{ + {"fp32", BTLA_DTYPE::F32}, {"bf16", BTLA_DTYPE::BF16}, {"fp8_e8m0", BTLA_DTYPE::F8_E8M0}}; void dispatch_woq_task(woq_config_param* p, woq_runtime_ctx* ctx, WOQ_TASK task); -void jblas_packq(woq_packq_param* p, woq_packq_ctx* ctx); +void bestla_packq(woq_packq_param* p, woq_packq_ctx* ctx); void set_woq_workspace(torch::Tensor* workspace); } // namespace woq diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/dispatcher_utils.hpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/dispatcher_utils.hpp index b1b57546a81..d84b82a9cc5 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/dispatcher_utils.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/include/dispatcher_utils.hpp @@ -15,21 +15,21 @@ #include #include #include -#include "jblas/jit_blas_device.h" -#include "jblas/jit_blas_utils.h" -#include "jblas/jit_blas_parallel.h" +#include "bestla/bestla_device.h" +#include "bestla/bestla_utils.h" +#include "bestla/bestla_parallel.h" namespace dispatcher_utils { -inline bool check_amx() { return jblas::device::CpuDevice::getInstance()->AMX_BF16(); } -inline bool check_avx512_vnni() { return jblas::device::CpuDevice::getInstance()->AVX512_VNNI(); } -inline bool check_avx_vnni() { return jblas::device::CpuDevice::getInstance()->AVX_VNNI(); }; -inline bool check_avx512f() { return jblas::device::CpuDevice::getInstance()->AVX512F(); } -inline bool check_avx2() { return jblas::device::CpuDevice::getInstance()->AVX2(); } +inline bool check_amx() { return bestla::device::CpuDevice::getInstance()->AMX_BF16(); } +inline bool check_avx512_vnni() { return bestla::device::CpuDevice::getInstance()->AVX512_VNNI(); } +inline bool check_avx_vnni() { return bestla::device::CpuDevice::getInstance()->AVX_VNNI(); }; +inline bool check_avx512f() { return 
bestla::device::CpuDevice::getInstance()->AVX512F(); } +inline bool check_avx2() { return bestla::device::CpuDevice::getInstance()->AVX2(); } class env_initer { public: env_initer() { - if (check_amx()) jblas::utils::request_perm_xtile_data(); + if (check_amx()) bestla::utils::request_perm_xtile_data(); verbose = std::getenv("QBITS_VERBOSE") != nullptr; FLAGS_caffe2_log_level = 0; } @@ -56,7 +56,7 @@ class Timer { high_resolution_clock::time_point m_end; }; static Timer timer; -static jblas::parallel::OMPThreading DefaultThreading(jblas::device::CpuDevice::getInstance()->getThreads()); +static bestla::parallel::OMPThreading DefaultThreading(bestla::device::CpuDevice::getInstance()->getThreads()); string get_torch_dt_name(torch::Tensor* tensor); } // namespace dispatcher_utils diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/neural_speed.cmake b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/neural_speed.cmake new file mode 100644 index 00000000000..7a8c0ce591c --- /dev/null +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/neural_speed.cmake @@ -0,0 +1,9 @@ +set(NEURAL_SPEED_URL https://github.com/intel/neural-speed.git) +set(NEURAL_SPEED_TAG bestlav0.1) + +FetchContent_Declare( + neural_speed + GIT_REPOSITORY ${NEURAL_SPEED_URL} + GIT_TAG ${NEURAL_SPEED_TAG} + ) +FetchContent_MakeAvailable(neural_speed) diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_gemm_dispatcher.cpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_gemm_dispatcher.cpp similarity index 58% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_gemm_dispatcher.cpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_gemm_dispatcher.cpp index 7ff7c3907ed..fe4e4a92019 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_gemm_dispatcher.cpp +++ 
b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_gemm_dispatcher.cpp @@ -11,32 +11,32 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "../include/jblas_gemm_dispatcher.hpp" +#include "../include/bestla_gemm_dispatcher.hpp" #include "../include/dispatcher_utils.hpp" -#include "jblas/jit_blas.h" -#include "jblas/jit_blas_epilogue.h" -#include "jblas/jit_blas_gemm.h" -#include "jblas/jit_blas_parallel.h" -#include "jblas/jit_blas_prologue_a.h" -#include "jblas/jit_blas_prologue_b.h" -#include "jblas/jit_blas_utils.h" -#include "jblas/jit_blas_wrapper.h" +#include "bestla/bestla.h" +#include "bestla/bestla_epilogue.h" +#include "bestla/bestla_gemm.h" +#include "bestla/bestla_parallel.h" +#include "bestla/bestla_prologue_a.h" +#include "bestla/bestla_prologue_b.h" +#include "bestla/bestla_utils.h" +#include "bestla/bestla_wrapper.h" #include #include #include #include -namespace jblas_gemm { +namespace bestla_gemm { -template class PrologueA, template class Epilogue, +template class PrologueA, template class Epilogue, typename DT> -void do_gemm(jblas_gemm_runtime_ctx* ctx) { - using Launcher = jblas::wrapper::gemm::LauncherBase; +void do_gemm(bestla_gemm_runtime_ctx* ctx) { + using Launcher = bestla::wrapper::gemm::LauncherBase; Launcher launcher; - using Parallel = jblas::parallel::gemm::SchedulerBase; + using Parallel = bestla::parallel::gemm::SchedulerBase; auto packw = launcher.mProB.createStorage(ctx->n, ctx->k); - auto tmpbuf = jblas::utils::amalloc(packw.mSize); + auto tmpbuf = bestla::utils::amalloc(packw.mSize); packw.assign(tmpbuf); if (ctx->matB_trans) { launcher.mProB.packWeightTranspose(ctx->n, ctx->k, {reinterpret_cast(ctx->matB->data_ptr()), ctx->k, &packw}, @@ -45,37 +45,37 @@ void do_gemm(jblas_gemm_runtime_ctx* ctx) { launcher.mProB.packWeight(ctx->n, ctx->k, 
{reinterpret_cast(ctx->matB->data_ptr()), ctx->n, &packw}, &dispatcher_utils::DefaultThreading); } - jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k); + bestla::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k); typename Launcher::Param args{gp, {reinterpret_cast(ctx->matA->data_ptr()), ctx->k}, {reinterpret_cast(ctx->matB->data_ptr()), ctx->n, &packw}, {reinterpret_cast(ctx->matC->data_ptr()), ctx->n}}; - jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); - jblas::utils::afree(tmpbuf); + bestla::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + bestla::utils::afree(tmpbuf); } -void dispatch_jblas_gemm(jblas_gemm_runtime_ctx* ctx) { +void dispatch_bestla_gemm(bestla_gemm_runtime_ctx* ctx) { TORCH_CHECK( ctx->matA->scalar_type() == ctx->matB->scalar_type() && ctx->matA->scalar_type() == ctx->matC->scalar_type(), "QBits: data-type of matA matB matC must be equal in gemm op."); if (ctx->matA->scalar_type() == torch::kFloat32) { if (dispatcher_utils::check_avx512f()) { - return do_gemm, jblas::prologue_a::gemm::ActivationBase, - jblas::epilogue::gemm::AccumulatorWriteBackFp32, float>(ctx); + return do_gemm, bestla::prologue_a::gemm::ActivationBase, + bestla::epilogue::gemm::AccumulatorWriteBackFp32, float>(ctx); } if (dispatcher_utils::check_avx2()) { - return do_gemm, jblas::prologue_a::gemm::ActivationBase, - jblas::epilogue::gemm::AccumulatorWriteBackFp32, float>(ctx); + return do_gemm, bestla::prologue_a::gemm::ActivationBase, + bestla::epilogue::gemm::AccumulatorWriteBackFp32, float>(ctx); } } if (ctx->matA->scalar_type() == torch::kBFloat16) { if (dispatcher_utils::check_amx()) { - return do_gemm, jblas::prologue_a::gemm::ActivationBase, - jblas::epilogue::gemm::AccumulatorWriteBackFp32Bf16, jblas::utils::bf16>(ctx); + return do_gemm, bestla::prologue_a::gemm::ActivationBase, + bestla::epilogue::gemm::AccumulatorWriteBackFp32Bf16, bestla::utils::bf16>(ctx); } } TORCH_CHECK(false, "QBits: unsupported config 
in gemm op, data_type:", dispatcher_utils::get_torch_dt_name(ctx->matA), ", AVX2:", dispatcher_utils::check_avx2(), ", AVX512F:", dispatcher_utils::check_avx512f(), ", AMX_BF16:", dispatcher_utils::check_amx()); } -} // namespace jblas_gemm +} // namespace bestla_gemm diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_packq_impl.cpp similarity index 50% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_packq_impl.cpp index 3cdfa0dca57..c0bb021bc25 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_packq_impl.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_packq_impl.cpp @@ -1,13 +1,13 @@ -#include "jblas/jit_blas_prologue_b.h" -#include "../include/jblas_weightonly_dispatcher.hpp" +#include "bestla/bestla_prologue_b.h" +#include "../include/bestla_weightonly_dispatcher.hpp" namespace woq { -template +template void execute_qpack(woq_packq_param* p, woq_packq_ctx* ctx) { - using proB = jblas::prologue_b::gemm::WeightKBlockNInteger; + using proB = bestla::prologue_b::gemm::WeightKBlockNInteger; static proB ker; - auto qpackw = ker.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], - scale2jblasdt_map[p->scale_type], JBLAS_DTYPE::BF16, p->asym); + auto qpackw = ker.createStorage(ctx->n, ctx->k, p->blocksize, wei2bestladt_map[p->weight_type], + scale2bestladt_map[p->scale_type], BTLA_DTYPE::BF16, p->asym); if (p->enable_act_shuffle) ker.enableShuffle(&qpackw); *(ctx->output) = torch::empty(qpackw.mSize, torch::kInt8); qpackw.assign(ctx->output->data_ptr()); @@ -17,40 +17,40 @@ void execute_qpack(woq_packq_param* p, woq_packq_ctx* ctx) { p->asym ? 
ctx->zp->data_ptr() : nullptr, &qpackw, &dispatcher_utils::DefaultThreading); } -void jblas_packq(woq_packq_param* p, woq_packq_ctx* ctx) { +void bestla_packq(woq_packq_param* p, woq_packq_ctx* ctx) { TORCH_CHECK(p->weight_type == "int8" || p->weight_type == "int4_clip" || p->weight_type == "int4_fullrange", "Qbits: only support Integer WOQ in PACKQ"); if (p->compute_type == "int8") { - if (dispatcher_utils::check_amx() && p->blocksize % jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { - return execute_qpack, JblasAMX_INT8>(p, ctx); + if (dispatcher_utils::check_amx() && p->blocksize % bestla::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { + return execute_qpack, BTLA_ISA::AMX_INT8>(p, ctx); } if (dispatcher_utils::check_avx512_vnni() && - p->blocksize % jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { - return execute_qpack, JblasAVX512_VNNI>(p, ctx); + p->blocksize % bestla::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { + return execute_qpack, BTLA_ISA::AVX512_VNNI>(p, ctx); } - if (dispatcher_utils::check_avx_vnni() && p->blocksize % jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { - return execute_qpack, JblasAVX_VNNI>(p, ctx); + if (dispatcher_utils::check_avx_vnni() && p->blocksize % bestla::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { + return execute_qpack, BTLA_ISA::AVX_VNNI>(p, ctx); } TORCH_CHECK(false, "Qbits: Illegal config in int8 compute_type, blocksize:", p->blocksize, ", ISA support vnni:", dispatcher_utils::check_avx_vnni()); } if (p->compute_type == "fp32") { if (dispatcher_utils::check_avx512f()) { - return execute_qpack, JblasAVX512F>(p, ctx); + return execute_qpack, BTLA_ISA::AVX512F>(p, ctx); } if (dispatcher_utils::check_avx2()) { - return execute_qpack, JblasAVX2>(p, ctx); + return execute_qpack, BTLA_ISA::AVX2>(p, ctx); } - TORCH_CHECK(false, "Qbits: device ISA must support AVX2 when compute_type==fp32"); + TORCH_CHECK(false, "Qbits: device ISA must support BTLA_ISA::AVX2 when 
compute_type==fp32"); } if (p->compute_type == "bf16") { if (dispatcher_utils::check_amx()) { - return execute_qpack, JblasAMX_BF16>(p, ctx); + return execute_qpack, BTLA_ISA::AMX_BF16>(p, ctx); } TORCH_CHECK(false, "Qbits: device ISA must support AMX-BF16 when compute_type==bf16"); } - TORCH_CHECK(false, "Qbits: unsupported jblas_config, compute_type:", p->compute_type, + TORCH_CHECK(false, "Qbits: unsupported bestla_config, compute_type:", p->compute_type, ", weight_type:", p->weight_type + ", blocksize:", p->blocksize); } } // namespace woq diff --git a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_weightonly_dispatcher.cpp similarity index 54% rename from intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp rename to intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_weightonly_dispatcher.cpp index 2e73a58bb6f..f944cfc8c23 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/jblas_weightonly_dispatcher.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/dispatcher/src/bestla_weightonly_dispatcher.cpp @@ -11,17 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-#include "../include/jblas_weightonly_dispatcher.hpp" -#include "../include/jblas_customop.hpp" +#include "../include/bestla_weightonly_dispatcher.hpp" +#include "../include/bestla_customop.hpp" #include -#include "jblas/jit_blas.h" -#include "jblas/jit_blas_epilogue.h" -#include "jblas/jit_blas_gemm.h" -#include "jblas/jit_blas_parallel.h" -#include "jblas/jit_blas_prologue_b.h" -#include "jblas/jit_blas_prologue_a.h" -#include "jblas/jit_blas_storage.h" -#include "jblas/jit_blas_wrapper.h" +#include "bestla/bestla.h" +#include "bestla/bestla_epilogue.h" +#include "bestla/bestla_gemm.h" +#include "bestla/bestla_parallel.h" +#include "bestla/bestla_prologue_b.h" +#include "bestla/bestla_prologue_a.h" +#include "bestla/bestla_storage.h" +#include "bestla/bestla_wrapper.h" #include #include #include @@ -40,7 +40,7 @@ static int64_t workspace_size = 0; template concept quant_PrologueA = requires { requires !std::is_same_v; - requires !std::is_same_v; + requires !std::is_same_v; }; template @@ -51,25 +51,15 @@ void woq_dequantize(woq_config_param* p, woq_runtime_ctx* ctx) { static PrologueB kernel; // TODO(zhe): using unified StorageWeightKBlockNInteger after sync with neural-speed(with NFloat ProB feature). 
if (ctx->transpose) { - if constexpr (std::is_same_v) { - kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, - dynamic_cast(ctx->deseries_wei), - ctx->output->data_ptr(), ctx->deseries_wei->mK, - &dispatcher_utils::DefaultThreading); - } else { - kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, - ctx->output->data_ptr(), ctx->deseries_wei->mK, - &dispatcher_utils::DefaultThreading); - } + kernel.unpackTransposeWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, + dynamic_cast(ctx->deseries_wei), + ctx->output->data_ptr(), ctx->deseries_wei->mK, + &dispatcher_utils::DefaultThreading); + } else { - if constexpr (std::is_same_v) { - kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, - dynamic_cast(ctx->deseries_wei), - ctx->output->data_ptr(), ctx->deseries_wei->mN, &dispatcher_utils::DefaultThreading); - } else { - kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, ctx->deseries_wei, - ctx->output->data_ptr(), ctx->deseries_wei->mN, &dispatcher_utils::DefaultThreading); - } + kernel.unpackWeight(ctx->deseries_wei->mN, ctx->deseries_wei->mK, + dynamic_cast(ctx->deseries_wei), + ctx->output->data_ptr(), ctx->deseries_wei->mN, &dispatcher_utils::DefaultThreading); } } @@ -80,15 +70,22 @@ void woq_quantize(woq_config_param* p, woq_runtime_ctx* ctx) { using WType = typename Launcher::PrologueB::StorageWeight; WType packedw(0); static Launcher launcher; - if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], - scale2jblasdt_map[p->scale_type], JBLAS_DTYPE::BF16, p->asym); - } else if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], - jblas::utils::jblas_dtype); - } else if constexpr (std::is_same_v) { - packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2jblasdt_map[p->weight_type], - 
scale2jblasdt_map[p->scale_type]); + if constexpr (std::is_same_v) { + TORCH_CHECK(p->scale_type == "fp32" || p->scale_type == "bf16", + "Qbits: scale_type must be fp32/bf16 in NInteger Weight."); + if (p->scale_type == "bf16") TORCH_CHECK(!p->asym, "Qbits: asym is not supported when scale_type==bf16 currently."); + packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2bestladt_map[p->weight_type], + scale2bestladt_map[p->scale_type], BTLA_DTYPE::BF16, p->asym); + } else if constexpr (std::is_same_v) { + if (p->weight_type == "nf4" || p->weight_type == "fp4_e2m1" || p->weight_type == "fp4_e2m1_bnb") { + TORCH_CHECK(p->scale_type == "fp32" || p->scale_type == "bf16", + "Qbits: scale_type must be fp32/bf16 in 4Bit NFloat Weight."); + } else { + TORCH_CHECK(p->scale_type == "fp32" || p->scale_type == "fp8_e8m0", + "Qbits: scale_type must be fp32/fp8_e8m0 in 8Bit NFloat Weight."); + } + packedw = launcher.mProB.createStorage(ctx->n, ctx->k, p->blocksize, wei2bestladt_map[p->weight_type], + scale2bestladt_map[p->scale_type]); } else { assert(0); } @@ -117,7 +114,7 @@ void* get_workspace(int need_size) { TORCH_CHECK(workspace_size >= need_size, "Qbits: workspace size should larger than ", need_size, " bytes"); return workspace; } else { - tmpbuf = jblas::utils::amalloc(need_size); + tmpbuf = bestla::utils::amalloc(need_size); return tmpbuf; } } @@ -132,9 +129,10 @@ void do_compute(woq_config_param* p, woq_runtime_ctx* ctx, ParamA param_a) { using StorageWeight = typename Launcher::PrologueB::StorageWeight; int asym_size = 0, shuf_size = 0; int8_t* tmpbuf = nullptr; - if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || GemmCore::ISA == JblasAVX_VNNI) { - using Parallel = jblas::parallel::gemm::SchedulerKBlockS; - jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); + if constexpr (GemmCore::ISA == BTLA_ISA::AMX_INT8 || GemmCore::ISA == BTLA_ISA::AVX512_VNNI || + GemmCore::ISA == BTLA_ISA::AVX_VNNI) { + 
using Parallel = bestla::parallel::gemm::SchedulerKBlockS; + bestla::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); StorageWeight* packedw = dynamic_cast(ctx->deseries_wei); auto dyn_q_size = param_a.quan->mSize; if (packedw->ShfIndice()) shuf_size = param_a.reordered->mSize; @@ -146,14 +144,14 @@ void do_compute(woq_config_param* p, woq_runtime_ctx* ctx, ParamA param_a) { launcher.mProA.quantize(param_a, ctx->m, ctx->deseries_wei->mK, &dispatcher_utils::DefaultThreading); } typename Launcher::Param args{ - gp, param_a, dynamic_cast(ctx->deseries_wei), param_epi}; + gp, param_a, dynamic_cast(ctx->deseries_wei), param_epi}; if (packedw->ShfIndice()) { - jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); + bestla::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); } else { - jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + bestla::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); } } else { - using Parallel = jblas::parallel::gemm::SchedulerKBlock; + using Parallel = bestla::parallel::gemm::SchedulerKBlock; StorageWeight* packedw = dynamic_cast(ctx->deseries_wei); if (p->asym || packedw->ShfIndice()) { if (p->asym) asym_size = param_a.reduce->mSize; @@ -170,39 +168,24 @@ void do_compute(woq_config_param* p, woq_runtime_ctx* ctx, ParamA param_a) { param_a.indices = packedw->ShfIndice(); } - jblas::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); - if constexpr (std::is_same_v) { - typename Launcher::Param args{ - gp, - param_a, - dynamic_cast(ctx->deseries_wei), - {packedw->template SPtr(), packedw->SDtype(), packedw->CStep(), - p->asym ? packedw->template ZPtr() : nullptr, - p->asym ? param_a.reduce->template RPtr() : nullptr, p->asym ? 
param_a.reduce->lda : -1}, - param_epi}; + bestla::utils::GemmProblem gp(1, ctx->m, ctx->n, ctx->k, p->blocksize); - if (p->asym || packedw->ShfIndice()) { - jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); - } else { - jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); - } - } else { - // TODO(zhe): remove this branch after using NFloat ProB in nerual-speed, only need to reset paramC in differenct - // ProB. - typename Launcher::Param args{gp, - param_a, - dynamic_cast(ctx->deseries_wei), - {packedw->template SPtr(), packedw->SDtype(), packedw->CStep()}, - param_epi}; + typename Launcher::Param args{ + gp, + param_a, + dynamic_cast(ctx->deseries_wei), + {packedw->template SPtr(), packedw->SDtype(), packedw->CStep(), + p->asym ? packedw->template ZPtr() : nullptr, + p->asym ? param_a.reduce->template RPtr() : nullptr, p->asym ? param_a.reduce->lda : -1}, + param_epi}; - if (p->asym || packedw->ShfIndice()) { - jblas::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); - } else { - jblas::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); - } + if (p->asym || packedw->ShfIndice()) { + bestla::parallel::GemmRunWithA(launcher, args, &dispatcher_utils::DefaultThreading); + } else { + bestla::parallel::GemmRun(launcher, args, &dispatcher_utils::DefaultThreading); } } - if (tmpbuf != woq_workspace && tmpbuf != nullptr) jblas::utils::afree(tmpbuf); + if (tmpbuf != woq_workspace && tmpbuf != nullptr) bestla::utils::afree(tmpbuf); if (dispatcher_utils::initer.verbose) { dispatcher_utils::timer.stop(); auto cost_time = dispatcher_utils::timer.get_elapsed_time(); @@ -253,21 +236,22 @@ void execute_task(woq_config_param* p, woq_runtime_ctx* ctx) { } } -template class PrologueB, - template class PrologueA, template class Epilogue> +template class PrologueB, + template class PrologueA, template class Epilogue> void parse_launcher(woq_config_param* p, woq_runtime_ctx* ctx) 
{ - if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || GemmCore::ISA == JblasAVX_VNNI) { - using Launcher = jblas::wrapper::gemm::LauncherIntKBlock; + if constexpr (GemmCore::ISA == BTLA_ISA::AMX_INT8 || GemmCore::ISA == BTLA_ISA::AVX512_VNNI || + GemmCore::ISA == BTLA_ISA::AVX_VNNI) { + using Launcher = bestla::wrapper::gemm::LauncherIntKBlock; return execute_task(p, ctx); } else { - using Launcher = jblas::wrapper::gemm::LauncherKBlock; + using Launcher = bestla::wrapper::gemm::LauncherKBlock; return execute_task(p, ctx); } } -template class PrologueB, - template class PrologueA, dispatcher_utils::QBITS_DT ACT_DT> +template class PrologueB, + template class PrologueA, dispatcher_utils::QBITS_DT ACT_DT> void parse_store(woq_config_param* p, woq_runtime_ctx* ctx) { auto constexpr ISA = GemmCore::ISA; if (p->dst_dt == dispatcher_utils::QBITS_FP32) { @@ -278,12 +262,12 @@ void parse_store(woq_config_param* p, woq_runtime_ctx* ctx) { } } -template class PrologueB> +template class PrologueB> void parse_activation(woq_config_param* p, woq_runtime_ctx* ctx) { - using namespace jblas::prologue_a::gemm; + using namespace bestla::prologue_a::gemm; if (p->src_dt == dispatcher_utils::QBITS_FP32) { - if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || - GemmCore::ISA == JblasAVX_VNNI) { + if constexpr (GemmCore::ISA == BTLA_ISA::AMX_INT8 || GemmCore::ISA == BTLA_ISA::AVX512_VNNI || + GemmCore::ISA == BTLA_ISA::AVX_VNNI) { return parse_store( p, ctx); } else { @@ -292,8 +276,8 @@ void parse_activation(woq_config_param* p, woq_runtime_ctx* ctx) { } } if (p->src_dt == dispatcher_utils::QBITS_BF16) { - if constexpr (GemmCore::ISA == JblasAMX_INT8 || GemmCore::ISA == JblasAVX512_VNNI || - GemmCore::ISA == JblasAVX_VNNI) { + if constexpr (GemmCore::ISA == BTLA_ISA::AMX_INT8 || GemmCore::ISA == BTLA_ISA::AVX512_VNNI || + GemmCore::ISA == BTLA_ISA::AVX_VNNI) { return parse_store( p, ctx); } else { @@ -305,22 +289,19 @@ 
void parse_activation(woq_config_param* p, woq_runtime_ctx* ctx) { template void parse_weight(woq_config_param* p, woq_runtime_ctx* ctx) { - using namespace jblas::prologue_b::gemm; + using namespace bestla::prologue_b::gemm; if (p->weight_type == "int8" || p->weight_type == "int4_clip" || p->weight_type == "int4_fullrange") { return parse_activation(p, ctx); } - if (p->weight_type == "nf4" || p->weight_type == "fp4_e2m1_bnb" || p->weight_type == "fp4_e2m1") { - TORCH_CHECK(p->asym == false, "Qbits: only support sym alg in fp4/nf4 woq weight."); - if constexpr (GemmCore::ISA != JblasAMX_INT8 && GemmCore::ISA != JblasAVX512_VNNI && GemmCore::ISA != JblasAVX_VNNI) - return parse_activation(p, ctx); - } - if (p->weight_type == "fp8_e4m3" || p->weight_type == "fp8_e5m2") { - TORCH_CHECK(p->asym == false, "Qbits: only support sym alg in fp8 woq weight."); - if constexpr (GemmCore::ISA != JblasAMX_INT8 && GemmCore::ISA != JblasAVX512_VNNI && GemmCore::ISA != JblasAVX_VNNI) - return parse_activation(p, ctx); + if (p->weight_type == "nf4" || p->weight_type == "fp4_e2m1_bnb" || p->weight_type == "fp4_e2m1" || + p->weight_type == "fp8_e4m3" || p->weight_type == "fp8_e5m2") { + TORCH_CHECK(!p->asym, "Qbits: float-weight unsupports asym quantization."); + if constexpr (GemmCore::ISA != BTLA_ISA::AMX_INT8 && GemmCore::ISA != BTLA_ISA::AVX512_VNNI && + GemmCore::ISA != BTLA_ISA::AVX_VNNI) + return parse_activation(p, ctx); } TORCH_CHECK(false, - "Qbits: unsupported jblas_config, compute_type==" + p->compute_type + " weight_type==" + p->weight_type); + "Qbits: unsupported bestla_config, compute_type==" + p->compute_type + " weight_type==" + p->weight_type); } template @@ -329,72 +310,72 @@ void parse_gemm_core_online(woq_config_param* p, woq_runtime_ctx* ctx) { p->blocksize = p->blocksize == -1 ? 
ctx->k : p->blocksize; if (p->compute_type == "int8") { TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") - if (dispatcher_utils::check_amx() && p->blocksize % jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { - return parse_weight>(p, ctx); + if (dispatcher_utils::check_amx() && p->blocksize % bestla::gemm::ICoreRowNAmxint8KBlock<48, 16>::KTILE == 0) { + return parse_weight>(p, ctx); } if (dispatcher_utils::check_avx512_vnni() && - p->blocksize % jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { - return parse_weight>(p, ctx); + p->blocksize % bestla::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::KTILE == 0) { + return parse_weight>(p, ctx); } - if (dispatcher_utils::check_avx_vnni() && p->blocksize % jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { - return parse_weight>(p, ctx); + if (dispatcher_utils::check_avx_vnni() && p->blocksize % bestla::gemm::ICoreRowNAvxvnniKBlock<48, 2>::KTILE == 0) { + return parse_weight>(p, ctx); } TORCH_CHECK(false, "Qbits: Illegal config in int8 compute_type, blocksize:", p->blocksize, ", ISA support vnni:", dispatcher_utils::check_avx_vnni()); } if (p->compute_type == "fp32") { if (dispatcher_utils::check_avx512f()) { - return parse_weight>(p, ctx); + return parse_weight>(p, ctx); } if (dispatcher_utils::check_avx2()) { - return parse_weight>(p, ctx); + return parse_weight>(p, ctx); } - TORCH_CHECK(false, "Qbits: device ISA must support AVX2 when compute_type==fp32"); + TORCH_CHECK(false, "Qbits: device ISA must support BTLA_ISA::AVX2 when compute_type==fp32"); } if (p->compute_type == "bf16") { if (dispatcher_utils::check_amx()) { - return parse_weight>(p, ctx); + return parse_weight>(p, ctx); } TORCH_CHECK(false, "Qbits: device ISA must support AMX-BF16 when compute_type==bf16"); } - TORCH_CHECK(false, "Qbits: unsupported jblas_config, compute_type:", p->compute_type, + TORCH_CHECK(false, "Qbits: unsupported bestla_config, compute_type:", 
p->compute_type, ", weight_type:", p->weight_type + ", blocksize:", p->blocksize); } template void parse_gemm_core_offline(woq_config_param* p, woq_runtime_ctx* ctx) { - ctx->deseries_wei = jblas::storage::gemm::PackedWeightParser::deserialBuffer(ctx->weight->data_ptr()); - p->blocksize = dynamic_cast(ctx->deseries_wei)->mBlockSize; - auto NTile = jblas::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, jblas::gemm::CoreAttr::NTILE_MASK, - jblas::gemm::CoreAttr::NTILE_SHIFT); - auto CType = jblas::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, jblas::gemm::CoreAttr::COMP_MASK, - jblas::gemm::CoreAttr::COMP_SHIFT); - if (CType == uint32_t(jblas::gemm::CompType::COMP_INT8_US_INT32)) { + ctx->deseries_wei = bestla::storage::gemm::PackedWeightParser::deserialBuffer(ctx->weight->data_ptr()); + p->blocksize = dynamic_cast(ctx->deseries_wei)->mBlockSize; + auto NTile = bestla::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, bestla::gemm::CoreAttr::NTILE_MASK, + bestla::gemm::CoreAttr::NTILE_SHIFT); + auto CType = bestla::gemm::CoreAttr::get_mask_val(ctx->deseries_wei->mCoreId, bestla::gemm::CoreAttr::COMP_MASK, + bestla::gemm::CoreAttr::COMP_SHIFT); + if (CType == uint32_t(bestla::gemm::CompType::COMP_INT8_US_INT32)) { TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") - if (NTile == jblas::gemm::ICoreRowNAmxint8KBlock<48, 16>::NTILE && dispatcher_utils::check_amx()) { - return parse_weight>(p, ctx); + if (NTile == bestla::gemm::ICoreRowNAmxint8KBlock<48, 16>::NTILE && dispatcher_utils::check_amx()) { + return parse_weight>(p, ctx); } } - if (CType == uint32_t(jblas::gemm::CompType::COMP_INT8_US_FP32)) { + if (CType == uint32_t(bestla::gemm::CompType::COMP_INT8_US_FP32)) { TORCH_CHECK(p->asym == false, "Qbits: int8 compute_type dosen't support asym quantization currently.") - if (NTile == jblas::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::NTILE && dispatcher_utils::check_avx512_vnni()) { - return 
parse_weight>(p, ctx); + if (NTile == bestla::gemm::ICoreRowNAvx512vnniKBlock<48, 4>::NTILE && dispatcher_utils::check_avx512_vnni()) { + return parse_weight>(p, ctx); } - if (NTile == jblas::gemm::ICoreRowNAvxvnniKBlock<48, 2>::NTILE && dispatcher_utils::check_avx_vnni()) { - return parse_weight>(p, ctx); + if (NTile == bestla::gemm::ICoreRowNAvxvnniKBlock<48, 2>::NTILE && dispatcher_utils::check_avx_vnni()) { + return parse_weight>(p, ctx); } } - if (CType == uint32_t(jblas::gemm::CompType::COMP_FP32)) { - if (NTile == jblas::gemm::SCoreRowNAvx512f<48, 8>::NTILE && dispatcher_utils::check_avx512f()) { - return parse_weight>(p, ctx); + if (CType == uint32_t(bestla::gemm::CompType::COMP_FP32)) { + if (NTile == bestla::gemm::SCoreRowNAvx512f<48, 8>::NTILE && dispatcher_utils::check_avx512f()) { + return parse_weight>(p, ctx); } - if (NTile == jblas::gemm::SCoreRowNAvx2<48, 2>::NTILE && dispatcher_utils::check_avx2()) { - return parse_weight>(p, ctx); + if (NTile == bestla::gemm::SCoreRowNAvx2<48, 2>::NTILE && dispatcher_utils::check_avx2()) { + return parse_weight>(p, ctx); } } - if (CType == uint32_t(jblas::gemm::CompType::COMP_BF16_FP32)) { - if (NTile == jblas::gemm::HCoreRowNAmxbf16<64, 16>::NTILE && dispatcher_utils::check_amx()) { - return parse_weight>(p, ctx); + if (CType == uint32_t(bestla::gemm::CompType::COMP_BF16_FP32)) { + if (NTile == bestla::gemm::HCoreRowNAmxbf16<64, 16>::NTILE && dispatcher_utils::check_amx()) { + return parse_weight>(p, ctx); } } TORCH_CHECK(false, "Qbits: parse packweight fail, NTile:", NTile, ", CType:", CType, diff --git a/intel_extension_for_transformers/llm/operator/csrc/include/dropout.hpp b/intel_extension_for_transformers/llm/operator/csrc/include/dropout.hpp index 81144c28f56..293e7fbae66 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/include/dropout.hpp +++ b/intel_extension_for_transformers/llm/operator/csrc/include/dropout.hpp @@ -11,8 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "../dispatcher/include/jblas_weightonly_dispatcher.hpp" -#include "jblas/jit_blas_utils.h" +#include "../dispatcher/include/bestla_weightonly_dispatcher.hpp" +#include "bestla/bestla_utils.h" #include #include #include @@ -29,15 +29,15 @@ class RandBuffer { public: RandBuffer() { std::srand((int)std::time(0)); - auto thread_num = jblas::device::CpuDevice::getInstance()->getThreads(); - load_buffer = jblas::utils::amalloc(thread_num * 16); - iws = jblas::utils::amalloc(thread_num * 16); + auto thread_num = bestla::device::CpuDevice::getInstance()->getThreads(); + load_buffer = bestla::utils::amalloc(thread_num * 16); + iws = bestla::utils::amalloc(thread_num * 16); for (int i = 0; i < thread_num; i++) initMWC(rand(), i); } ~RandBuffer() { - if (iws != NULL) jblas::utils::afree(iws); - if (load_buffer != NULL) jblas::utils::afree(load_buffer); + if (iws != NULL) bestla::utils::afree(iws); + if (load_buffer != NULL) bestla::utils::afree(load_buffer); } #pragma GCC push_options diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp b/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp index bb57e7bd4d3..35d53d17782 100755 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits.cpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
#include "dispatcher/include/dispatcher_utils.hpp" -#include "dispatcher/include/jblas_gemm_dispatcher.hpp" -#include "dispatcher/include/jblas_weightonly_dispatcher.hpp" +#include "dispatcher/include/bestla_gemm_dispatcher.hpp" +#include "dispatcher/include/bestla_weightonly_dispatcher.hpp" #include "include/dropout.hpp" #include #include @@ -45,7 +45,7 @@ static void inline init_woq_config_param(woq::woq_config_param* p, woq::woq_runt case woq::WOQ_QUANTIZE: case woq::WOQ_DEQUANTIZE: p->src_dt = dispatcher_utils::QBITS_FP32; - p->dst_dt = dispatcher_utils::QBITS_FP32; // jblas dosen't care about dst_dt in quantize/dequant task,so set fp32 + p->dst_dt = dispatcher_utils::QBITS_FP32; // bestla dosen't care about dst_dt in quantize/dequant task,so set fp32 // as default. break; case woq::WOQ_LINEAR: @@ -68,7 +68,7 @@ static torch::Tensor woq_packq(const torch::Tensor& qweight, const torch::Tensor &output, static_cast(qweight.sizes()[1]), static_cast(qweight.sizes()[0])}; - woq::jblas_packq(&p, &ctx); + woq::bestla_packq(&p, &ctx); return output; } @@ -121,11 +121,11 @@ static void set_woq_workspace(const torch::Tensor& workspace) { woq::set_woq_workspace(const_cast(&workspace)); } -static void jblasop_gemm(const torch::Tensor& matA, const torch::Tensor& matB, const torch::Tensor& matC, +static void bestlaop_gemm(const torch::Tensor& matA, const torch::Tensor& matB, const torch::Tensor& matC, bool matB_trans) { TORCH_CHECK(matA.dim() == 2 && matB.dim() == 2 && matC.dim() == 2, - "Qbits: only support 2-dim input-tensor in jblas gemm op."); - jblas_gemm::jblas_gemm_runtime_ctx ctx; + "Qbits: only support 2-dim input-tensor in bestla gemm op."); + bestla_gemm::bestla_gemm_runtime_ctx ctx; ctx.matA = const_cast(&matA); ctx.matB = const_cast(&matB); ctx.matC = const_cast(&matC); @@ -134,21 +134,21 @@ static void jblasop_gemm(const torch::Tensor& matA, const torch::Tensor& matB, c ctx.n = static_cast(matC.sizes()[1]); ctx.k = static_cast(matA.sizes()[1]); 
TORCH_CHECK(matB_trans ? ctx.k == matB.sizes()[1] : ctx.k == matB.sizes()[0], - "QBits: input shape mismatch in jblas gemm op."); - return jblas_gemm::dispatch_jblas_gemm(&ctx); + "QBits: input shape mismatch in bestla gemm op."); + return bestla_gemm::dispatch_bestla_gemm(&ctx); } static torch::Tensor qbits_dropout_fwd(torch::Tensor& output, double p) { return dropout_fwd(output, p); } static void qbits_dropout_bwd(torch::Tensor& grad, torch::Tensor& scale) { dropout_bwd(grad, scale); } -TORCH_LIBRARY(jblasop, m) { +TORCH_LIBRARY(bestlaop, m) { m.def("woq_quantize", &woq_quantize); m.def("woq_linear", &woq_linear); m.def("woq_dequantize", &woq_dequantize); m.def("woq_packq", &woq_packq); m.def("set_woq_workspace", &set_woq_workspace); - m.def("matmul", &jblasop_gemm); + m.def("matmul", &bestlaop_gemm); } TORCH_LIBRARY(qbits_customop, m) { diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_matmul.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_matmul.py index b36246eb3e5..dad665f8d69 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_matmul.py +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_matmul.py @@ -42,7 +42,7 @@ def test(m, n, k, trans_matB, dt, dump_tensor_info=True): if dt == "bf16": tar_dst = tar_dst.to(torch.bfloat16) wei = wei.to(torch.bfloat16) - torch.ops.jblasop.matmul(activation, wei, tar_dst, trans_matB) + torch.ops.bestlaop.matmul(activation, wei, tar_dst, trans_matB) if trans_matB: cp_wei = torch.transpose(cp_wei, 0, 1) ref_dst = torch.matmul(activation_cp, cp_wei) diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py index 98cb106a0c1..d58388934b8 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_packq.py @@ -45,16 +45,16 @@ def test(m, 
k, n, weight_type, scale_type, compute_type, asym, blocksize, dump_t cvt_idx = convert_idx(g_idx, k, blocksize) zp = torch.randint(-4, 4, [k//blocksize, n], dtype=torch.int8) scale = torch.rand(k//blocksize, n, dtype=torch.float) - packw = torch.ops.jblasop.woq_packq( + packw = torch.ops.bestlaop.woq_packq( raw_s8_wei, scale, zp, g_idx, weight_type, scale_type, compute_type, asym, blocksize) revert_wei = torch.zeros(k, n, dtype=torch.float) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( packw, revert_wei, False, compute_type, weight_type, scale_type) ref_act = torch.rand(m, k, dtype=torch.float) tar_act = ref_act.clone() ref_act = torch.index_select(ref_act, 1, cvt_idx) tar_dst = torch.zeros(m, n, dtype=torch.float) - torch.ops.jblasop.woq_linear( + torch.ops.bestlaop.woq_linear( tar_act, packw, torch.empty(0), tar_dst, n, False, compute_type, weight_type, scale_type, asym) ref_dst = torch.matmul(ref_act, revert_wei) if dump_tensor: diff --git a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py index b36b2c6edae..e8a28c2e66d 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py +++ b/intel_extension_for_transformers/llm/operator/csrc/qbits_ut/test_weightonly.py @@ -22,7 +22,7 @@ "fp8_e5m2": {"fp32", "bf16"}, "fp8_e4m3": {"fp32", "bf16"} } -scale_configs = {"int8": {"fp32"}, "int4_clip": {"fp32"}, "int4_fullrange": {"fp32"}, "fp4_e2m1_bnb": {"fp32"}, "fp4_e2m1": {"fp32"}, "nf4": {"fp32"}, +scale_configs = {"int8": {"fp32", "bf16"}, "int4_clip": {"fp32", "bf16"}, "int4_fullrange": {"fp32", "bf16"}, "fp4_e2m1_bnb": {"fp32", "bf16"}, "fp4_e2m1": {"fp32", "bf16"}, "nf4": {"fp32", "bf16"}, "fp8_e5m2": {"fp32", "fp8_e8m0"}, "fp8_e4m3": {"fp32", "fp8_e8m0"}} asym_configs = {"int8", "int4_clip", "int4_fullrange"} @@ -35,7 +35,7 @@ @pytest.mark.parametrize("blocksize", [128, -1]) 
@pytest.mark.parametrize("compute_type", ["int8", "bf16", "fp32"]) @pytest.mark.parametrize("weight_type", ["int8", "int4_clip", "int4_fullrange", "nf4", "fp4_e2m1_bnb", "fp4_e2m1", "fp8_e5m2", "fp8_e4m3"]) -@pytest.mark.parametrize("scale_type", ["fp32", "fp8_e8m0"]) +@pytest.mark.parametrize("scale_type", ["fp32", "bf16", "fp8_e8m0"]) @pytest.mark.parametrize("asym", [True, False]) @pytest.mark.parametrize("transpose", [True, False]) @pytest.mark.parametrize("add_bias", [True, False]) @@ -44,7 +44,7 @@ def test(m, n, k, blocksize, compute_type, weight_type, scale_type, asym, transpose, add_bias, src_dt, dst_dt, dump_tensor_info=True): if compute_type not in cmpt_configs[weight_type] or scale_type not in scale_configs[weight_type]: pytest.skip() - if asym and (weight_type not in asym_configs or compute_type == "int8"): + if asym and (weight_type not in asym_configs or compute_type == "int8" or scale_type!="fp32"): pytest.skip() torch.manual_seed(0) ref_activation = torch.rand(m, k, dtype=torch.float) @@ -58,10 +58,10 @@ def test(m, n, k, blocksize, compute_type, weight_type, scale_type, asym, transp raw_wei = torch.rand(wei_row, wei_col, dtype=torch.float) if dump_tensor_info: print(raw_wei) - compress_wei = torch.ops.jblasop.woq_quantize( + compress_wei = torch.ops.bestlaop.woq_quantize( raw_wei, transpose, blocksize, compute_type, weight_type, scale_type, asym) revert_wei = torch.zeros(wei_row, wei_col, dtype=torch.float) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( compress_wei, revert_wei, transpose, compute_type, weight_type, scale_type) bias = torch.rand(n, dtype=torch.float)*10 if dump_tensor_info: @@ -72,7 +72,7 @@ def test(m, n, k, blocksize, compute_type, weight_type, scale_type, asym, transp if transpose: revert_wei = torch.transpose(revert_wei, 0, 1) ref_dst = torch.matmul(ref_activation, revert_wei) - torch.ops.jblasop.woq_linear( + torch.ops.bestlaop.woq_linear( tar_activation, compress_wei, bias, tar_dst, n, add_bias, 
compute_type, weight_type, scale_type, asym) if dst_dt == "bf16": tar_dst = tar_dst.to(torch.float) diff --git a/intel_extension_for_transformers/llm/operator/csrc/src/dropout.cpp b/intel_extension_for_transformers/llm/operator/csrc/src/dropout.cpp index 8abe806d56f..0fef273def1 100644 --- a/intel_extension_for_transformers/llm/operator/csrc/src/dropout.cpp +++ b/intel_extension_for_transformers/llm/operator/csrc/src/dropout.cpp @@ -17,9 +17,9 @@ #include #include "../include/dropout.hpp" -#include "jblas/jit_blas_utils.h" -#include "jblas/kernel_avx2.h" -#include "jblas/kernel_avx512f.h" +#include "bestla/bestla_utils.h" +#include "bestla/kernel_avx2.h" +#include "bestla/kernel_avx512f.h" #pragma GCC push_options #pragma GCC target("avx512f", "avx512bw", "avx512vl") @@ -52,8 +52,8 @@ static inline void write_rand(char* data, int thread_idx, int64_t elt_num, int d bf16_ans = (__m256i)_mm512_cvtneps_pbh(ans); bf16_mul_scale = (__m256i)_mm512_cvtneps_pbh(mul_scale); #else - bf16_ans = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(ans); - bf16_mul_scale = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(mul_scale); + bf16_ans = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(ans); + bf16_mul_scale = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(mul_scale); #endif _mm256_storeu_si256((__m256i*)(data + i * dt_size), bf16_ans); _mm256_storeu_si256((__m256i*)(mask_ptr + i * dt_size), bf16_mul_scale); @@ -81,8 +81,8 @@ static inline void write_rand(char* data, int thread_idx, int64_t elt_num, int d bf16_ans = (__m256i)_mm512_cvtneps_pbh(ans); bf16_mul_scale = (__m256i)_mm512_cvtneps_pbh(mul_scale); #else - bf16_ans = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(ans); - bf16_mul_scale = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(mul_scale); + bf16_ans = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(ans); + bf16_mul_scale = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(mul_scale); #endif _mm256_mask_storeu_epi16(data + i * dt_size, ls_mask, bf16_ans); _mm256_mask_storeu_epi16(mask_ptr + i * dt_size, 
ls_mask, bf16_mul_scale); @@ -109,7 +109,7 @@ static inline void mul(char* grad, int thread_idx, int64_t elt_num, int dt_size, #if CompileBF16() bf16_ans = (__m256i)_mm512_cvtneps_pbh(ans); #else - bf16_ans = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(ans); + bf16_ans = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(ans); #endif _mm256_storeu_si256((__m256i*)(grad + i * dt_size), bf16_ans); } @@ -132,7 +132,7 @@ static inline void mul(char* grad, int thread_idx, int64_t elt_num, int dt_size, #if CompileBF16() bf16_ans = (__m256i)_mm512_cvtneps_pbh(ans); #else - bf16_ans = jblas::kernel::avx512f::zmm_cvt_fp32_bf16(ans); + bf16_ans = bestla::kernel::avx512f::zmm_cvt_fp32_bf16(ans); #endif _mm256_mask_storeu_epi16(grad + i * dt_size, ls_mask, bf16_ans); } @@ -164,8 +164,8 @@ static inline void write_rand_avx2(char* data, int thread_idx, int64_t elt_num, auto bf16_v = _mm_loadu_si128(reinterpret_cast<__m128i*>(data + i * dt_size)); auto fp32_v = _mm256_castsi256_ps(_mm256_bslli_epi128(_mm256_cvtepu16_epi32(bf16_v), 2)); fp32_v = _mm256_mul_ps(fp32_v, mul_scale); - auto ans = jblas::kernel::avx2::cvt_fp32_to_bf16(fp32_v, &bf16_and_helper, &bf16_add_helper); - auto bf16_scale = jblas::kernel::avx2::cvt_fp32_to_bf16(mul_scale, &bf16_and_helper, &bf16_add_helper); + auto ans = bestla::kernel::avx2::cvt_fp32_to_bf16(fp32_v, &bf16_and_helper, &bf16_add_helper); + auto bf16_scale = bestla::kernel::avx2::cvt_fp32_to_bf16(mul_scale, &bf16_and_helper, &bf16_add_helper); _mm_store_ps(reinterpret_cast(data + i * dt_size), _mm_castsi128_ps(ans)); _mm_store_ps(reinterpret_cast(mask_ptr + i * dt_size), _mm_castsi128_ps(bf16_scale)); } @@ -185,8 +185,8 @@ static inline void write_rand_avx2(char* data, int thread_idx, int64_t elt_num, fp_mask_ptr[i + j] = mul_scale_arr[j]; } } else { - jblas::utils::bf16* bf16_data_ptr = reinterpret_cast(data); - jblas::utils::bf16* bf16_mask_ptr = reinterpret_cast(mask_ptr); + bestla::utils::bf16* bf16_data_ptr = reinterpret_cast(data); + 
bestla::utils::bf16* bf16_mask_ptr = reinterpret_cast(mask_ptr); mul_scale = _mm256_blendv_ps(mul_scale, ymm_scale, zero_mask); float mul_scale_arr[8]; _mm256_storeu_ps(mul_scale_arr, mul_scale); @@ -215,7 +215,7 @@ static inline void mul_avx2(char* grad, int thread_idx, int64_t elt_num, int dt_ auto fp32_grad = _mm256_castsi256_ps(_mm256_bslli_epi128(_mm256_cvtepu16_epi32(bf16_grad), 2)); auto fp32_mask = _mm256_castsi256_ps(_mm256_bslli_epi128(_mm256_cvtepu16_epi32(bf16_mask), 2)); fp32_grad = _mm256_mul_ps(fp32_grad, fp32_mask); - auto ans = jblas::kernel::avx2::cvt_fp32_to_bf16(fp32_grad, &bf16_and_helper, &bf16_add_helper); + auto ans = bestla::kernel::avx2::cvt_fp32_to_bf16(fp32_grad, &bf16_and_helper, &bf16_add_helper); _mm_store_ps(reinterpret_cast(grad + i * dt_size), _mm_castsi128_ps(ans)); } } @@ -227,8 +227,8 @@ static inline void mul_avx2(char* grad, int thread_idx, int64_t elt_num, int dt_ fp_data_ptr[i + j] = fp_data_ptr[i + j] * fp_mask_ptr[i + j]; } } else { - jblas::utils::bf16* bf16_data_ptr = reinterpret_cast(grad); - jblas::utils::bf16* bf16_mask_ptr = reinterpret_cast(mask_ptr); + bestla::utils::bf16* bf16_data_ptr = reinterpret_cast(grad); + bestla::utils::bf16* bf16_mask_ptr = reinterpret_cast(mask_ptr); for (int j = 0; j < (elt_num - align_elt_num); j++) { bf16_data_ptr[i + j].fromfloat(bf16_data_ptr[i + j].tofloat() * bf16_mask_ptr[i + j].tofloat()); } @@ -240,7 +240,7 @@ static inline void mul_avx2(char* grad, int thread_idx, int64_t elt_num, int dt_ torch::Tensor dropout_fwd(torch::Tensor& output, double p) { auto elt_num = output.numel(); auto core_num = omp_get_max_threads(); - auto task_each_core = jblas::utils::updiv(int(elt_num / core_num), 16) * 16; + auto task_each_core = bestla::utils::updiv(int(elt_num / core_num), 16) * 16; torch::Tensor mask = torch::empty_like(output); #pragma omp parallel { @@ -279,7 +279,7 @@ torch::Tensor dropout_fwd(torch::Tensor& output, double p) { void dropout_bwd(torch::Tensor& grad, torch::Tensor& 
mask) { auto elt_num = grad.numel(); auto core_num = omp_get_max_threads(); - auto task_each_core = jblas::utils::updiv(int(elt_num / core_num), 16) * 16; + auto task_each_core = bestla::utils::updiv(int(elt_num / core_num), 16) * 16; #pragma omp parallel { auto ker_idx = omp_get_thread_num(); diff --git a/intel_extension_for_transformers/llm/quantization/autograd/functions.py b/intel_extension_for_transformers/llm/quantization/autograd/functions.py index aa202ad3486..be79f054814 100644 --- a/intel_extension_for_transformers/llm/quantization/autograd/functions.py +++ b/intel_extension_for_transformers/llm/quantization/autograd/functions.py @@ -30,7 +30,7 @@ class MatMulKBit(torch.autograd.Function): def forward(ctx, A, B, out=None, bias=None, compute_dtype=None, weight_dtype=None, scale_dtype=None): # # 1. Dequantize # B_dequant = torch.zeros(out.shape[-1], A.shape[-1], dtype=torch.float) - # torch.ops.jblasop.woq_dequantize( + # torch.ops.bestlaop.woq_dequantize( # B, B_dequant, True, compute_dtype, weight_dtype, scale_dtype) # B_dequant = B_dequant.to(dtype=A.dtype) @@ -49,7 +49,7 @@ def forward(ctx, A, B, out=None, bias=None, compute_dtype=None, weight_dtype=Non # 2. 
Matmul # output = torch.nn.functional.linear(A, B_dequant, bias) - torch.ops.jblasop.woq_linear( + torch.ops.bestlaop.woq_linear( A, B.data, bias, out, out.shape[-1], bias is not None, compute_dtype, weight_dtype, scale_dtype, False) output = out @@ -77,7 +77,7 @@ def backward(ctx, grad_output): grad_A, grad_B, grad_bias = None, None, None B_dequant = torch.zeros(grad_output.shape[-1], A.shape[-1], dtype=torch.float) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( B, B_dequant, True, ctx.compute_dtype, ctx.weight_dtype, ctx.scale_dtype) B = B_dequant @@ -104,7 +104,7 @@ def matmul_kbit(A: Tensor, return MatMulKBit.apply(A, B, out, bias, compute_dtype, weight_dtype, scale_dtype) else: - torch.ops.jblasop.woq_linear(A, B.data, bias, out, out.shape[-1], bias + torch.ops.bestlaop.woq_linear(A, B.data, bias, out, out.shape[-1], bias is not None, compute_dtype, weight_dtype, scale_dtype, False) return out diff --git a/intel_extension_for_transformers/llm/quantization/nn/modules.py b/intel_extension_for_transformers/llm/quantization/nn/modules.py index 9074d49b9d3..dd2100d0a65 100644 --- a/intel_extension_for_transformers/llm/quantization/nn/modules.py +++ b/intel_extension_for_transformers/llm/quantization/nn/modules.py @@ -133,7 +133,7 @@ def forward(self, x: torch.Tensor): def set_weights_bias(self, weight_data, bias=None): shape = weight_data.shape - weight = torch.ops.jblasop.woq_quantize( + weight = torch.ops.bestlaop.woq_quantize( weight_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) weight.resize_(shape) self.weight = ParamsQBits(data=weight, @@ -202,7 +202,7 @@ def merge(self, safe_merge: bool = False) -> None: f"You are now additionally merging {','.join(self.active_adapters)}." 
) w_dequant = torch.zeros(self.out_features, self.in_features, dtype=list(self.lora_A.values())[0].weight.dtype) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( self.weight.data, w_dequant, True, self.compute_dtype, self.weight_dtype, self.scale_dtype) w_data = w_dequant for active_adapter in self.active_adapters: @@ -221,7 +221,7 @@ def merge(self, safe_merge: bool = False) -> None: w_data = orig_weights else: w_data += self.get_delta_weight(active_adapter) - weight = torch.ops.jblasop.woq_quantize( + weight = torch.ops.bestlaop.woq_quantize( w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) self.weight = ParamsQBits( data=weight, requires_grad=False, quant_state={"scheme": self.scheme}, blocksize=self.blocksize, @@ -233,14 +233,14 @@ def unmerge(self) -> None: print("Already unmerged. Nothing to do.") return w_dequant = torch.zeros(self.out_features, self.in_features, dtype=list(self.lora_A.values())[0].weight.dtype) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( self.weight.data, w_dequant, True, self.compute_dtype, self.weight_dtype, self.scale_dtype) w_data = w_dequant while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): w_data -= self.get_delta_weight(active_adapter) - weight = torch.ops.jblasop.woq_quantize( + weight = torch.ops.bestlaop.woq_quantize( w_data, True, self.blocksize, self.compute_dtype, self.weight_dtype, self.scale_dtype, False) self.weight = ParamsQBits( data=weight, requires_grad=False, quant_state={"scheme": self.scheme}, blocksize=self.blocksize, diff --git a/tests/CI/test_weight_only.py b/tests/CI/test_weight_only.py index de26a7bfe7d..45ddc93ac69 100644 --- a/tests/CI/test_weight_only.py +++ b/tests/CI/test_weight_only.py @@ -85,10 +85,10 @@ def test_woq_config_post_init_runtime(self): def test_int8(self): raw_wei = torch.rand(2, 32, dtype=torch.float) - compress_wei = 
torch.ops.jblasop.woq_quantize( + compress_wei = torch.ops.bestlaop.woq_quantize( raw_wei, True, 32, "fp32", "int8", "fp32", False) revert_wei = torch.zeros(2, 32, dtype=torch.float) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( compress_wei, revert_wei, True, "fp32", "int8", "fp32") for bias in [True, False]: model = M(with_bias=bias) @@ -106,10 +106,10 @@ def test_int8(self): def test_int4(self): raw_wei = torch.rand(2, 32, dtype=torch.float) - compress_wei = torch.ops.jblasop.woq_quantize( + compress_wei = torch.ops.bestlaop.woq_quantize( raw_wei, True, 32, "fp32", "int4_fullrange", "fp32", False) revert_wei = torch.zeros(2, 32, dtype=torch.float) - torch.ops.jblasop.woq_dequantize( + torch.ops.bestlaop.woq_dequantize( compress_wei, revert_wei, True, "fp32", "int4_fullrange", "fp32") for bias in [True, False]: model = M(with_bias=bias) From 8500e7fecc53cb56e947fb630866c690d1e9da9a Mon Sep 17 00:00:00 2001 From: WeiweiZhang1 <109071285+WeiweiZhang1@users.noreply.github.com> Date: Fri, 12 Jan 2024 17:00:19 +0800 Subject: [PATCH 043/101] fix Qwen-72b eval precision issue (#1134) * fix Qwen-72b eval precision issue Signed-off-by: Zhang, Weiwei1 --- .../evaluation/lm_eval/models/huggingface.py | 82 +++++++++++++------ 1 file changed, 57 insertions(+), 25 deletions(-) diff --git a/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py b/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py index 32a6da62f0e..27bda6e6045 100644 --- a/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py +++ b/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py @@ -30,6 +30,7 @@ from lm_eval import utils from lm_eval.base import BaseLM +import re TokenSequence = Union[List[int], torch.LongTensor, torch.Tensor, BatchEncoding] @@ -324,17 +325,32 @@ def _create_auto_model( with init_empty_weights(): if self._config.model_type =="chatglm": self.AUTO_MODEL_CLASS = 
transformers.AutoModel - model = self.AUTO_MODEL_CLASS.from_pretrained( - pretrained, - revision=revision + ("/" + subfolder if subfolder is not None else ""), - low_cpu_mem_usage=low_cpu_mem_usage, - device_map=device_map, - max_memory=max_memory, - offload_folder=offload_folder, - load_in_8bit=load_in_8bit, - trust_remote_code=trust_remote_code, - torch_dtype=torch_dtype - ) + if re.search("qwen-72b", self._config._name_or_path.lower()): + model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision + ("/" + subfolder if subfolder is not None else ""), + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + load_in_8bit=load_in_8bit, + trust_remote_code=trust_remote_code, + torch_dtype=torch_dtype, + fp32=(bool(torch_dtype==torch.float32)), + fp16=(bool(torch_dtype==torch.float16)), + ) + else: + model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision + ("/" + subfolder if subfolder is not None else ""), + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + load_in_8bit=load_in_8bit, + trust_remote_code=trust_remote_code, + torch_dtype=torch_dtype + ) else: if load_in_4bit: assert ( @@ -354,18 +370,34 @@ def _create_auto_model( model_kwargs[ "bnb_4bit_use_double_quant" ] = bnb_4bit_use_double_quant - model = self.AUTO_MODEL_CLASS.from_pretrained( - pretrained, - revision=revision + ("/" + subfolder if subfolder is not None else ""), - low_cpu_mem_usage=low_cpu_mem_usage, - device_map=device_map, - max_memory=max_memory, - offload_folder=offload_folder, - load_in_8bit=load_in_8bit, - trust_remote_code=trust_remote_code, - torch_dtype=torch_dtype, - **model_kwargs, - ) + if re.search("qwen-72b", self._config._name_or_path.lower()): + model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision + ("/" + subfolder if subfolder is not None else ""), + 
low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + load_in_8bit=load_in_8bit, + trust_remote_code=trust_remote_code, + torch_dtype=torch_dtype, + **model_kwargs, + fp32=(bool(torch_dtype==torch.float32)), + fp16=(bool(torch_dtype==torch.float16)), + ) + else: + model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision + ("/" + subfolder if subfolder is not None else ""), + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + load_in_8bit=load_in_8bit, + trust_remote_code=trust_remote_code, + torch_dtype=torch_dtype, + **model_kwargs + ) else: from auto_gptq import AutoGPTQForCausalLM # pylint: disable=E0401 @@ -590,7 +622,7 @@ def __init__(self, *args, pretrained, model_format, **kwargs): "'decoder_model_merged.onnx', 'model.onnx'] in {}.".format( pretrained) ) - + import optimum.version import onnxruntime as ort from transformers import PretrainedConfig @@ -678,7 +710,7 @@ def __init__(self, *args, pretrained, model_format, **kwargs): pretrained, use_cache=False, use_io_binding=False) - + def _create_auto_tokenizer( self, *, From 04f5ef6ccea4e763537a2a418c0255d13ef6c094 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Sat, 13 Jan 2024 08:06:19 +0800 Subject: [PATCH 044/101] Support Phi-2 model (#1137) Signed-off-by: lvliang-intel --- .../neural_chat/chatbot.py | 3 +- .../neural_chat/models/base_model.py | 3 +- .../neural_chat/models/model_utils.py | 3 +- .../tests/nightly/models/test_phi2.py | 37 +++++++++++++++++++ 4 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 intel_extension_for_transformers/neural_chat/tests/nightly/models/test_phi2.py diff --git a/intel_extension_for_transformers/neural_chat/chatbot.py b/intel_extension_for_transformers/neural_chat/chatbot.py index 855c6837ade..1d7133d12f2 100644 --- a/intel_extension_for_transformers/neural_chat/chatbot.py +++ 
b/intel_extension_for_transformers/neural_chat/chatbot.py @@ -97,7 +97,8 @@ def build_chatbot(config: PipelineConfig=None): "starcoder" in config.model_name_or_path.lower() or \ "codegen" in config.model_name_or_path.lower() or \ "magicoder" in config.model_name_or_path.lower() or \ - "mixtral" in config.model_name_or_path.lower(): + "mixtral" in config.model_name_or_path.lower() or \ + "phi-2" in config.model_name_or_path.lower(): from .models.base_model import BaseModel adapter = BaseModel() else: diff --git a/intel_extension_for_transformers/neural_chat/models/base_model.py b/intel_extension_for_transformers/neural_chat/models/base_model.py index 2f29e1aea61..3aee7670cc3 100644 --- a/intel_extension_for_transformers/neural_chat/models/base_model.py +++ b/intel_extension_for_transformers/neural_chat/models/base_model.py @@ -164,7 +164,8 @@ def predict_stream(self, query, origin_query="", config=None): self.get_conv_template(self.model_name, config.task) if (self.conv_template.roles[0] in query and self.conv_template.roles[1] in query) or \ "starcoder" in self.model_name.lower() or "codellama" in self.model_name.lower() or \ - "codegen" in self.model_name.lower() or "magicoder" in self.model_name.lower(): + "codegen" in self.model_name.lower() or "magicoder" in self.model_name.lower() or \ + "phi-2" in self.model_name.lower(): query_include_prompt = True # plugin pre actions diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 40c32029282..0d2a376f4cd 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -501,6 +501,7 @@ def load_model( or config.model_type == "llama" or config.model_type == "mistral" or config.model_type == "mixtral" + or config.model_type == "phi" ) and not ipex_int8) or config.model_type == "opt": with 
smart_context_manager(use_deepspeed=use_deepspeed): model = AutoModelForCausalLM.from_pretrained( @@ -509,7 +510,7 @@ def load_model( torch_dtype=torch_dtype, low_cpu_mem_usage=True, quantization_config=bitsandbytes_quant_config, - trust_remote_code=True if (config.model_type == "qwen" or \ + trust_remote_code=True if (config.model_type == "qwen" or config.model_type == "phi" or \ re.search("codegen", model_name, re.IGNORECASE)) else False ) elif ( diff --git a/intel_extension_for_transformers/neural_chat/tests/nightly/models/test_phi2.py b/intel_extension_for_transformers/neural_chat/tests/nightly/models/test_phi2.py new file mode 100644 index 00000000000..fef9c8fa22b --- /dev/null +++ b/intel_extension_for_transformers/neural_chat/tests/nightly/models/test_phi2.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig +import unittest + +class TestPhi2Model(unittest.TestCase): + def setUp(self): + return super().setUp() + + def tearDown(self) -> None: + return super().tearDown() + + def test_code_gen(self): + config = PipelineConfig( + model_name_or_path="microsoft/phi-2") + chatbot = build_chatbot(config=config) + result = chatbot.predict("Calculate 99+22=") + print(result) + self.assertIn("The answer is 121", str(result)) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file From aabb2fc35faf13a64fab3e06d4b476a30be2ef2b Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Sat, 13 Jan 2024 17:52:49 +0800 Subject: [PATCH 045/101] [NeuralChat] Refine Document (#1127) [NeuralChat] Refactor RAG code and structure Signed-off-by: XuhuiRen Signed-off-by: lvliang-intel Signed-off-by: Tian, Feng Co-authored-by: hshen14 --- .../neural_chat/README.md | 215 ++++++++---------- .../assets/pictures/neuralchat.png | Bin 44878 -> 0 bytes .../neural_chat/docs/advanced_features.md | 199 ++++++++++++++++ .../docs/images/neuralchat_arch.png | Bin 0 -> 434602 bytes .../examples/deployment/codegen/README.md | 9 + .../deployment/codegen/backend/pc/README.md | 18 ++ 6 files changed, 327 insertions(+), 114 deletions(-) delete mode 100644 intel_extension_for_transformers/neural_chat/assets/pictures/neuralchat.png create mode 100644 intel_extension_for_transformers/neural_chat/docs/advanced_features.md create mode 100644 intel_extension_for_transformers/neural_chat/docs/images/neuralchat_arch.png create mode 100644 intel_extension_for_transformers/neural_chat/examples/deployment/codegen/README.md diff --git a/intel_extension_for_transformers/neural_chat/README.md b/intel_extension_for_transformers/neural_chat/README.md index df8d7099afc..c0cad91a432 100644 --- a/intel_extension_for_transformers/neural_chat/README.md +++ b/intel_extension_for_transformers/neural_chat/README.md @@ -2,139 +2,148 @@ 
NeuralChat =========================== -

A customizable chatbot framework to create your own chatbot within minutes

+

A customizable framework to create your own LLM-driven AI apps within minutes

---- -
+[🌟RESTful API](./docs/neuralchat_api.md)   |   [💻Examples](./examples)   |   [📖Notebooks](./docs/full_notebooks.md) +
-## Introduction +# Introduction -NeuralChat is a customizable chat framework designed to easily create user own chatbot that can be efficiently deployed across multiple architectures (e.g., Intel® Xeon® Scalable processors, Habana® Gaudi® AI processors). NeuralChat is built on top of large language models (LLMs) and provides a set of strong capabilities including LLM fine-tuning, optimization, and inference, together with a rich set of plugins such as knowledge retrieval, query caching, etc. With NeuralChat, you can easily create a text-based or audio-based chatbot within minutes and deploy on user favorite platform rapidly. +NeuralChat is a powerful and flexible open framework that empowers you to effortlessly create LLM-centric AI applications, including chatbots and copilots. +* Support a range of hardware like [Intel Xeon Scalable processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html), [Intel Gaudi AI processors](https://habana.ai/products), [Intel® Data Center GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html) and NVidia GPUs +* Leverage the leading AI frameworks (e.g., [PyTorch](https://pytorch.org/) and popular domain libraries (e.g., [Hugging Face](https://github.com/huggingface), [Langchain](https://www.langchain.com/)) with their extensions +* Support the model customizations through parameter-efficient fine-tuning, quantization, and sparsity. 
Released [Intel NeuralChat-7B LLM](https://huggingface.co/Intel/neural-chat-7b-v3-1), ranking #1 in Hugging Face leaderboard in Nov'23 +* Provide a rich set of plugins that can augment the AI applications through retrieval-augmented generation (RAG) (e.g., [fastRAG](https://github.com/IntelLabs/fastRAG/tree/main)), content moderation, query caching, more +* Integrate with popular serving frameworks (e.g., [vLLM](https://github.com/vllm-project/vllm), [TGI](https://github.com/huggingface/text-generation-inference), [Triton](https://developer.nvidia.com/triton-inference-server)). Support [OpenAI](https://platform.openai.com/docs/introduction)-compatible API to simplify the creation or migration of AI applications - +

- NeuralChat + NeuralChat

-> NeuralChat is under active development with some experimental features (APIs are subject to change). +> NeuralChat is under active development. APIs are subject to change. -## Installation +# Installation -NeuralChat is seamlessly integrated into the Intel Extension for Transformers. Please refer to [Installation](../../docs/installation.md) page for step by step instructions. +NeuralChat is under Intel Extension for Transformers, so ensure the installation of Intel Extension for Transformers first by following the [installation](../../docs/installation.md). After that, install additional dependency for NeuralChat per your device: -## Getting Started - -NeuralChat could be deployed locally or accessed through service. +```shell +# For CPU device +pip install -r requirements_cpu.txt -### Deploy Chatbot Locally +# For HPU device +pip install -r requirements_hpu.txt -NeuralChat can be deployed locally and users can run it through command line or python code. +# For XPU device +pip install -r requirements_xpu.txt -```shell -# Command line -neuralchat predict --query "Tell me about Intel Xeon Scalable Processors." +# For CUDA +pip install -r requirements.txt ``` -```python -# Python code -from intel_extension_for_transformers.neural_chat import build_chatbot -chatbot = build_chatbot() -response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.") -print(response) -``` +# Getting Started -### Deploy Chatbot Service +## OpenAI-Compatible RESTful APIs -NeuralChat can be deployed as a service and users can access it through curl with Restful API. +NeuralChat provides OpenAI-compatible RESTful APIs for LLM inference, so you can use NeuralChat as a drop-in replacement for OpenAI APIs. NeuralChat service can also be accessible through [OpenAI client library](https://github.com/openai/openai-python), `curl` commands, and `requests` library. See [neuralchat_api.md](./docs/neuralchat_api.md). 
-#### Launch Service +### Launch OpenAI-compatible Service +NeuralChat launches a chatbot service using [Intel/neural-chat-7b-v3-1](https://huggingface.co/Intel/neural-chat-7b-v3-1) by default. You can customize the chatbot service by configuring the YAML file. ```shell neuralchat_server start --config_file ./server/config/neuralchat.yaml ``` -#### Access Service +### Access the Service +Once the service is running, you can observe an OpenAI-compatible endpoint `/v1/chat/completions`. You can use any of below ways to access the endpoint. -```shell -curl -X POST -H "Content-Type: application/json" -d '{"prompt": "Tell me about Intel Xeon Scalable Processors."}' http://127.0.0.1:80/v1/chat/completions +#### Using OpenAI Client Library +```python +from openai import Client +# Replace 'your_api_key' with your actual OpenAI API key +api_key = 'your_api_key' +backend_url = 'http://127.0.0.1:80/v1/chat/completions' +client = Client(api_key=api_key, base_url=backend_url) +response = client.ChatCompletion.create( + model="Intel/neural-chat-7b-v3-1", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Tell me about Intel Xeon Scalable Processors."}, + ] +) +print(response) ``` -## Advanced Topics - -### Plugins - -NeuralChat introduces the `plugins` which offer a rich set of useful LLM utils and features to augment the chatbot's capability. Such plugins are applied in the chatbot pipeline for inference. - -Below shows the supported plugins: - -- [Knowledge Retrieval](./pipeline/plugins/retrieval/) - - Knowledge retrieval consists of document indexing for efficient retrieval of relevant information, including Dense Indexing based on LangChain and Sparse Indexing based on fastRAG, document rankers to prioritize the most relevant responses. 
- -- [Query Caching](./pipeline/plugins/caching/) - - Query caching enables the fast path to get the response without LLM inference and therefore improves the chat response time - -- [Prompt Optimization](./pipeline/plugins/prompt/) +#### Using Curl +```shell +curl http://127.0.0.1:80/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "Intel/neural-chat-7b-v3-1", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Tell me about Intel Xeon Scalable Processors."} + ] + }' +``` - Prompt optimization supports auto prompt engineering to improve user prompts. +#### Using Python Requests Library -- [Memory Controller](./pipeline/plugins/memory/) +```python +import requests +url = 'http://127.0.0.1:80/v1/chat/completions' +headers = {'Content-Type': 'application/json'} +data = '{"model": "Intel/neural-chat-7b-v3-1", "messages": [ \ + {"role": "system", "content": "You are a helpful assistant."}, \ + {"role": "user", "content": "Tell me about Intel Xeon Scalable Processors."}] \ + }' +response = requests.post(url, headers=headers, data=data) +print(response.json()) +``` - Memory controller enables the efficient memory utilization. +## Langchain Extension APIs -- [Safety Checker](./pipeline/plugins/security/) +Intel Extension for Transformers provides a comprehensive suite of Langchain-based extension APIs, including advanced retrievers, embedding models, and vector stores. These enhancements are carefully crafted to expand the capabilities of the original langchain API, ultimately boosting overall performance. This extension is specifically tailored to enhance the functionality and performance of RAG. - Safety checker enables the sensitive content check on inputs and outputs of the chatbot. 
+### Vector Stores -User could enable, disable, and even change the default behavior of all supported plugins like below +We introduce enhanced vector store operations, enabling users to adjust and fine-tune their settings even after the chatbot has been initialized, offering a more adaptable and user-friendly experience. For langchain users, integrating and utilizing optimized Vector Stores is straightforward by replacing the original Chroma API in langchain. ```python -from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, plugins - -plugins.retrieval.enable = True -plugins.retrieval.args["input_path"]="./assets/docs/" -conf = PipelineConf(plugins=plugins) -chatbot = build_chatbot(conf) - +from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline +from langchain.chains import RetrievalQA +from langchain_core.vectorstores import VectorStoreRetriever +from intel_extension_for_transformers.langchain.vectorstores import Chroma +retriever = VectorStoreRetriever(vectorstore=Chroma(...)) +retrievalQA = RetrievalQA.from_llm(llm=HuggingFacePipeline(...), retriever=retriever) ``` -### Fine-tuning +### Retrievers -NeuralChat supports fine-tuning the pretrained large language model (LLM) for text-generation, summarization, code generation tasks, and even TTS model, for user to create the customized chatbot. - -```shell -# Command line -neuralchat finetune --base_model "Intel/neural-chat-7b-v3-1" --config pipeline/finetuning/config/finetuning.yaml -``` +We provide optimized retrievers such as `VectorStoreRetriever`, `ChildParentRetriever` to efficiently handle vectorstore operations, ensuring optimal retrieval performance. 
```python -# Python code -from intel_extension_for_transformers.neural_chat import finetune_model, TextGenerationFinetuningConfig -finetune_cfg = TextGenerationFinetuningConfig() # support other finetuning config -finetune_model(finetune_cfg) +from intel_extension_for_transformers.langchain.retrievers import ChildParentRetriever +from langchain.vectorstores import Chroma +retriever = ChildParentRetriever(vectorstore=Chroma(documents=child_documents), parentstore=Chroma(documents=parent_documents), search_type=xxx, search_kwargs={...}) +docs=retriever.get_relevant_documents("Intel") ``` -### Optimization +Please refer to this [documentation](./pipeline/plugins/retrieval/README.md) for more details. -NeuralChat provides typical model optimization technologies, like `Automatic Mixed Precision (AMP)` and `Weight Only Quantization`, to allow user to define a customized chatbot. -```shell -# Command line -neuralchat optimize --base_model "Intel/neural-chat-7b-v3-1" --config pipeline/optimization/config/optimization.yaml -``` +## Advanced Features -```python -# Python code -from intel_extension_for_transformers.neural_chat import build_chatbot, MixedPrecisionConfig -pipeline_cfg = PipelineConfig(optimization_config=MixedPrecisionConfig()) -chatbot = build_chatbot(pipeline_cfg) -``` +NeuralChat introduces `plugins` that offer a wide range of useful LLM utilities and features, enhancing the capabilities of the chatbot. Additionally, NeuralChat provides advanced model optimization technologies such as `Automatic Mixed Precision (AMP)` and `Weight Only Quantization`. These technologies enable users to run a high-throughput chatbot efficiently. NeuralChat further supports fine-tuning the pretrained LLMs for tasks such as text generation, summarization, code generation, and even Text-to-Speech (TTS) models, allowing users to create customized chatbots tailored to their specific needs. 
-## Validated Model List +Please refer to this [documentation](./docs/advanced_features.md) for more details. + +# Models + +## Supported Models The table below displays the validated model list in NeuralChat for both inference and fine-tuning. |Pretrained model| Text Generation (Completions) | Text Generation (Chat Completions) | Summarization | Code Generation | |------------------------------------|:---:|:---:|:---:|:---:| @@ -142,43 +151,21 @@ The table below displays the validated model list in NeuralChat for both inferen |Intel/neural-chat-7b-v3-1| ✅| ✅| ✅| ✅ | |LLaMA series| ✅| ✅|✅| ✅ | |LLaMA2 series| ✅| ✅|✅| ✅ | +|GPT-J| ✅| ✅|✅| ✅ | |MPT series| ✅| ✅|✅| ✅ | -|Mistral| ✅| ✅|✅| ✅ | -|Mixtral-8x7b-v0.1| ✅| ✅|✅| ✅ | +|Mistral series| ✅| ✅|✅| ✅ | +|Mixtral series| ✅| ✅|✅| ✅ | +|SOLAR Series| ✅| ✅|✅| ✅ | |ChatGLM series| ✅| ✅|✅| ✅ | |Qwen series| ✅| ✅|✅| ✅ | |StarCoder series| | | | ✅ | |CodeLLaMA series| | | | ✅ | |CodeGen series| | | | ✅ | +|MagicCoder series| | | | ✅ | +# Notebooks -## Restful API - -### OpenAI-Compatible RESTful APIs & SDK -NeuralChat provides OpenAI-compatible APIs for LLM inference, so you can use NeuralChat as a local drop-in replacement for OpenAI APIs. The NeuralChat server is compatible with both [openai-python library](https://github.com/openai/openai-python) and cURL commands. See [neuralchat_api.md](./docs/neuralchat_api.md). - -The following OpenAI APIs are supported: - -- Chat Completions. (Reference: https://platform.openai.com/docs/api-reference/chat) -- Completions. (Reference: https://platform.openai.com/docs/api-reference/completions) - -### Additional useful RESTful APIs -In addition to the text-based chat RESTful API, NeuralChat offers several helpful plugins in its RESTful API lineup to aid users in building multimodal applications. 
-NeuralChat supports the following RESTful APIs: -- Finetuning -- Audio Chat -- Document Retrieval -- Code Generation -- Text to Image -- Image to Image -- Face animation - -For more details, refer to this [README](./server/README.md) - - -## Selected Notebooks - -Welcome to use Jupyter notebooks to explore how to create, deploy, and customize chatbots on multiple architectures, including Intel Xeon Scalable Processors, Intel Gaudi2, Intel Xeon CPU Max Series, Intel Data Center GPU Max Series, Intel Arc Series, and Intel Core Processors, and others. The selected notebooks are shown below: +We provide Jupyter notebooks to help users explore how to create, deploy, and customize chatbots on different hardware architecture. The selected notebooks are shown below: | Notebook | Title | Description | Link | | ------- | --------------------------------------------- | ---------------------------------------------------------- | ------------------------------------------------------- | diff --git a/intel_extension_for_transformers/neural_chat/assets/pictures/neuralchat.png b/intel_extension_for_transformers/neural_chat/assets/pictures/neuralchat.png deleted file mode 100644 index d85c2a7ab1479167f4b5ec0ed03a074ed4cd7971..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44878 zcmeFZcT`h(+waZjATy%FC{m=(h!m;Pk!qp$4$>tw=@2@BSjIvhx^yA5(0ix>1cIRi zL+_E^K?o2+31M z74<17t|U@WT!#E|1$d{S=FSxG<0r7b%43Q$49gnu%g>IFv>s7VRK`%BKBEMF|J76V zDVT!dw+`~ZpSnE??IhW!N&Q|!@NOiJsyE~bB}^cJ^Q?&066_wE{BmNWK#{&ydj>B}p8 z{yG&FpC5r%+xG^`=$tAp{p#eGCPy!3^QhWHhNCr3xhgl-4b7yMMI*b5$29L06SBnO z2`BOc)gWnZ;6{O$4W+dPiG0%(6ff+51)h`q;!!XB`QqTA-eb;-1B&pQygywW=_cs@ zdU5a~=P&i|f4jwLc5f+JApC~P3@3Uzb+^=d zeT#)*cSmg^L**0uXxfy2|87p1xq{2MnYL$c`x~={a|j)vzP{@aHXi@H8n1b+b&AI1BoJ2FvwX@PrTa6E4nS|&V*Q&$Rnx~Dul z)|+#hxn6j_iSdzj|%{on_HB+^@{qPL#X>b}D7%__Y5Ppmv@;dBaF67m*GDVN^ z!M3y4dHT{R>O+3qshbGfS?gw4gX(jS;C-Y;IiJi6t;gTp(KH(a6^!sYH**M5BH+BO 
z$~D?=7YY5stit|)F__n{Gn$oH#pm4Wf45QF{?3V!i203pcAD-sJ{8zpaBZcF11nMg zM(`lj!sqG`;eE87$jX6;P(P-X!#RdiX!)9iabHVlsgDv@pofxmWY?FC1~;{xueA8Sl@!jytw~|$U>6<~Qq{qtXXP^Xep8=hn(h6-2RR(hE2F-p zV(FFUOH~r=U8?D++y!NtuaJmEaC6p(j?#$HRyP5>)uL z<={RYs32N-3%3>aL}#gPX?h$|T&^P`d}^#BHe#q~-^lUSm{$pnYdMfZGaxYx35Fwe2VK7($)5W*^2C4AcZ1iK%BWp=+;BW(gYH3a7}2S?O7^ki;?zozk3 zTJBwZlLtu&E2BD891?oB@)~X*)%~@a3f3s^pKff%U9t2%R(LUGbEhin?nv``k1wmn z^KYgq*A|E3W)84&KKHmtBCJD0V?86gu~Ni^R2R|Z437vTA$b*9#?RW{z2Q)lNK@At z)=uq3>=|h|(A2Sx;)@tfBtBKyXV`Ys?g!%)f)@k!Y_-qY6x$p3^5m}8ImaTMLV|3# zD2imKSshVa^Xt}*$r^PF+-s_qX%7%Ota|60y8JWuZ~@m&3+1IT-0=;0g9;CBb1az5 zuW#0!9yn`8W`xlye6V&rJgX~|k#D{)*3=}dJs^mG3LWMaQt?53-rP`oaJD)<`cvAE zpsD_Pl`zRlzhAAu{YhIztEh4)RN9QaB;2rlT+5y?tcZ6x!$PG3|12QDs5R>D7ePuy zF~8|_E@jhV_dIt5D>>TN-76sYCNnG6UpR&5J~33M1}WDRC5|NxfV>VzmJ1sSWD)tDW4(6{ zN*kSs2NR4$tp!NMzPkB-x^-5a>BBYJ?}iFtNo19YpA3RW#CsrA`l~EAjg)6V0>Cnv z574|jLfKQq5%STL!kBIC-O8BwDLT%j(f3_&&I*+gzJ0*ACug}~c6ByQ6Xmp@Wj-EWAuS`LhI4B6*ydE_Vg6l4*1HW)FWd&-k$*x42rHQ-?>JA9|gx>Q9Y)38S2sX}W@Z&CC@>zP~1lF-G+e$w!zW*N2xHD6Q#Nce9< zd})L8ikWz6Nr|&{U%9W8vVwL|Tip-u;Qo3~btc{rugvg<)ulv49Ou0{8rl72hzhos zR*T&?)Y%XD@N{g;%1UXv!CS}|+(SCVdqw(bq&t( zq<>gVUHagnjrbr;Fo&@5-etTU=-WxH*J^TysO9Go+0Z+fNUhG1R_*Tn{hYY9=DJ_n z=()bTEBA=u)^fhp$EA{21`_>>#C*3EYnnq!(~!gQj<&1)NrflIygf*H{NN=6XhYoa zXzzOW$DUBXd+;UMzs8Z?yR$rI#%f>eKL4{T=MTGgIGz!WqKXgF z#_eu@hNNgFe5}NWhzca^rLt*vtG5hjL8O71?bVI6&@AFh4cCAHcE{1fo`3b1Z<$3- znnlIdj_#!(2HVimwL+4yK4l%<4~LXys5~UPg%@xX^iw*Exgy~dnKPe{OpQvo)8kh* zEaoWt7RUFCxAF76-Vua+PYh^S;lsVzs?~@&MuJn#|cj6Mvhi@{sIm=RP);L=G;RUTYq1`m;3c zEk=L=soAy4r?T*w3T<}-9=zP0$b^YRMfp%j9qYUJgw_wQ^!^AF5PE^bEwYRm zRLigPb`jB?nMJR?78757hDhHwu;%nw)Y4KuT$p1FfZvlG$aIqS6Em}Bw^Z6lH8-h6 zaKe=$*<8Y5<&L%x!7r)dXq|U3LYU8HupdvL!0rlP?^`80cu#H;-dLk%aqL0EWD8nx zj8HWHWgu=Lp*dASd12)wN5GO<3(cr|1MURba>`A1(laFKy6nqJ1fQ0d;=c}sP>&Hu z47clPly+!nl(z^}%VziTGS{L<&w9lT)RmSltBEVaw$Raxr&3MdAeYi6qu0`Ne*OeEb*%| zkkcx504~?lAxBg>e+uT1FYh82ku({iJO(SXSM+=c0 zfzN(j@L&zM)ZxE6akE&OyP_RpRi>$NkrsbFY=ZHo;+lU?zG4P*_un(qgBJ$X$Y7$a 
z*m+80)}7>hrBh9&k%qZhGR#jD+`<}B>x;E?33;%E7L3x|2yZTMyJ;NQ^P?(Zy&REs zYqqfL#7lY2GR`??9U9p4_u_XE@ zHbucQDohia9+3ZFkgACOFBZ5*0Y^wZXCoB`m8YQ7_m}S4RLaw719t9iuKCThQmjRV z8*!@$x~J$q)jhwt>jJuFq%gt2%~sw(znoi?W1B*e?+Oi$)J%_xC!U=ho?;BQg<|q# zk~EZ#*BT3tgY3nSfq~1Wz6wv-#lg=^c#D1;R_X0MYdElq9vT?tms`f#`Wq@O6gz@W zt7BK84V>%js&ygC_QSGYSV}!I(X511>7m%;puN#>@2R7-%6hSJR)Kfaqlizt6cRKu zII_^-dYPO7%>QewDgH--ASXu@|2JwZHcvLkD;M7OPzyEx%^#>08U!aM@g*T3fA*S( zUL0DtE=^echg>u|v%kNuxsl6Y4G|7})35wb(McvBkNt!Eu;!DktW(LucaUDEkM*Za zBf1AY)2`aM^qyy&Pma;o`QEE-JaCozKG&7G)vjuAz$^s z)7!q*eU5>PnOi}2yB3WkA_Q*pE}bm2#8w6T{`quYA1`3S??HT=ggCu8Yz^%tr`Wn? zd!Uu2*SQl_N3*}>>3la)-?vT0+ESJ`8lgRC$3(u~Xv}x}LGdETPjiKRXV#WJCa;px z|Hg|;#8$lZ$>gV%yQf=J6ffS8w#JdU^t(|D3~`evZyG9_;E8JlPxW1VRf5xh4#szl zu;R~5D2Yf(`J&X#3sK-Ik21VuGcXf4;XJ$*OiaMza$#haX#VHX`pab$Rj0C z15+&qoOX-b&}<^V<=UGvd=1iILj-s~b2 zcD)SztibL;S2RK_Tm=%1^8qEp{MC;~2m#heTR!4;ruR=2FQUkQx_DZw{&V~9AOAlS zf|%!tD_PdM6yuYAo8z&=<0Zg6-8~Qh?Y?RFf0-sGifQ?@IksqExspc{xQ}rw*`*78 zcEdxX4j4sX3W0$D<^sPc{?EC%7`0i4qsPFR%%!(H;#&U8Vs05oF!KOC&PxO$=J9>< zUe>j^H+xr9P=X>)xv-`RSfYVFlr|^#4#*>60|QR`D3v|fQ)gGWNKICZ+a3c!A&)k2 z>YEVC?^<$6Jj6q^dZ~-STGPtD`inmKG#h5%=ZmPMa~bRP1zQ(*jfenn!JGeh1Px$W zOSJTV`#y){KRJ<~Hc6dt=;GtRr2U63&7(JAa9z|=$%^l~@VCG{Wwl<#v1y5w9*vYm zUz_Uh2~L~z=Vn7sgBH~ZBk+Bu(BT^)J5kDhyieQfDtI6Pc7If%WlCTJR5{t->dQ@e zZFP=qSOS5Z923>GXo8lmt9;WsmBzXHy7tctLuB(r_WTr2-Qy<_8Y*=Kw;IlIaFS!Y zQsxxENgIz2uBfaM_H&mywtB#%wI=7~X!K-1o%YNNy!VJDE$w1!zPP^oiCT!Cr;jP7 z-O&ZMu5j47nvB-a<(0o3S?A-+a=hf^TCD6_61KLsmWy(z{js=($jgpG-P$oyQ)ggW zw*_et#YdYwzxmK{ct_h|0FA`a`*y$cIFCBPTQ(j}x<|+Gds%WVl{ajFA$N~s##HP@ zf{_PYZcYu@yOS?9udEy_lZ}(~7pPL{Q9uvIDb(Nl{?g*VGJo)+m2& zk2Z;qCo<%nCh-^5>ts(At(sOW>j>Je6G#)8wbm;tdype68QmLk@bm~)?0NR6dWjX1 z((aiMh;-;754Z_0UJ6%RVoX(eh}#_aru?ia=E7l6*tmSR;l{>Ca)pLvXOau3U}YW8XH7e4cE) zyKWqwm7J#tD4MQ87n4$#WVjh91z*;3>mYUUSo#|&kId#~5_SXZ;(03$+|asfz&5Y| zcKq>GcCOD<8o;2WO^gV|_gw#Q8o+a$DR+}s{jv=*Y3#G04VOSx?83gY6IU zF}{85IM#PLJMoKAkFB*(;|(uYEg|yW_9p%9PDgq=*U|@2%aPQ4IrFo*sf2>C52{6y 
z9Jm~hZ3|hEipCebxYV+*AN(?OtgO{Y>5wlBZaOTHuL(x{rbyGL%_QQ6oOPq^T5;Xi zNhIRWV|i^WOs^n#3qvXkP3)t}o;zF zC+gkoc`jX`5;qpn806Pj`?bDP)UlwP%R^vk-SpgNa#rSJ^Lx#)S;+_0*12la@rvd55a1jM#AvR z!?kLtc9JFP6{xARvh`lpfzB9%Yj;ufNxISs{{tL#l%{`~_ZfAl4!s34-)A3;P&{7- zFgNxV#KPdlQ2+LUhW4s}GwK1fck2{&jKM)M5$0om{K@5DC=uK%Xy7-dlf`Jch6n`* zlZY!T7lBx_ne>I9b|w2%;8pQdEA;;Fa%ytrOHC&^{BR)&5(%qnBCa&hU{tg7`8O4v z5I3=m##p}xl5_Khn&~^?`RC)b9WK*Clfzw^Qa;vhH7!l4A&fEY1&2yA8YvrQKPN&F zf~kPeqPve&NF9zivIT6N>iUCznmm{+e4s{?80tGLUwM3n`G*w9WtQ)j9`@nLK)=5U zafdg~`5u$P$SotBTSR-FS%Lr`UDS?dUnD+BXzL_C*`s|mnKm{ME_f&>ZrW@=&HdVI zzkQkUNGxUc@m_azMY8Y{Mpt>jsnq@TX?w>f=qg2yo2*FGAISdug_eu6NEKN<*SKC; zu&#QoZ|0}+FpQ)b%mMD8ZUDpL=^;xM;<5 zP&I-iianhCDm`1m7KM$t0q@pEFkXZ-WxnT z9Y2LD=PqA4Y(6#j#qKS8MzUmGPpZNOmw4zo8Pcz*Oy)?Zgh$K>8~Rve?n>bgZS-Rb zLmU4bjPNVod>OqO+#rL|*6hrCdFUjoO9$Wkl;S<*ucIZ_8pL&FrB2AdGdd$WRx6G- z3i^c|ukvTk^YdvveqjUQQ*Hl8tCNgpDb?7s04+oM%=qaYRbK!88YCG0H5HBa>-yHp zbx`|v|8iNUp%`h|+ak0;d>xtCxZUU$vf{eWu5HzWMhf5J(C1W-{2p76TB;0Vu77SV z+-w#FGGyhU%1`c<&N)knGb|FicHd#|_3Y{g>POBwCsDL|P#uwm+#V;{(c8Xg!)Ebj zq-3!{DfYa0xv-HV1!=@^0>f`LDBd#*onbG9w)#+)xd4i43dSj`AM4#qBjl8kSdEZ3 zW7AxZjiPO3wQ8vS($&6t_6K|=+OD@l^#esmi_+iK2t5c&{v?>t4Cv0J|Kyh7xOa&!%nm z<&4MgI9K)*oDd`{PJCxONfLZo)ePz87GGubnM{u6%EN5m6$1+Gqj5&BXArm7 z0`*4UHOp~NBVggZsXt8yo}|mHF|s?)Ki*k;;dO0g_RD+OHjdY1!m{zT#2d}fNT3eTJrF>2)gS3~~`G+7b&QjW~(zusac$jZP z%k<;u5y6>oL2HtO`uWJvcchurseqE?q!s)7xtK9ww=f2D> z%c$DQ4NL}};0i21LqpbZm`8sMy%Bzhv?|w}CoRQ)o9GhW@22rqed^?1^7Xw(D*K`J zpH$x#1C`lsnPn>3>0c%)8A%mFL7C3suZ{dsWhz0o0r4SJFSdBjIe!OIIB-$qa+W`X z&uhtBDr_4K*(V%Z>;iN@-xA_xo4i)4Rc3OY)~m6tQ|Ud&-TUTIBKF&BVr=4DsCfO? 
zqnK74Lx`Fjo5eGWbhzfe?8FT)R*{#rzdYl@bFbf=5|xv?nSm}TNrrzoF1o{OESiLk zoZeYTOSWBISu=_@xH2^Wr1Y{&RX3`z;1WX8rEEA&Fl^zqU5;X8>l8gzF^$qdm-+R+ z7Y7OFB7g>6@qEiIYSv^0lqZFw7ihJZ6YQNasx1F_L&?FF&nE>D*Hak84`04UdL}!1 ztgS`#->?-lZX9RtV2BTj?EcpDL`^P-NOirT^y%cVN&yVy-mvZjC1|FfdRZFg_4o$Y zp{mI0iF*DWyuS3|uz#lw@&LDeyvni~L#s2Y5(I63s%fNj?5W{H6WQ50((ARi%lq|FQQ6Alhn zQ?mDc#0NvX1KE`t=sn3%{ckAib4eH8>JXqs%ah-Ou}~Cmloclg|1>g&E`8D zX~*)c;df27#HvamVr;Z(zJc+=dviAW*6rM9*awoz`rvd-di0?RGi=GEV8J#2W9?LD zp415@%qgUXOMkG{H@S7pbiDV2@}Frjx?$oY{>_pilDpZNawE6h@_x6N)?-UNVTPS$ zXn3uSMKzT3hq-$AG{-*07uc8zk@HM?q=BcP2W%Ktu!8v0WRjY+0_>v~?*Spv>(Rt) z5p3xI^UD332zGZNlU#NZ)MZ=i*utNeH*s~`sfheOifw3PB9P}BS`8>%rVU|9%48)EKF0k)+^X9|~lzaP!==Y+3>waSk z3b)*h*ha)1t0O<7Ak5W0h4p;!syQHmyASDKhMmZaj&?<_He+cHfTi4G;mn?GuXpeo zv&zrN1Ku}HS*sdUEDg=|G=?i1dC(zSQxobDfI_id5gTg)?Ut zsO}b&t|NLC8|xbzSIp94i9kKgS|pd! zz4Kznc{eS|duH)7YVVnPT*I}oukIpbR!-7paI7-}oCntto`0j>&oUB0`jqP0a~(fl zq{R{iRfQxk9UpqX*Zw7((&zSY^UXx;DSn%nq($65IS(;nIeq4F&59)k{V7<~TB~cZ z0498NjFmF1rlFm0saj=+0^P=%yndfsyf`$tkDjNGAe zwAbYy#q?Ks~QJht7|VA1;){E^zP^85qhc}nEPP?AI$fnOA!l|6VzO3P0=){vepdt?YUzz&NBA9O3Xpmm+nmdcT20JXmPz0L&qA- zS=4q|G}P?bee`4NYf1x^69ppr*uCoI!m~8Hs-F`~j0KKfFr94rMIMtSYgcpsVZ{x; zElL81GHk=MdywYawp&zoj5LQFLyaBiC@Wg|$w66t^|xgrZZlGbkD%0H&?21RAqTGH zGqbW@o7M6iRuCVXuyC*Kzz)?5@h*=}{C+u$duK-5f20d2K8Z2>C9|;Or)UtQV>sOa z4u$$J9wKaYSX?*Xzf@S>S-J+qzwJg*OO~XWMxqT*T84Q(U^DgZUm^@{7sVu*P@AV7 zpkh-kEAbvz>x!%V9L)FJ3iM+RRL%#6#+LhUhdK}RLB^uoS|nMuDF3uxW_Xul>*yX5 zbFIWVOF}ttSYG#Ag`eLa(T2E&eUcI{lQj^SeA+ll)~YPGC8cjSgH49zv9ujTR&8|bQ^Am5>3Un?Nq}7e(>x|qS!=8n^7*< zdhe>dYt;%>&PLd4z2KeAd^tKzXxo~bYA0~9hI+1ar(cRbl z_`yv;yY-L|SRF#Z^1rs^S`a9|LQM9olvqwDJp1GZc8w%N3L58C=j3OmM90{@vf%?N zpl_woQpc)u!o7*74DHO)S0)$(a&3soNiMfqP8-rr?Y=8Wcxz3^v3OiGH&D@jy-PeZ zM^6i*lb2dO9d*R-;x_@I=xU~V>H|NyG1eGM@#|juK#h62Q?fg+ywWHkj+6tW*cG)2 zAq5;kS$qL~;#0I*1HTCn&vTY=&ts}#j<6HYUGvQ`L**IKf`xriJ_NoHa;lgf3aQ;_ zj@o`68Qr6qd-1~IV&`>Vuj$pHKYa?-|C<2uwm?}!ng3GoujSh6)+Agki)d|q8 
zWu-^QN;1RNA6nKh%v7v9kpg>pA14gzlRP!jr79Y$6BNor*wf!1-zjakg=(Z@_Ei?- z&^|XNVvb>iTI7gtGUD@5lbha&?pROhas4x*S2uO=?OrMkSHZ`eqnb+~R_p`9aHow= zYwhFnfb@5;xBLn%%)6)3kx>JVDWVU)GD~@N@>ydqaGDU!vZkQQw5FfbJq< z4H$x9*+qApnPqayJ+tIf%skx|FQ>blvgL4dnRYrh0UGn0 z*=1BvU~CzXP;%?4-FRlyJ>Of&_<74@DQ;|h^eef%Vza>$k?PpA68r{@Mq*i5cZ9$y zeYFQ>O0wpZi?IdIiHCF-XKR?C_FCNzyy1wx#c%T6J9AB!VbS;6Zy ziU&nmAi;=z4mawcGB`Phw?`}LE$QKhs)Y{=M7{P>mQ|s_Bt^q*9TmJN_d;J5YJRgA z{eVY9rJIU7ClKmw!tY;}Bwe>ellh(+DwQF_AdV*t^cc73G3QE~ksjdm3pv-9qn~W! z?8`E$S_Hg;T<<7fkp_=>wude|i;Ir!sA@ud?yTcq4$iDO9X}cgpPM;?=ND=tx@<_9 z>P3Q+;9hH_w=dVQX_ZCGO~`cYB3>uobQ^rCl+pH0Xr=gUwd&OFce z756Zuj1R0TFojv$=lp>S^qSX(g&Ht?ulp&>$F#Cehib>l{e6!&bqw%$`pu13Tb8du z6`tmxM$!1@N+#IMP=nA7aPKG$f2}z6fccq+a{dWB-#9|aes`cX_2jlMJa5IaY?y@| z&DvgD`%U_uRhdk53g#67Jl3glHWbynZQ{6HXbBcaSd7HsOAPggqAn-Li}TytHlBO7 zpEfMi)-cQXk0qWpoE>RY73J~EbM>BX3p6Xs6bvjT6{n;L8i%t+q_-{}g3tAP>F+h1 zHC2oU!=GQ0I4jWGNM}{&|m>b%? z0*@D8Q$o|&WLmfgP7Mw~sqbFbK(BsI-c1)?TuE)P`+aowYJl%r{=}{tbpO=0?o36? z=e)$rSL<(&>dkT0N0M&vag(PIzHV!`uzUpmBq@BKM!Rzj+v~NoeriCoMfutd^$*QW z{0&<2k-~Q<58(G20FD*Y8P>fL4DBaNZcCZkjcVcp{T>VZj%RvV&6#5<_X33OQ6DZy zFJR9W@)d1q*PZwnkUk+kIzIZ72DHsgyGk=dCht@K!P%@6AOKcZcA zy(U&y8hGB%F)%ui)s%I2<(VHdDM(z=Kw zr=N9908Hj_A!cW&R+0e@WPVwZn3oz}OFEA8otw>Xw6b?pl}28ddL4Isdc;iSh>|eV zm~bW84J)pw5FJ+0f%S*R0Dzd|4F&t0``w?Urs_WJ|HhQ{&NY-6P)tVjC{^(t zUzI&$?FmaG(GcsbGp$FP$y-s`kg*UA;-b$!QkWpX>l_`^ndff?+ct+&cGs6Qi#W6G zL{}3-!>C{7WfKrql7YZrw$V$98+o=m$a1+mrSHM(kCSs#{dIWNU2g@9>jF-}V&bv@ z_QBNNT@madUg`3w^=1U7A#K_hOZ2h)mGmq#yiY^RYw@+6NWpQyli{+x?6-~91eKM` zN{8#V?wKjZ!hvunw7}3Yflt7IUVOj@Lo@rqJoKostt?@<3yqNB2r&_9h->KwWKg5s zjjx#~sbd(i6LbW_z8M>3Dm>SM)<7NK2sFUBQlSajT;77wo#RZ158*9a!bim@x)Vg1 z?DWl2K*T4^o9dhM$VkgFd{uT2!#Fx8{Ytpm0GdAZc{G6Ci7F=wliQU{O>jlgNx4i| zhMDr;%7Y^JfOtPF+9WP4eK|FR52x|cO}VgBx-c>yl!3p@gq&jH#`H_j_M_ScrpkP` z6624byNJ>KmRpZSrbq7LFJlirXBNsyG$lTOkFJu`V$*iPsW9T)tDnx2Vgp-)Nnv3v zZrIZ=yg9X1ld3>y4AzJ_&HiiZZ`oOv>l>c#eEwrxwdu9)q3Jy|+fPrYjb=@L=k2nI 
zEcYrjBp8B+Z`Ej<=}%;i`%A%Bmdf66c4U=bjmG-(!ffdnyt-y6yZ=ax5ha(7C1Uq{ zh80B5ZM>HOBzzs2zbY-x0-8X7IOYxH5Ef4XAnD84k$UbTWZE^2zy0e0ncu2QPG(&B zFZGg$sxtxXZh***iPfMyXg+l*9L$|_Fym6LJghPhGl)2Q>#NcqJzfb)*+T%@Mr!_Q z0OF4lHE|RgoDnPN7Y`i;`RbY1_+kPgV=yWdaFs#)pVQ$zwktTUk@R*Q4a9XdKrBov zfBp@$Hd3qH)a2d%!N7%&khjv*^|}-?X%J|wZ~16~l}=4y#To^&rF$@~Soif9`jPkQ zDDY?%`QPaY8@B|d<8d1w`pBq^vxSE~Vr-Febp9wz>(c-o&y@_iO759ktOclLwn;74 z)kq*pqDIv^2rR}(l8u6|KHGtgZ};>EKXxC*f#8%?Xr9Y94YbIU<39&{_uEN7u|q_)4IRNp8z^y33F>y4P$filz*u>BpxL6_(Xaa+VU z5WX5_YGp1Cy!w;P<0d(;ie0DuW^tAdX!BWVMbFN7%CN*>MkQDvzH~+lyE}clW43JS zmYcN-Bu3TLsX;ng(JIS^DlA3M!(UE3X>AoXW1D8#>fd|GLAE28taX%*Q--{V>xveP zvbVPYDd+g%$R{c%$8D-nm0mFS!v5+~{qY3ZIQrxoGj1@|RbMa?d4|BmzRRF2y}LZZ z>Jin*e9dK4-O(Y4>xOlerbT%5IYgupI?X8V?*U0mv@P1Qrq)E`o81hxs_L~W8z3|Mt)lb( zB_d4~GY>;()wl6cS;Ri1H*qM&9n$SEVc>?{KZF$L_H<-mvl>`TNLBk1I@d~YPRiz^ z!-m^ULzzbYpk(PUY>vt&ut0+GF9HA!qrcp4^J0th+!Oqxeh--2r%#6giLW1RXmwVs z!qKv|V`QQ{0#Xxq{6@3D7Xz*aS_M)NS5d0c)9j8wj)6K$)GHg9tleFEC^loltaAQb zsw$Qo9w?qabQDwgJG$sq0^A!))_!k0qRKiuJspSLAhW*!8SrHxRm{>RHR!;kx`!?|%L99WFWq z{|Y@nNu>=@d}-a=(D7R;ICHHEvD+Z1#R|pxc2A*tQQ+B%@+dqQS;aGzW(Yo3u$Wtc zh_hW*a1Lin8NALDvDXymjQQ%4A?)epo#s~yj0eTTdpwZ)rm(;J0nV8@pMP-RP^s6V zEFo<6*s^jXwv16ql70gH$^6_vIE6K0&cG7uV{NqAP%dyqUTfBj!AS4KPQM>bnpYKT z5n)-Nu6r(EsM6?#WNzE!)53m>$Ywmr`MLrD3^7g60)WoL*0x+bXV<6o!~9j|KiTO! 
z{<;(!nj`8Q2baPzYmESQja9~>F~YpaaN7U=Z}M3BmI9I3M0v8UuXwxHasSq#ND@ub zIKU9*ZO-Hun$G9%^yKl0pyxH1BlA53Yo~kiHXv0}E*oWyQn70>%QI*n%uy37Her^Q z&VQRoA8M6UYmsf6P#V$F=yO>ut z0syfqb1RrsW`jGgyK^|q8ifQCTS|c%=yA@{OBb-pJ}O%Vj0d=e=x$pu-^s}#<8jBD z6V%BUlwyA_#nRz%(1}G&78c4fx)q=#w0gP7&=p`fj&Jg|D>oGgYm-R&`{#Epf1fY8 zOJ@`VBr)Y9<=&(;1HX?PSD?sAw?;~hpItO2E!nw46>e>v6mtNrBu<96L?6&U@ixe|n*(QM@?dHhiV8Yeg4To*5OUU>z>%QK!;7=QDikEk7CE zxj=BEsk5TG^Ij$u3hK<7OWOZ4>6%9$t3sRpu$r`_U9#3$clndOBucCE^Fx1koJ)>K zV4y=Q4MLI7rV6yaL?^Yj)(s`bWW5(17*-nT;{u8Q!8JbrS*K5p}#P|fC{`t>A71NJ(lB|=b~JEZ}ZcoODxev%RvmvAm~4j`Hk?N7MS zB>cazZj#JP%$hoMOl^7~9APzd-4xl5~Hy(NI@1((=`h zVGOC2IhMCtYEuPL^|sw;Mr|&Thvg<8urGkp9%< z>A6rcz|A5ZOErP}%jY(nS5A0bxOn{YINK_Si0gPAg5{R=x~cm6#w8SgLlbfBXGsm|N4f)cQC%fle>`S%50~k6rySdgscJ%t~V7 zyzfHrO^Q4ep(JsQ^xF%q0f52+PBHrIe;r|Kh z|4|_R$;M@>5~QLJs{RLEY!xu~=7KH`3X%rsZ2+D7uMqLSyVtx`I28I+6w9!7~rnOiyonZBIAUpOa4F@7ZvW zJEH%4Y8-%|D)-%eJ{piG5+HBMFi$At736O*^S$Xr;l#tVJ_uWc_WDQ&992WN0I z?@y!9k0S~qDw~(6pU&<{e%eDIA;`s!!ik)Xi$^vM2JWCop*9qzQzpa~v!L9)QURRn zHoR9YIc9mD)Oy3*o;X2ub&r#s|AIbt>HVNizDo*AJ+&bc1-UCx^ow7Qq$IOa0WZ8}h^Jn5n)SNBp(~!wIG!?mVfIXjBb841J zX;uS(V?C_Y2hAnf01-hNae%80tj@>yE4gW@L~;$ecwduefB)7)LaBXpwtj&__{>5} zBo-mR65juOcjt+tgY+bUo3^Cs;Pj;RMwX*%Xc;pP>-OBkwY|UwG7b%3;qt5RXK4!J z!qUWnqCE8_ay!!aTk`z6dQGf87!B5V`DOTB3wHOB2>D^B$o;1;N_QaU`$)+Y&FMmN zJ(+gG&e?LQsVXfKSy}aQLngcz)y4rtVd*LNgzV$SfY0%eTQb_}zFd@NvD8+FK;ZDullOdWPqOE%07^Gt!i+F)omIkX86pY0n$8$o&LM*o>9>07Q({8)RxD z%P9g$g93@9?7Y>q=KvRRN6UNPcizz&P2p>D{tXrDcw)P{kY={Ap1zZM5-8(ms5sPw z-U#e=rk>U4nFk#bABx>-#?=-H*U@LZQ@P?@&TpSF(D5q7!}M@+Ak$fOd4le`M-*rI zGNg7o(B(a>{FYr*f^3QLwqvWrE#e_0{(O@8x>0WiqJ1@_?w&KBpHjyLjPG2& z|Ja-ViuZ{Tv48R(?dzdH_en*{FNrqPDVdp8x@4~qR+Z}XU^HD*F_LW~lJtssky@+k zTe%tNeoAurAYuKdI(UK>2^p6R!9MFVb_W2fb)_N~?l^6r$BAZ`} zKE2)r8t0KzCY7@Su9ldNs75I|+Lfl9Z6G3Pw}-r5`Q;sj#u#d5flUkO$%bEiagTop%I=qc2+H-x zW3LXZ8dx|y>XaM5F$f}{tX-G*D8~N^B{bnIO(-NT173n+O3}B5#Uq3IeU*ogGOo+c z7Grmx`^#}{`!s%=2)Jed#_`Tfg-qp&}NOGr5 zMNdBsE>RHtq4d@srOSb(F{FI?Ku+!{<|o9mlcYWL+G+^s(CqWp%x-M`zP8J72p^rj 
zwT@NaJ-+hD)b1vLGErc8s=-GC-Gknd97gmBiV_L;{q>`aDlbC*>ddkXUri3NMzMak=| zQ%5R8+bgx?CK%2X;8we)K)E0alZD~>wP9(X)ORDTAGC$(5E!3gd)?|B+4Jg)V`Wc= z05)|PA0!mbS^K66)&YLxwwSaE$#;5or zq8J(_f`tZb-RWe``BOtxq1=~*VGd;lrm4(QgV8_mPl`}w>= z^9Vn(xnqjC5L%xk7)@JxUehNQ0|*fhB{@#3OZ?v?Q?BL@(*AeMa4m<IC!d zXcY1BG{Nq`mri>FG;i5y_NIKMR}7~yRB8vz#Ea{9fleBmw0kkf1&u4HoiG~doRt>M z{nkNo2HWB0$s0iX2rXi0H}(TyNh|~1-DyS@k7s+tsj*dAzX2k%tIDQ%(Frn7T}!3v z%T2Pk%}aV(=hI&+J*7z=hUWD`i;j)f<|66!xFAIxpXRtQI&hv2RnEy0ahoQMU3(zv z4IrSq@?UI`fmvY291bOXC4x3wE*iayIHlS67zgarcv?Y@`XG)(RA9ZRUzvSXZPPvJ z%(U~29+u*$ecBe`eJ_c8q`FmKN-9fCA3R#haOl?M>R)$A7^lE*b< zEQQ6GdGeSR7N<(}wjKim7e{VEvI*`%WJ)0mmGq0`%-GHQO+`v3JI=fU;oB$SiP%w7 zPrq&V$RUk>gzNLHo%z3`B*p3M;c~^>C_&$ka+t_N6})>GU>g!{UOc|x|EAJyu^kE& zpBjVodjoeKN0v*yz!;0OEwFQrY;6y{bZRV@h$anr+HugW0hM{uOUy+Tnev{&63ItQ z0W5-vvJFC4dlU)8abKR-D2X;ArgXGedi=g43OLkNvg~sBC`SNCGX;Y zF)Jx;Il%4#oVcq`%OLv#+7M0b9pPaLoUy!p66ZYIwj)IM)IsEJ=nN|iJ$xu}dqQV< zsJ5qxFOKZ1Zj&>;7vjdZwG^Ibuq&**=D_X`+eAVWHOp=GVn7=#5A$;N%+(Vo^N1?l z?>d}I>t2lROF;iaC$892x%n~CVlf-t3Y>`KB36Us6xahDo$<@HZGK~>_+dFLi$t4# z;(FMZWf|xK)zl(R*9BL9XJ655C%IkG@bKraj2&^5F%6(F?rp|M{9`YuMm-RS3^1~DIX73!G>0oHPNR zJDOM*dx5PrzcU!zr8g_X-!n+B4()`Q9Wp|8C3N(c+5b4yVbaKWG+uB)IlJ3=% zBol4(7eTemEC2LUi55Rpaln_VHRKW8@BGn}dx=6xl}JYsf<1f+a3SoVPWt+sp=IM9 zSItgAUiSW`|5~sSq_y`{4~_-vR{`fh+1JD8_q4hRz|_LA?Pe!ZzMN?B4Pbo|00c8f zBp#^x)|p|r(^K&Sv?4p^n&WXbMQfVjP2Ur0u>`W_jkhCHi_bMww$V`>NRX$W_)!#u zx1*M^cwl{mtPpn>9ZkuJ*df0jRF4eKq5;+AdFw>Zq`*yu9hDATEbn@5O3~4pi|49q zqE9aOGMzrd&q_!gJ`7*Bas!!kXez^cVJ|QxEI$ znPc#^Bd3)P4R^T;-uQsKvVG@r=QrtHo;hC2BYQ}H$%qv)$~OnB8QM3kj^|}3m>S~w ze=2xO!h2z=^B|6AWL&LAGoqy-Ut+n4~BRy5?UH% z!jw#XyAloy1o<#SV)V*$4K3X`ZABukN5heI<$!>=F5fduGM9QJ&4^H5bUCB;{zN5h z7D$CkZgTt+Xsa+DdPR#gSPL0VONQlMvEnxP`(1bU#gDJAsiIR`J|4`&th+NQ+BZrK z;;UG}OuEK_u8!gZ^YF=W=9=?HUw=TKcQ+7gRE=7nF}N7IGvIxmeLZl>T6Ax)scubP zo&l^W5bBx&SxG(o9FwWO9LW)Q-N0}9dxS4^16b8)37z}+g$KIThh_FZvSZI0Wzd9K zv1#5akFi74auck;ZBeDGAfx4~PMB$w=>n0oGH_^NQ$c<9H5&I>=#IPJzR=zc@Z@$l 
zc0(U_9v;=1Y*uuT)Q3GtDx&JEhaVn;8-tp2Q_(I{bVpd<-;gy$(#ZERGdo;DjcpCv zkypSo6`nj(_7+S^nXsV<#6oOG`K8L+G)tK#;>x;UxlH4Nh0)X4&h)0_gUZ$@iaa*g zR!v3jk6l4tn0-L-bA&12!>J3PGFcorByG>R&O0W;i)BnXHB|;Is;X%~9!-6bQjdR6 z_MK5w4*nOaZe5IWiaOJ&zk4}$lZ&V8jM@QW9pqQD94~u$z+AdPIh|HO{TlMs^b!cF zT>n}h{{G}Rryf@>p1^g*fL!0JO$L&#c>?#mV5XWsOdLsRf>61HhxS#p6S?epB zvy7lYz+udPatHq%ECkZ^zf8sciw7FSQcw1wiHhr;UPl}*Wl(QJ>Et#F!pR+o1K~{o zML6dP817HX5yr*zAZn2(f}O`4M;Vlzl<6@X*;BN%2(*km`lk<^$Aby-Rhrm>Aho2G z)lf(&lww1*d#d!H5uy-L+*SaOI;~wwz5fq?K6rnziF`ziMyFS%(0q{oeA)5p!R_ag zkJ>4Ilp_M~vEIJ`TUAvRHM(1(d+HFPYuB|cFk zO}V#Mqo^OB*bg21G(6BLvq9(OX$8<;Bd;Ki^3}UhH^N8&!mNI%3G=%XYHDO*-#b?Ty&z#qa; z=hY)e=sXz$6)fO&(@5?^9BOZ_!4NWWrX#?uA*G-Cu#g|kzaSq+|EHcf{$?BmDvqx2 zFLDGQ5ed9{>V0Zdm=5yNj2pDar|fKgw1qbR@#uyOz6S01(p{Q^G6fpk+Wu@Djz%X)>^|`-sY}psG>I13aRgqB9@#3iKlc81Si`>w zU-UtU##OM)U@0BR!^?6Q&7_s^d8u=Iu7KRyX&%mRl-InoGXC{lr8K^I#qT8A zKMWWIbY2Gm9|>Dm*>{!-tQStNZRS zbn>0U*80p^!f4gDjgjU9cDT)NqEdJNG1R{RGz^9<9xD?rFei5ZR6$=OANK9RY5%_$ zYKZd$$C90&N8L;n7CIra?;~26hHkCFUaBpw|2may=l4lpnYsOhRGh*9mu7Za${IHC zy5S4h$x{m5yeuOM7gcclD%0O<4O}*sK3BZbqBsu&v0UX^W}r;=#imH3!>9YeC~`O& z6!*RSP`QPYe0+4uV}98aQJc*EPB)deO9^Asj|cMZ?0^`DF-L|tr;BdY4^3+CqIAhX zdqi7oHp0s6ve1F;R0q=d8l8U9o(9ui>26qeBNM(3al1W<}Y9xGwTW|i9PNpj&C%`p~)!$i89@RPCfSTl`tEf5bZ z$<8aUI5c$NHYjNd(h+krZ)%nSawGqYO&a+wi0YInK0wWT4!&AHA7LiXeYW#`&X8N| z{B578a@}H}BzUd0?L^_8>uH`Hq9@`~#~)9IYM;-4x_j`$b`kBq`rLei1h7TyBaonH zUx(oSyt3eCYdm2WMj zGQEY&7Lk*K=aS}JLu*D@mtNlxaLd^}Jet7CV>)*py7LJ4+>zJam~?y|5kI?DgO2-} z;Z>(zqi-Ny=g%XgX&#ha+yM+J%>nuG^~JRKcGPH-8s=u?PtwqW;asTx;a{c3X8D_ zSq%Ab@xa951MhZ^DP{YozXQRXxUKi5sBIu|6vZB`e12URbQ$pBXOJT@Bnj+Z!wPH? 
zR^w>BolpUsF`K)U2{RHpCYHbYPw22nL;#Ac*BDCL=N1NYv|z}Tl+)gCw?n$|i}BW8 zdE-(i@T2`Al%+c-QZ8X79MXOihCLVq-eZoEt*we<4sV9Ih%2GZi%o(3P~Mr|MZnV# zPckF){)Fqt>?y7k0$(HeYSp88IdC!NhrD21`KQU{8z;Znm-trhPDh^?!`@TnL9Q zn=RS-Y%?qlauKV*aI3KDXN)s;>Auq?0OC7ieWJC4_#q^6HuRs4dsWBIsr@ zkBmQDR2blt;J;yMgL@txa~7~Su^RQm(&ctk5&4}I4Mk0NjLWsI}J%R@z@;UA3X^WPfLe)b)nQfwz2q zW6%wwF#f#itrzMgJi9fMAy8GP&uBL|wTcv}=jMO=FyN7aY@pYL1RtR6V)cKLbv zJ6pjtCEsc7a_=U#jHUWWOpW2)`+MG+*NSYlq*c9M&Pcdc`}EU??Sn@<7-YX&e2SBh z-RIJp_Vqy(X><$Q^+NZZ;ztvDT{?P>epDc{(m`N>ZbtnNA3c$+`$qLEUD!SAE=@~V z`MUqTTYtS_9RwJ{6jqYyIF5^F+quYQ6vew`d{}y*(zg$9o%*u`N2Y}VM5nU)6Mvb6 z7-#Gx&~k^-4dxYvd`&7We%xHx6I`00287=RjSo^RgX`1c$Ko^1&>GapywzlRJ-p#upT_2JR zt&ME+;*uO=Fv;R3kh{uQD>-^~3@Trl5f1iW8_Ll{=!3GAjG+!+D*89l%{)myTkXDc zMJa;A@phTU%eKNgbtC$}D(5PD=nHXnEIxzFGcuhMO?2PotEx5Yc9P+GH2R0y*Drs{ z4LsU-GA@Bb2!oaCQEEF1zw~<3y1QJ-$)d;H$XcRDQe{H399+B|YP-Z^>0pN&eS61Y zy4ynrKj*coVgI%3G9R)E*dRT*UsS7$4wPF07C0l5RI*q!@TGX%*H#1eIADAF$*%aZ zbN{zL?N!{nE);pwGE(a5N*J+5JXPn0;)rtVcZ}{-EOoVmdgI#_3lx;PZoWNcxT%Z3 zQ3YN*bcSNr=*qLE!*$h~`BH0!jaALepC{2(xcr_j$PM7Icc3PPqLRD33?&qM?!w)_ z4Mv6K8O@)Ssx{H$@>I$GG6YVUQ(g2>VSqjaP&kP`>S^UZ6)#Yu3i07fOKd=iT(xu& zXZW4-x;iOmdQP!Fw`D%3cQ)22dNh|mcl)REh)2))iPpDAdk27xDyooijrHZ_O(2D? 
zQ2_PR(9K24oWB=9$RP!J07(sxr8VVVTMG?W;g9|$#>2~CmEUd_$x=O_q5;!fo`j2F z?#5N@sOk#Zco``h=ytJja)cz*p>&@v&`1V9dR}~2zR3EUoE{JpQbMi;*>1@T^BKVfh^-Opc&48 zBgy4p_cF0G!+iS6yA6O!xnDbFhOeBBSfH0A7;i<|F zq9V+czh3h8zB}Op!l$Edh=YSE`j_F^Jj+^EZMj{2QV(UD!VarDc(w%qqpyQHo}cbY zb7}BgW0S+d0+SIIO>%Yl&Cs2h(u=a~*NzlzJ11N^sm3YVp?UK$GeA^YcK;`!z!$_; zD#W$2z|r!o0e4Mn*lGaHg*HWP#s6#but*Z@Tw&8}wJd)L+Din79HUx($0aV2@s;t^ z67`vxR-A;Ql_*QkT#Q^@yNy(_QcI4Q7{}UaE_O$)6H0)WA=J7`&Uwz>vZ79+O3QhU z;9>GNr9gJ_vLM%vGOV{h6F(F?(@AGILbn^+fc0FjUyL)6WDv|RYijIsGMo3Z*~F2H zii-9}hw3Et$nPmrh>zL%Up|bI4xVz&Q>hhq^ccrV9gN&PJ)2Px^6@oACI7+*Is-`v z=@Hx7nm9#?pv+L16^@p)qm2v=fsIm3)%#+@!5NAmtc8-F*CtKB@&d-Y-5Z`M7o!1+ zJ%}=mp4%YWR_R)`%)O1fmW_eA(6_8T)$TVepNmOE0xGU_>YR#4rYNtlz%3M!aW9w$Tan?dZ!bI}vh23LLpz&>dUk!TPM{}Xuq25qs`JS7* zY#-|d31lRnc9}h}uSu~Cq{g;0!-^%L;fgDh4zMnh_uGXN9}aN=xZ>c}_iARE0p;g9 zzU-M$+~HGXb}rRP<1pI32|wBxxp4)#$eQAGLFs3e!kmsj(bMd154pN);aXwebQ$wt zVR_MJhj5GTR`-Qe4@-u)PLb>)(=TWFZ%vo(ME3FvelEDRNwTZvE zn&zzl%b3d(IH%_fIxaBXE@Gb|@c8yQ$hhv=nbyFt6n>g!z@~>zaftpbYH!kEeSNr) z-``-Luh(+*CjRJ)D8!yaN+Wy?6 zx*ABSyG#x;9zC1P4t%4)mJA~w{)@Vm9@RER>n{TI+P)}3ug;5;Mk0jbJdQW-?i{w? zvzE8A5E~-(v21sE33>-VwUj)=sXAZzKa9hKIPbE`0UGfUU);K~zXS`EYsaD-!9d^u zo-(_QUk_F_cT{8~+>&-lnNLPMTx;7^%upVCg$Y!zfx_Urml)>Yx>(!%H2o4zSkUqa z@B7Cg2MUxPiWq0W{#0geZWx$5`wCG;KS9cbCQdaB8j+Ls?#TE`R?+1bl}BZUR@rK` zEZJs7FO}+ZN+`&&p&BN1Ffu;qk3d=`?JYe@k&l~%%+Ud#_{IzD4Ir53$_tZ;Zu>pm zL;lC!Wj+z!WA$nDrT-*8|<#QO?h5X{CfnXX9gQz%YkKjw@?ftcJbI3eI#|4XpbHKeV==xG-UgYHeNA6%dZ{;rH&xk4>=+5rDVS~cXO7C*1 zQC0_3YSvX>;XKJx@xg_9PA(e>Ud_o>*N4`u_9B2mfwI}j4g`W{6f`GqK3$%9Wv6b` zZEouXJ1!R?XA<-YnJchokx7HrSuS6|SE*Lz9lC0je}@Bj~k0F5cwl;XALgPFwUC0Kw@| z^aNJH|5hGw_{YCDV*8qdnq{_Rk*S^)(SXZvEVY=;G*U^>R>9E`MoK=HOWAld?ynD%B&DG8Bw`vvmxla^I)Tc_n6Gk|4R!y6}&k{#e+ z)V9{}!kKGuD`xLofJ=y34cE|aEK%n{;$4s-s^g9BWAZAu`*zl26JS0~;q}XhkzM8b zS%F`?qT6O{pWcvytrez^Nm%d>FdtRY9X0z!EpdmneAm--S==?q6N53VB#~nCGKu(a z0ggr`7_mbP;%a<=yBqvS$Yx5!ayq_(rMxs^`7H*A1`3A#j-~W`wDuvu4OE`mJxi4? 
z&%a&?@9L_&2-kxQAUq^wAy@bytC}20^HN_>6A#dug#(B{4B5$w62@Ab$=EkQ;@x; z;t8a+8>_OFQEFYgHfBB*euo;)GK?wj@eRoB?FF^vHnF~rl%VXs>n%Xg>r948X?f9E z#3$bzug91tbF$c_$d$yk;*Qb-fS7a_aVx!Q=(j z>qoi$-=Kzu!1N^nMJ#^9@D4YIz#+n=nprLxDV_zys(zQrej%N}5T)I+4qAW!1iUvfFMS z-&@%F)B}T$=sp`f?pgOS!8R|XM!#e>=J^p38FLG0xuj8WN%mGj1=%q{E7dHp!G@Re zVV<=sx7PsB{470WSfSqykGBiRl8Z&$UB&c9UyA79;Q@_(NCSo*kx<$;5GAVnAP2 z=9qFGI`m6QCs%@Fzs+R*j%)9eN0xG zDOGe!ByYE63Sa(WZJ7lO{Z%td%4LK?%ak`g9|0G>)$Tj{?WO2LoOyU()2X)!E@Cr{ z0?c~17B>4_=DH3`Vc5bS3jzv4-!;R~ALzc3-Ff1@c%wT^-2Io&@=WibLarq!ZHv=c z21V25PuuzTGM!zQyWAi~zgD@1H!qk#VaAz=$;nQrvi7y4LOcK4P(E&YpOCyaXSJK> zKF_yOp3ODPjgIent_2Fa)WGJw(*IZrffHLJXg7v1WQl;ozLaJTukpy5D&IrpGmrUi z?)mJ2Vxkbb5INVgO(isI2!5c8K91-Rwk&$TywrWHk*+D^^{p(C9ol#C}j?0 z`Ix$rPVaj@AmA&ZhY2xw|eNaOqFJaJm0!jR<(^WT4K9+1pM!W>k-HT;~A? z!2J(Z$k(+S`lF6pGiVKMVLf0XumQB7uIl1=ZD0@c2;O37CndrV7u#EyQONslD0`&; zi@~KiDf#|iq#V5TO5}?bX{+@e+{vHz18qS@J1@p|^OtB)oL99Oef~E;KpdLI0KKm!depwsC6X_+`6?S(iX;i8U>`oYiR|b=s+e>SZIV*{F#Q0FEG0zSh;L3v0b=@vH4t&f3!+3mp8r zyD?Inwv&@La-G`ybSQlX5W7X&S`m_ss0(ctrqBHQDf^qg+BSv*?M|eL$wkp;0NMau z^6oO9i`y}5b$atFUBdLDU-mCCJkZq$zzYNPG6PkKe9CyMv`3E-NVcR|UDD}Y& z3K#0im~SphID3c*%EDv_&dN!oR|fQYC~tCJAEg*`}L0T z088Ma#07RsK@wA!=H^U)Gf?RemVpN3R<6t|3^vzu?KOXI8F}q7pBJYsCbFUXU={F% zo$~_YkG9o+a`beY(C4ETzNpaE4l(9_#mz_gP^?DvV#AI4uYh z&F2alZGR2N`Pi-kRd=}8cCL#q_Re>%Bs%j1DK0*aT44pNAq_j-;4FmbmFo)HJzO2D zFLvYxekJm)hbAGSSD|$zCufChw&_K-2V%LU%2xJ4$%xvr)~7baF%;rc;SbWWn-*1* zef*T|qj4+N^R1wAaFowpB}tTC%Dgq4Vi5CAPbqI2TBG%bIhLmi3iVeJRwf_JpFfC@8A=U{jCgjpc@ z2e_`$e|%dWGQ5Ar2?I8g0JbCtP*PX9#)R&NZbPyGrb97heY(;#ZEgj=W{ zc)6~SC}Y)Je^zzN((NmB2rw_czGd-f@!kc&&?o0aR_}e_ushF^`NT3a>UZQtjV_^* zXXo^so@8}lmIX=*X`RNuIVK9}iQgVE_I}Si9F_1*Al~LZv*)`z1>B5Za=z_2{lOA= z+XlX_8Sa~>DLZOMA{=}bgLl^w68-RZTe98i4&-`<0Y|F_KHfW|@x)J%_YisqQETpB zJ>~gnXRT^=12{UmTAFH;WlDe(dfdbvM4{`~jZfAA_CsP+>1 z2GqVP{{}dG!>-Dy)oC}>o{fpoh{Vad2`f)NbSh30IU}#0TyZwqmQ`=8v;44*Sv$F! 
zsV}w~>g8L_?|vR{?<=aT09TPMT9}g3` zVWBs)bD(T4DMr196cVJMolL|~WXNkcvY<(o7`O{xUMjPErs6wGEte)Qt<{kCPRe2^ z*7~`DW?0kL?6Xf>%7TX{Ufit0v4xJ$O&dFsT!mcM35`=<;K0|9@2Rbx_N<}A6gY=q z&EBYk>N;MZ|ECq|Hh(WKSd03G?@a3bCVr#?bGLlRXo%f^zgC!UZ!3uMs@H}B;fW*VAcexR(37Ga!w4GGxQE)Hjsme`C_+O%TY0GRdSvnd$Fk5D z=uXN-MW+WYO*(UWxi0n_YUUUhv1iYo&5w4m(Ypu^aao+@k2YcqZTX>IjUuPd{j8h) zT=Yz`h`^|_p7@P9d%DekKm_ayD?c#m=Kh>xOYI)X#5zFj-;z+SPmt|nSc{E*Yakl% zg&XYP6_0skWy!?xfZheZs9z#)!GW|%Z=)|q#voM&CciB1&+S%hT4kh870w3LM0(LF zZnIr?$)_Rb2C2kPoAb#g_|)?FJmnY*B-TWg9WI=GY4D+AZXVGxKlXd9kcH`v&&yrv zW~g7A-Acu(beoV*u{Xk$@xeSUcE$8hv(ylOgNf<^jSy% zk6-GxRSa{fzx)+Bm3L8Zx9s4#-1Zbc6Z2asiJ+RUTRh0#xsD@rM7^E6HPOdtp`dCY zWg*Y9AcQsqr+e(=6!hr1v#Y6QpSV+>1HDv{0mN!zf zfX=fc#y{zKs*D(5D=sv+TCwc?`6>ltW%RsX4#(iprrT)kp3>E%U!?DNqbhp+TX0o( zD?C}&{PtbeH|ymspVtSP!z=DegA;CeOqXn~zOay%1r114A)7i&}q37Gp}vA4~DW zZLeXyo(O!Y)36NSO}U^5wXYq$4(`D=VIh3RoY-C`u$eefzhiq{WMl9fT$r6Qw&1w2 zJR7xe1`NZGIhgCLI4*V0E~2Zba`#T-lFYq{y&-|7cGd&5 zf1X&CCpWDKGcQj2^d+yYk$dj7vTWY^DnpU;nHn*M-QMPvf9A#8hgZehT=Xmd4)ws6 zC@eA6#D?&*c+K$&d$jy`cvUVgII^4-s?vnF^RMGQlxJ;QpUT-}KF9>-&*0CPD=c8%A8&}vrv6;Bg!8#7 z<2B_7cO}H~8Mnfv)+0CjWYn8AY|#Q|L$XX9d)T;&D&|M!=&(-@Opir|aAU#@xP}tb zssn@2h0Oc+4f^Y(jjw2ok9hCMVZ@dvTmLbSPY41}#<7cAsZ*Ee&G{BEq|`A&3Xx|E zeINn+esq=N+O37EFh*Px7!&Qt2KUe!9X6R|kc8z8ZJbKkY5H6%D6;XqsOH_5{Jq9k zy!NOg$33LsZ67pmd`f2UJYCG*p;?|*OXD0#Vl$~h{f_dC$i6PTB+;aK57_hAT%xLI zr#{F{u<|aPiv7Ik@3+|fpaI^?B}ci#CfVQEHWZJ4@~}$DUy`vh&97sIfqzRLX>dU` z|FJj^`6EYQC}x7Si))_>XpogGt$or!a7fW81oJKiMa@x+M?1&fHOl=Ar=QcmQeY%k`I0;fw#E!^1_ z5pD_^TubqbzIWvWo<8rF9b+t}kv5bcV9hv_-?+;r6;ig1bd$XMYghi{6Xsg#Dr{dn zMGq$3St9h>?s)EAhu^->zWZ;c$@USSE0ZeVB;jz)&Xi*R>eBV~62*yewz-}$I(t;* zum2EgL)-Zr?nzvVkZ5fC>>#q4(Z5Hj*zoO!QRb4F)#zxz(wp;9lZCH~%ypi+xUEP& zF3t`gq<3|G^vumU8MVbEjk0kbu3^}-nKIm~dDmk2>}74y`;c-m zvd`Z4CnzJ@H*b+DeZqMyyH;gN4SDT1)eSU}F-A&9{0q~g4~_U7<3cB|*NPNr1$(!r ztVB$Wx7m&8v83Z3a*~vv>*Jk5$aGa(whAJ5e}$KyDj_W#nFw`bCN4`v#59e!zzam} z1HAbT5t0gYM&^rQ_4(%f*`FW1?Pm{L?@=~BfCKyT@V7|Uh*HI&p6XOexzzOGceEY6 
zN}8{aR{*w`Xu#Ht*!3RKbuvDaAErL^7(+ z#U)L_H273~9`_jqV19x~`6l54Uo|h0Ghmpb9@i5jjmB%2-T_s%x#F&B2xNxTTosfcV^D1|A<}WcX!;T}AcLo)z6L-C$ zeMPHqRdQBZF5F5Fc4~~vPf6{p=aCX*q9@zq$$epI<>!UP6(>?ZYqU2E=n&j{g)5|i zp}bda|4?VsA6^8bHU#W>Sk=09(nN5kx9n10(Nf0*mPFamoJT5x>13~DLT`bnuj>V>-BB$s;3aq+@Nu9fOY6ar94MqOoq%tHf+y*g0oD}q*HX4XnwwCjPre6xv;0< z2SwGkZ-$q;&`%Zy8*QO3+#Zqra+Z}jUvp|)y?XhX!!{MHFH{N2iw@ z(mSziZ|D5EwVXMYvO_D+9PU7ny^FOwPvKjd)MXmLsa53&BRGTb>+2!-jy6{xy`7Y$ z!1#3iN5B-YOS@kgNaoPaG!V&mwSELfiNA(UGr`!+J=yqCr2gb3ryfC%ABBDfEY&G$ z7zAQg882c`6(2(0@fQ>BZ^AR=4}T11TT=bdPJ4-?LyVI~SWqQ&U1{ZtDPbyAWanECq(K0> z@}BybnxDk&Ef+&$tkIvQ)*5uYxYmt~5EuyqK?#m!krX@yiQ(JFu!k5n5kst4A3@Nr?=#*NLxh=g@5+myR*;OxK zDwr26VZre#9jWfM7isn%8mww*{uV<_*(42Axkt|!MOoHge(w$_`-2y#%Rxdz!vz;< zg8>)(NPp5gajLM{4t{kv*>)^-ZFMLF`9+wZxL}6%Ae5-gOmd%Lz_4n))nSpZy!-%d zpINnT$P|n;j#^^&LCNnO%{cNIaca^WVM`G#SKAh%Bh{-e!9hdh zL9xEu)7yv1wXsu->L~H+Qi&FqLg{(G{6OS6F27V;)giO10&8K> z>%6o+17Mxk>;=jq$uraj%K6Y@I^BP-k92|1_Sp`xe9zs|*!1%4-GoWI@swm}#;=Au z$_qF_u-SAym$(W6p~0>0B}u-oE}oI~x!uYxthpuXZT`)Ob{O}>YdY7COsx_BFJzQV ze8QRecTzbA*n>qx?X!bz9c0XxZ{Ntwhr8F#mi{V9{$y+NfG$4}GG3>f zyAK&Ph95RyiBoo=+6-sU_5m+2&*#5YrHot}!?@i!qbEgn2yhj77sF!sA(m;<=Y}+< zT80(J92lvcF`pe=ml)oL1AB0Tbbpkq^ZhlQ$t$(_1H#g_dIMgj(Uz0eUhIeWwP$Ygl zylk<3zw<{c(paD!?Vl>iV0uAmL7}AO>phEvBYZx z;`CYu0RzL2cIQ*HZ2J`w1;G4bdk5y%<7}BRb_Q;4ouZtI=BalRg-~KyYEO-mpVDfMLwI-b($z*j)Kn6`$WC9C z(wk%%v|KN}h2Oc_O=j6%_EO{+bZnJ)G*FhADgEnv3U}&1``NSa%4ntrN8G1{Yd(B*RzxJO?ru~?h{LWm5!jn)b_IO!=ys^7-^;4 zU^SFRcvmT}pf@>h@hEmMv-)T`(2K5dkL1AG(+MkV&3k7D=UZCPv|J9FUk)DLGx8dZ zwec;vbtse=dU%i&F|NQ73^zcp36E3E+EGhP4Oql&4-p`rJZ=_<;r+Qj-kIs)(nW;L zT?2tRon6O(+MpE|yvjhw#ShN6vO~fn@*2@^M%GR1$_2Z_%tOQF)HC1m<%V?ARR_No z#aoOJw3{~Wsn1L3`fTW%SAM@FfiPpzM1Qh#L1MRh7E$mI<#uD=YK z-kW9h5ua~b(_^~-igbGsjzmm;Fy5=eWbGqmeN8{gU%x#9vhLFnl%?(_5) zHG+F%Q{jpWIcd;t*i6Fe2|bWLm#wnAeW)+*C!ME#UWJlyFcV)T-;R+(IDe`j@~J?vm#WbT%JF%(}y2<#u+EeS)=sPYdt|pBRQm_PfvG(c>@m_31rj=KndvGuPU^@gU zae0Ef6dNZXabBab(iTZy?^_U$qd065L&JJJXjWRj;VuCbCKv#Aq9&&Xqj0eydT7v4 
z;P@Lp{0i64wA7R`0Fd3uPOuQje*@v-QBhG*se79(T)aJy9N2$AcppE=4hoNp`=~lv zY1;N_M9GC6 z$<*SVoeFnHjQ>J`A=W4Ec93xuF)=ZhokgtmFJ$@t;Bdl5#dyK3OFY?^d6}DjJ44r3NDg8G#F(*zc)7T+3YMy+AIBm=~u3up3g6YZZkJ>)Aq zGFg3ne{-e{<#^?<(f4?OVZNLHXVv1|Uj+&aPo#wZH%H=232JYop7}a&sH+>bGo1N% zgFsut89;-S$Hm2^wJ86sN&mOmKKRGfI;1S-TcX-h<&HlACd(~$M&xHm*`LDUt6IL7 z=#na;b|8vZ6Y`_(VNbdjX@7H8=Oj@CzbaTh z=eEG`I&H-8A2GxKHm?4s-A%o~--GY(AAOC#&Qd=JMSJcX`ikBD&H zoE-~~hZn9N>@CVghZOci4;8;-{6n54_XU~NYBjWuP6g^JJ<`Z1>GP#gNln7vwXzmGB1_s!^$~RwXGcA?Vq=Kgxdp+S<%>JTe|@vdehpWI7qnaFw;x zWhG%rgO9GXJPe5oEfhQ0{LMn5!7k`dG~O0JQI%}#*raJ5lwm%lbs%SB+;8{9rAjRaT9qHBbt-|_qt zDe6#*=5^*5VziAgu_kmL{Af2zA;T zeEzNiFzS+5D^q_6!Zluoo#>js#t-*5;6Q4*rb&aohXxx+-{G>*PH#76)cMbakU->M z?faF~ZAG)rl%@JRK8B^2uUz4~+2^+8H2+y!ivRDP1M7;C)Aq;XLdb2C;G6%c5<*v% z%3~7B9ELaY-KKQ-8dYKLj(Ty^`k*kJV&#`_^in>4qOL$6IQ-K(HGgw>(3v99#}3W0T5t)}lG zV>4g<+Mc>MZD^?Q{U%^saKxR&|F|@_1sA&#q;^aIFusq%EQ8HQ&t3gad%;TV5#P4gtD(&v@XV@TULIp5W53t**$7xB64XcRul`wCYpG;mJ1eiv z*_}`$`|CtcMtSXZ-+*XYAKZtgyq)n48^H z(!@yz$O%g}*Tr)vt=N_}i_z|7EgrG0OXv5`#x>7M$fRx_70zO7gx5Wj#G9we&pfqO zJ^N&Et!IE}Pc{`gGzW&Aeg|tDOCj~yGa^SeTdqIyPUd~V)8?7>w>uDzE*+!d&L``P zzBxkid%Q=l39c?Sr5T2Yu3#oiG9hs}Ve{AFk=5doT-F-l$J19Ib3JQ$6uQ_qyQu+G zh=$PBVZ%$Mnrm1;Nv7*5H?-%d^Bfd9rtb~x4R30|LWM;Bh^y)6cys-LiQ*&f3z9qr zi7xCxfbZD1*XbY0NB2sG3?`)mjw>NsN1O%1h^|^tuJAr7R}4(#MT5#n>0MyCNS*1C z2dIsu@8b*7B*WuJoDtd53Gjol2RAQs6y`490}DvUBoE<+9BE>n_0T=!=J^xP+xb08 zt)<-1V92*zwENfzBmEPkrS{IYbzx}2+puvTOx+G4Q0tCy# zR&(b8lkSQ#x%fs;r&+;KeqyXo1Fvqr*>ZqDg%G368L933T-91nbnbkH(KBbBvHjpw z0_@`w<-ykeDSDa<=pm1;M61EN4?W@+%(cth&mv?M`b+~db{U^nZ!Ik6aRvMBA}9oe z)!q4(E?*M7GCA{P4MJ{>7|%xG1tW~c8@_vO^JTLFXvI#0Bx0amm@zZxuU|f}c%5>R?IUg2gxH&SIHLc3*SKdjUN#Ueehk^8x4Fko`cN^(lQj zoIHzdx9^(l{|o@NmBNaBPxXFTQ${z!Fh++J>1R4UOOavbsE%scQVxi$mT(+N(nwwl zEi-$iecz`a1Lw{IF0nCkNS(dMTQ4k^8VqzOC;*x*#nZ;{;8sGZ<&JC0`HYjbJkn%2 zOpk&?*gJ~@r~D_kZy#YR?;xvwUEUkI@>XP}Z9m^vuU?&86JZ|Wmfx>p;ByFxQ651_ zV7K;~m^tm#;4z<>SxwrWZ1HRlS&{jC@)YtSl0wQdvQqu0B(~ZsEA~V5Tct-5aPMV~ 
zWJ8<3k8K;-9Pk7Ba)#!?ickFlUv}tnhS^O2_uUE+UqR_)A@yV$yy6Kpgx`9LZYXN9 zo$qL6O?91Yx37e7Crwi!cC_7U5zt|cTAr4# zsS`?+hP0nSLr90QjM=Z_jVQ(B#Zk<~Q?Ed^e(tOs`bFVY(4Fpy2$5a`y68wHNKp3S&Y6PSR2}lUZz7QNeGw#_vXV2MBdq3v9N#6Il_a^uL{{QE> zj^fKAUb5G0jW`*B)}R>it8I6B?^NNlC`ThuDUz7_`4`=})()6JZ6Qoz ztrK{$v5=GaJ*zhrQP0Hfk9(5rQBh*NyNB$Meg?H@@<@?ol7Vvaq6>Un8UC`#Z!tJy z9XHB|dz#Zx!VhxhC=Ve;Ve54FbCl($zu6Lm4k8qb7+z9)WL!AMEV6ndu<1)b?jNo|K$piL`4l8YUjUz7< zZA=zp?O&@J85kR9jMexhF7awix0^<%nJ)R_%;gQ4Boz7W!$E0N;l{L&_jRMqfszoP z3W{@WH|-1t9lj67ua3ioP-|sTV)GNg(KJ}ce9HO+Bx!sgsJngk<1W48v{U^&2_QVY zMjgBOpVZI7{{bEqdV>ohC`a9f-oeW;7avj9&S-*1K_@p3zccx0^5gOyUGM%sEJitIhbTwo8JTK3%K8@p3BWd3bvTrHip9r(J&aDwvob#s4@>4PTg!&Qc2hZg zU~qO^FL1=)T98+gL}Gj{Lm1K^50P%t8BbD4hK8ug~&6~dzNsgWxRW;cno%si&`%t{={p9_UaI#A^ zS?VgkD(e&zXDsSpRgIx3DYL z@Lq2Qbd(Nl3XslmIi2)TA$`{1@{Yf6I=cnjvPE|O6kT#_ML5HDxa38LBZc8MvwUrm z-26W3sTQxH{M7T>vcI*QyaQdp#qsY%t%K5mVnzH73S1p_MDLJ-xv8-?#RayukIR)E z@AfDqH_AZg-YV6BU2mkHTfMxXvIX$*aG9mLIPc4tl46$o?~V$~FB%KB*z$GZ#N_8x zc%0`Or}EYmF#;$t>ZhlzJJnW7O95~C;H4m)I&n0N9KcypZt1ftCOD(+!#SgDsrkg> zhiY$x@5TJe-E3Y;taL*J!MrH&y9b5B8_$J+R17z>9h0t{S{WHT=lxayZ2lo>F^&K-`_anO5=^GCsNLUK|JD_S%H^;q?Yr!wh4SD}iZM^s zj;Vb}5gtthYRil|H(etM<$DqSYT}{%EKu@pj6I*JaDIg^OfRUIwBj52p<+yA)RVNB zEIC|V`J}cC=A}-9J!-LJ_P7SAE+yvkB6dtf?0 zQl#{te%0a&)+7Dnyt-IN3MN=R=cY_yEmG@n3|6}c-b0#mQ=TWoINaW2UgGUA;%Fw7 zPPJJZ*;=e>)8c(Mm%82#m`=`Ky$&wEhaGj)EAZ!HO>{ehO!I=IME8+GW+qK?Y^DG# z0v?zf$!!uL$AB<1m*%quTi(;FE2Ll92aFJ{w|!+$*cZrR_4kpe)r?d z)7gn>YbymQi`;zEGtc3SA+TZJKp=jzQU*S(tfXYE{LuL)J2&@TzY$>c2gX7|iP}In zHJ2Oeu^JTg?AH^)kqHoI+hs&zizvDi5tj&EO6Bh3a6Kh6PB_WejqKh65&^G4hUq7w zfCN7eQvrR5O2+uHUPgd8l52dPKLfB@ws?on2IUC?byrCnMvEkQS+mjD@**n9XxfdT zISh;3I6KSSPA+b}G>;^ztErxRdC&=8i#@|@H8-&MoTz?5QHUF>&C3;kX+EvRo3Tx4 zW|jlS14|&`nBpiSVFIi?UvD+3Lf47OObs-4EJ<+sC6v}3ygr&Q^b5LRmPob;n+?Wq z08C^3xb4)~fJPzJ)oG|$uX(M!tWUHtrGN}mg*c`XbSmu!Po!68X3k(aJs)TT0b$D> z`U;cXtWczb05lMwlWgR&lcF9pxncFPT|X}}q~?f>9{>kzy4o5atd*=Go6h6YwC-^` zI8R10;@A^zIuS=hnP2UJ*naaV>2g#4K11wu;m1}}t_%(ncduYSVC0_dxZG~DYvBai 
zZ$1o7UT)(?iw5P7;e$?vxm#572L+d)M_R7y`xW7QRRHtB60%sd2)ee^#=jj{`)HOB z_g?dX=_#@D%@s4;${}A*)*y_PE4_Hyxze0=GF?CYPI#wIodqvvK;?_e}s=r z7wVYdN+P?Pkw+k>1mWjSWXk*K=e{P@;Eeu92TlU_<4SNy)0otqBQ-mGp*#)zl-Ir@YMe3qAhX;=h~- z0OY!*eabzz*H^#&r5|yq-Cp)9zOPm~_;30C?gRc2_SGXi-tXEB6?t=H*>9T{CDXTe**s|o@?Kjplkt5o~d8hMWm6zS0N|k zzo7{N55Yn$)!$WiPeuWvNfyQ5p^3A7E}el?=&z}tm>W`}k591xltX*OGh=kXi;zYH zo83)acqc22-;&m1w+j_Z2Yo*?abTbU4b29Gq?*Vb>CPQ`Qb-9cwk8+{wBz7$o{^Ty#$R5n^!-u6h*bge zAG?9r8^-pHymxV}gvDiIC1gi=(Sd1(YIUe%^dbkkSc{oGy8Xe%A>~DKPknsfRn}#& z3ze{yMLzqVl5+g|c7)sCe%>Q-Fnyw%nYW@R#{~QG6JOGZ=5=jA@f@0L9pi(Jv~<00 zbw?Mg3N9e1Soiebn%3Wqhicx}!#Y;^+e7)^>!JUV>hQn6=#SHvt=(!5J-cVI$nV#8 zbp6v_b|LW6zZRbUpMCUo#QvY9^+nv*>tG&VP*fJQa`h$bPAp>sGkv74>+OF6$)AmN diff --git a/intel_extension_for_transformers/neural_chat/docs/advanced_features.md b/intel_extension_for_transformers/neural_chat/docs/advanced_features.md new file mode 100644 index 00000000000..b2e11d8c2f1 --- /dev/null +++ b/intel_extension_for_transformers/neural_chat/docs/advanced_features.md @@ -0,0 +1,199 @@ +# Plugins + +## Chatbot with RAG +NeuralChat introduces 'plugins' that provide a comprehensive range of helpful LLM utilities and features to enhance the chatbot's capabilities. One such plugin is RAG(Retrieval-Augmented Generation), widely utilized in knowledge-based chatbot applications. + +Taking inspiration from earlier chatbot frameworks like [langchain](https://github.com/langchain-ai/langchain), [Llama-Index](https://github.com/run-llama/llama_index) and [haystack](https://github.com/deepset-ai/haystack), the NeuralChat API simplifies the creation and utilization of chatbot models, seamlessly integrating the powerful capabilities of RAG. This API design serves as both an easy-to-use extension for langchain users and a user-friendly deployment solution for the general user. + +To ensure a seamless user experience, the plugin has been designed to be compatible with common file formats such as txt, xlsx, csv, word, pdf, html and json/jsonl. 
It's essential to note that for optimal functionality, certain file formats must adhere to specific structural guidelines. + +| File Type | Predefined Structure | +| :----: | :----: | +| txt | NA | +| html | NA | +| markdown | NA | +| word | NA | +| pdf | NA | +| xlsx | ['Questions', 'Answers']
['question', 'answer', 'link']
['context', 'link'] | +| csv | ['question', 'correct_answer'] | +| json/jsonl | {'content':xxx, 'link':xxx}| + +Consider this straightforward example: by providing the URL of the CES main page, the chatbot can engage in a conversation based on the content from that webpage. + +```python +# python code +from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, plugins +plugins.retrieval.enable = True +plugins.retrieval.args["input_path"]=["https://www.ces.tech/"] +conf = PipelineConfig(plugins=plugins) +chatbot = build_chatbot(conf) +response = chatbot.predict("When is CES 2024?") +print(response) +``` + +RAG demo video: + +https://github.com/intel/intel-extension-for-transformers/assets/104267837/d12c0123-3c89-461b-8456-3b3f03e3f12e + +The detailed description about RAG plugin, please refer to [README](./pipeline/plugins/retrieval/README.md) + +## Chatbot with Multimodal + +NeuralChat integrates multiple plugins to enhance multimodal capabilities in chatbot applications. The Audio Processing and Text-to-Speech (TTS) Plugin is a software component specifically designed to improve audio-related functionalities, especially for TalkingBot. Additionally, NeuralChat supports image and video plugins to facilitate tasks involving image and video generation. 
+
+Test audio sample download:
+
+```shell
+wget -c https://github.com/intel/intel-extension-for-transformers/blob/main/intel_extension_for_transformers/neural_chat/assets/audio/sample.wav
+```
+
+Python Code for Audio Processing and TTS:
+
+```python
+# Python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, plugins
+plugins.asr.enable = True
+plugins.tts.enable = True
+plugins.tts.args["output_audio_path"] = "./response.wav"
+pipeline_config = PipelineConfig(plugins=plugins)
+chatbot = build_chatbot(pipeline_config)
+response = chatbot.predict(query="./sample.wav")
+```
+
+Multimodal demo video:
+
+https://github.com/intel/intel-extension-for-transformers/assets/104267837/b5a3f2c4-f7e0-489b-9513-661b400b8983
+
+Please check this [example](./examples/deployment/photo_ai/README.md) for details.
+
+## Code Generation
+
+Code generation represents another significant application of Large Language Model (LLM) technology. NeuralChat supports various popular code generation models across different devices and provides services similar to GitHub Copilot. NeuralChat Copilot is a hybrid copilot that combines real-time code generation on a client PC with deeper server-based insight. Users have the flexibility to deploy a robust Large Language Model (LLM) in the public cloud or on-premises servers, facilitating the generation of extensive code excerpts based on user commands or comments. Additionally, users can employ an optimized LLM on their local PC as an AI assistant capable of addressing queries related to user code, elucidating code segments, refactoring, identifying and rectifying code anomalies, generating unit tests, and more.
+
+Neural Copilot demo video:
+
+https://github.com/intel/intel-extension-for-transformers/assets/104267837/1328969a-e60e-48b9-a1ef-5252279507a7
+
+Please check this [example](./examples/deployment/codegen/README.md) for details. 
+
+
+## Safety Checker
+
+We prioritize the safe and responsible use of NeuralChat for everyone. Nevertheless, owing to the inherent capabilities of large language models (LLMs), we cannot assure that the generated outcomes are consistently safe and beneficial for users. To address this, we've developed a safety checker that meticulously reviews and filters sensitive or harmful words that might surface in both input and output contexts.
+
+```python
+# python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, plugins
+plugins.safety_checker.enable = True
+conf = PipelineConfig(plugins=plugins)
+chatbot = build_chatbot(conf)
+response = chatbot.predict("Who is lihongzhi?")
+print(response)
+```
+
+For a detailed description of the Safety Checker plugin, please refer to [README](./pipeline/plugins/security/README.md)
+
+## Caching
+
+When an LLM service encounters higher traffic levels, the expenses related to LLM API calls can become substantial. Additionally, LLM services might exhibit slow response times. Hence, we leverage GPTCache to build a semantic caching plugin for storing LLM responses. Query caching enables the fast path to get the response without LLM inference and therefore improves the chat response time.
+
+```python
+# python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, plugins
+plugins.cache.enable = True
+conf = PipelineConfig(plugins=plugins)
+chatbot = build_chatbot(conf)
+response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.")
+print(response)
+```
+
+For a detailed description of the Caching plugin, please refer to [README](./pipeline/plugins/caching/README.md)
+
+
+## Inference with Docker
+
+The easiest way of getting started is using the official Dockerfile. To perform inference, please check [inference with Docker](./docker/inference/README.md). We're on track to release the official Docker containers. 
+
+
+
+# Advanced Topics
+
+## Optimization
+
+NeuralChat provides typical model optimization technologies, like `Automatic Mixed Precision (AMP)` and `Weight Only Quantization`, to allow users to run a high-throughput chatbot.
+
+### Automatic Mixed Precision (AMP)
+
+NeuralChat utilizes Automatic Mixed Precision (AMP) optimization by default when no specific optimization method is specified by the user in the API.
+Nevertheless, users also have the option to explicitly specify this parameter, as demonstrated in the following Python code snippet.
+
+```python
+# Python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, MixedPrecisionConfig
+pipeline_cfg = PipelineConfig(optimization_config=MixedPrecisionConfig())
+chatbot = build_chatbot(pipeline_cfg)
+```
+
+### Weight Only Quantization
+
+Compared to normal quantization like W8A8, weight only quantization is probably a better trade-off to balance the performance and the accuracy. NeuralChat leverages [Intel® Neural Compressor](https://github.com/intel/neural-compressor) to provide efficient weight only quantization.
+
+```python
+# Python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, WeightOnlyQuantConfig
+config = PipelineConfig(
+    optimization_config=WeightOnlyQuantConfig(compute_dtype="int8", weight_dtype="int4_fullrange")
+)
+chatbot = build_chatbot(config)
+response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.")
+```
+
+### Weight Only Quantization with LLM Runtime
+[LLM Runtime](../llm/runtime/graph/README.md) is designed to provide the efficient inference of large language models (LLMs) on Intel platforms in pure C/C++ with optimized weight-only quantization kernels. Applying weight-only quantization with LLM Runtime can yield enhanced performance. However, please be mindful that it might impact accuracy. Presently, we're employing GPTQ for weight-only quantization with LLM Runtime to ensure the accuracy.
+
+```python
+# Python code
+from intel_extension_for_transformers.neural_chat import build_chatbot, PipelineConfig, WeightOnlyQuantConfig
+from intel_extension_for_transformers.neural_chat.config import LoadingModelConfig
+loading_config = LoadingModelConfig(use_llm_runtime=True)
+config = PipelineConfig(
+    optimization_config=WeightOnlyQuantConfig(compute_dtype="int8", weight_dtype="int4"),
+    loading_config=loading_config
+)
+chatbot = build_chatbot(config)
+response = chatbot.predict("Tell me about Intel Xeon Scalable Processors.")
+```
+
+## Fine-tuning
+
+NeuralChat supports fine-tuning the pretrained large language model (LLM) for text generation, summarization, and code generation tasks, and even TTS models, allowing users to create customized chatbots.
+
+```shell
+# Command line
+neuralchat finetune --base_model "Intel/neural-chat-7b-v3-1" --config pipeline/finetuning/config/finetuning.yaml
+```
+
+```python
+# Python code
+from intel_extension_for_transformers.neural_chat import finetune_model, TextGenerationFinetuningConfig
+finetune_cfg = TextGenerationFinetuningConfig() # support other finetuning config
+finetune_model(finetune_cfg)
+```
+
+For detailed fine-tuning instructions, please refer to the documentation below.
+
+[NeuralChat Fine-tuning](./examples/finetuning/instruction/README.md)
+
+[Direct Preference Optimization](./examples/finetuning/dpo_pipeline/README.md)
+
+[Reinforcement Learning from Human Feedback](./examples/finetuning/ppo_pipeline/README.md)
+
+[Multi-Modal](./examples/finetuning/multi_modal/README.md)
+
+[How to train Intel/neural-chat-7b-v3-1 on Intel Gaudi2](./examples/finetuning/finetune_neuralchat_v3/README.md)
+
+[Text-To-Speech (TTS) model finetuning](./examples/finetuning/tts/README.md)
+
+NeuralChat also provides a Dockerfile tailored for easy fine-tuning. Explore details in [finetuning with Docker](./docker/finetuning/README.md). 
+ diff --git a/intel_extension_for_transformers/neural_chat/docs/images/neuralchat_arch.png b/intel_extension_for_transformers/neural_chat/docs/images/neuralchat_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..a7562042dfcd2eb7a97bb413cace73b4666167a6 GIT binary patch literal 434602 zcmeFZdsvcr-aqcl>`u>YQ$G7`n#?@xjI(2D<(Qdy!kulC)QUWTqUe;PCZwc+1jGZQx4X<|Drc{&@qF0RFQJ zTkJ6l{PLUJPmg@+;?h>Ne(Cscf#2UbeJB;<;`m{2{dF+il$*`%iy{pfQSWzR{IViu~=+pT2ne@u8{jYc-FhKF{A{ zZ?md@d~%og&VPRNa9W`&VK#;3ay};h=8enVc2touD)*)L%4GQx1i10PzRK&3u-54- zzkB0fmt0(yiq)Kyp+9bZ^OqMF^nICab#Z>b`Q^puV+7%e;Oj~M@ym;kJMF~#$ldh+ z{N=?vXshF*EIr|aUtT_Q-rCbiV?AGc>a)!GrX=0kO<2;`Z{8XEzc2e=l{h`=f4#)-e*yqs>U###I1*-fH{nL(20{tXng@7DF>0;=MInPPd$Cyw!Wz zSueP_SY!U%13upBpZR}cVhI7Hh=KK~#_w!@Y>mtizd6GD=H;sgANLjb%50dA>Bs!B zt1x}8H1fr_53grR>tUEeS?~|6ebu$dGrZm8AaBd2mbc2jZ7P zw|K8>j6NdoqE<3s`RW4li;oadjG_%O`RCM39QwsaE9+COra#NMs-lAA=`AXsMyjk| zaxY0(q;~xkEj{Lv*Twb~tc$vG;XnyI731&yc&9ny+&f9LVeuuOY+Bx(YY2(0KXIl! zo(T0=jJ(6sY|RFZEWiv~6rq@c&F1mCP+>#X^OBJgI2jYh3-yt4(zmSM=P_tDk}jtN z#e4;)A8>Ig>+o^?S|IDcaQrh!YHqIgctgBp@_5<;pN2Xxb@%W>>7WXCRnHkeAiWMw z$hUqh**@9uK)A;h)ezbVT@sImi$*MWBr%#i-c$VQ`@1Z)I;$5+ZSz`8g2q%|9;e*7 z_sKZDl{0!_AFRuEEckghU^zdl=+@C{T8RDTp!#Icvy@P+D#ckdyC zTv4i_fi2IiF^a$T2fslSo-uS9Y?+29xs`@bhZcjuD+BLW1YKrtARGU&rd`u(;qiR5 z_wxO%c_0R)@jBHN3A+E*H_yFhJy_5TQ-Y&^ed=?*r@%EPf3Ym%VUJ{3jjIei)~q`) z6~6t~1*W>*5>#I}PCQyyrQM{Nnj1)=AwpH(TK_Y5F*UyP2(P2}>>I`_;z!7--~gw= zALHUQgn{)wLRjtCUJbcBDxflVVByW=p6DchqkT1c&rzyXHAqKjP0}rL7*HmsWp-Xe z&^J(ghw{yNt~-YMDv~q*8OzI&ZfPF@y)rf67mpb})(_di!g|19PsZQOq3QlIgm#0hgO+eC^nu`dZolw){>2c%jU;=9U8t}sSCXiHyac<}Fqu)Z6Mco0#q@w6t=k;u zLsb?b?%nKqzNFR%L>TOJ7|oG9&mBCJL(sOPW6~sbqY;xJ<|prry-?SGssHJ^KC!X$ zC$>WMLYfsDyr%2$=3nNuJ8}gp8-~26iNT~1`tR+U`l3;~^6cv@hUJKiDoq?9$_yo$ON0_sIf>SG=tQ zUkS}Cr!My4`)GBm54%+GS|ogD#-c{aN+8StI27?XkekZS9z)nqZB)eTWumiFZ&>7MiWzlr0Z+AGBmmKZL3Q`2v5mg$ zP3TBYf`c(pY;hyab1NK-?(v>I(R39uPnaeM++n(`OmD+)jP-_5V0lpo>vXq`?fR{_ 
zijdWZG?M>E$9l(=>p}Q&?(22dbJjS;h5on7?rfgzxou8k2ZD_PagV)8&%XW+QV4dv z#tG{iCbGLqH|Eel1rE8y7$PY&hJDvq#V0*s2iv9t0}>{XX)A-+-Gc3jWF;;=yt6|w zyp=9rea5!b5`wK+qD~Sj3vi+yEh~HV(TLYyOHn`BdU$iO`3n(w1x(1<<>4Vz*4da3wBM=QKA6K_4Rct6cRS-FN`nk}IQ zGp9*N#XMoiQ1hF_%p@*@C&|!Q*nCLE091Pb!f1mljZpO9zyfU_*RZ_Au*mPWCh>cE zpj9bYkJV@EO?xdJogz%Yi)s7Hn)Y_e7#A)Wik`jE)am`!^Hu<0(*w?NqpcqapWJ;M z?%i+{S~Z&J@D}{7$oy4>ujUCd6Xxb=)M8)0P5DG5xPB&f1+2O2M@Q$D^_P&f$+QA= zMzlptYu7jCNM@6=c1AFobdF9f}Iw<0J_;M-cJ5T#<0>kwFm{}R#gg;Dwdwd7$2NoprdZ`A%Z2EGx zE@7F~G(gW!(DJb2{mPYZE#*eHeo3yDmoPzvdWr@E+w&>=w7H?zq=rO7zsGeO9U5;L z8^bN8>Pyo)m(n=J%u{3X8|EEZ8O?B6b)n8&E*b8jDyKAHQOl`%@(-WL zxkBm87nhl?+;&pLo!b+je6F&%9I}6p8a6$@X=%O(tvAyURcYBPL9R)>UnGPZ-Ngvc zdgOgKS`U-vC@{;h#)d9Nr`ji04do2`>Ct=DFiB_9Cc|aNM>9jr6ZvpVvfO-&G#z(?6s*YV37|mQd{$&_Yf~S zR-OAGGDZ{>PD!J_*pa|br56MTwfG1oyKhr`CeP$}8B(mQB=qOS<=_XoQxR{idd2I- z#H&QK^tYGi4r+XF za&lvp9~Rn}8Hx5VeJJB7^K+}79EuwFiBmVg^3S@eWnHuIW!q}@xYCC7ZR9F8#8g0&V_ke>7}q!&Kx zO5{W4^Dq&mkP-`b;bB*UXANW@-7KkyXYf|@#VKe8 z1+SU-oxl7fSqmG}8&Y5b-n+0$@jXaJIn`7sYx4{>Ci1KK+78lOOBeC)mmRLmY;_tS@jKO;MH_XaRI>%c}#PLv5O3QXg)glM7)9ZM|0oSQUfrJ55{ORZj1 zh%;nP4#}7Da2dFSA>`D47U+D2;6q;bx2@K83QNlNm7QcoND>^#Dc)Kyg?p_bJICUr z4X~xt1=Mwk9sOShOubPxe;K?uItg-QA{P z;-lctAV?uunT~x1--~(oAft_}Y>;hR;ZOH!LYmTI!a7*e>DFXSL|r|gnX9Ic^;BCL zfWcIGslMEHzxF~1RFUpDJqTsMhCmVXSEUNz)`^6X9&gs8=DFM)hnHuS)N%{r z4w%M$2Z6HH`jMw(uB@Q)LCYb14y{MlkIFEfWzT6I?r*HU5H2V9%eK`{sH_=!)bgJHph;%5nZMTj1-%Oq*#t-&3Kd8OM;&E04VsKR#qwDBRvKrgCv00qum9Hj zyec+uTO&OZ8VP5S^FI#M_Arrb;wLxBNGQ|$O6rq`@I!jgaCO!$ajWEnxyL*5H?lB( zAE{zsG;pyUFnwbamw?U}F+3C(d#J(fPwTE~q5hl_L3bZUf8BAj16(OTSy;?YWJSW{ z(it~4>G$nTUB!{RJi%CN=v7NMS=k`jKh(F>rLL!hO&>%Gu^OI!H%&5|a*?UqE^CD%qAseI#0Fp4EyI3J#|Wr1 zp+1uCkZXCiO`5fWKkC=olzM)J7khgGf2-iaNpZx&Na=UCV)bwVaWj3W0?_5+KuLtFYcVs$h zqh79wZ3?}ngrS=zTc&fE3S+HGee#a&@H zDNi>k948AH$(XB1G;LobW&iX+Ms@3qJuzX0+3s*^rerA1{8^59p2tzFJRQWx0-%Ay z?#=MmxD&_l+dL%`6kn?32cOAWWin3f&-M^HB}ABQ<<{}e=&Ww!y9s*#9C->dQ&2(z zV``M?Jgv%)$5BN%jZ0ScB8+wAW_%+O>aW4elyj__rv}t%^zMtbSuuJhIV-c$*0eUC 
zdcOGDbmlXiPJY{;*L|kT9Z*`KNDQ>(laKV&u9u%2V|0?0^Ii|SZJ&)m z8JQC={xO&=2=!C_qzM{n%ct$jWx=h~?j2CEM7z!}v2rzE0)Fexk{X8alNjsWF|>qu-S3zDjWtY!QL+vqaZvO|gL}~O(I_lQMZ+21qYnZ2Cp#GdwyyK%m`g{9 zOsYaWT-dzNe_n3dl+|fob=$)B%%A1eBI-#N`y&Zc+E+*C!nZ1Juj~N}O6ss?56I7g zJ3SQ|XJ_S1`|vo16lN5*V6DvL{+dx!^XHEpTr5H24k{sqQ^)@?T-eSVc*`%3kDMxO zvB^AL`_qHn>q9Ih@)ovzc>F#3#O6G2*&uvD3goFu79_$KkDArKXa;0%^E2JB#19%; zV9Z1LP`Ab{Tv7EeljZ)bGi>1XX|!b+mlB_g=r&Y}9pRuyX@Cxf9aW@B-`2jPRT0<% zS`W-45&|kAl@f%yuwnl0))^}#RB8Q?$H~#Q9urIWB9j&#`?J7w0#;EX(|X2LD?g&= zV?~k_beT*FS7hY?RhI=;i?w%R{YSK|F!?R`*PP(aATnzl15`0oU#!z9(30oPaRm zCkiw2X@mmWzS0CcONKK0ic!l)simDC@=BS~6|Dhv6iCDG(~ks)St$$YhwUtqq5O^VBmUhi*nB$6zGLwaUy7|bIy0M^}e%~_tDWu?>~ zL}pH4GkADWLi4^DWZJ}w&u6VNhk$HV zOyAHzmj5Il?0)$ONNh~SV=7+p#O(56TSdWY($! zpS(kr(5?l)d&kq67C4e`7GKSpUk!4g?vcj|72faTGd)-fS8iFsD@}Y-h@Od`ynccj zV1b%LWqnU)yrZK^CaR`C=5;f0AWyxuB@>lkN8S|I&rd(o9(WsQ6^eSHtwSf~Ypaz& z>7FJiVUfgo>DyvI)P5FMOA01l2jW5-yl~53(KnnvZtm_QThcryR%Z4S|E$$asPI77 zJ~M23R-@@sSo!Qtg21Srji8djO91wnl>2%4{%E$ zSs21NaBBC_{43Iqo-CF{9$&3|j@ypPlJSHM-ZXi?Zrz>u*`B0GEagsk1xR7&QoPwj z7VG;3+tGox$g-;chjMK6b$E1of*p>Wla8H-E%q_NUW|{b{e9NID(jUU{HeIic@&XZ=^w+-4e~NN+m9 zQH#MUc=>yCvNPCoeCuM#zsD03w=;+UaP0RhNvkI|9&2J-X1UT-KmE*mFAU`OON44>gZ5az`VG!S`eJR9GRcF zSZ3!p2Ondl58N?2=i>y#f$19O4h=Lk^X2{+tc%OT5r-6tt6TW(q2Rjn_s zoPODo$_i7FNfiiWh`&tmbS8rMvd0Ky2<*6h1b0wa-ap*8g-!@g%>Al}sETY_jSZFu zXETZmh`|>#p~jSp(vM^Enc>Sf_Skw4$F@>G&RgB!8hX8uetOKl>{epIKXC_eKW-FTAwbsW&uKOdmF$2#OUjHOCog ztR=)XwJ)m{DRcaD26OUr^oA;}ZB1X@*56)nPF8j9dnnF*!hBxeMr_^sJuY_&XNgFE zUh1rNOBgO?@t|gSl5bysvt5FmdciJw_T-|p0Lm!6Kde3oDw%NR5#tG3fiko0TXodb zNi`5Kb#%=AYEuz;HB8prZY`k#4sZ}z)7&BRq+qGoTIHS1d5!szSaC+ME&cFnF%dnv zKg?=eKINe=pT=KT0%HKRpXZ7^(Hfp6kUXfxMDjQz7W6Rx=s1?8t6XH10wryIGYEzo zkBbbH2vE~9g0n=>Jz(%gw;4Zt&DBYu(!^~<%5dnJPO@V2IcSd|iD?*Ec$X^ShZ}g0 zL$nUB&Og)3EoFzze%6Nh=HNtDSEif8K=@weZdrT)g}+5E&HRJ29q(g*U|L{Ep0ZW-DxY@=FkRDyofBpf1 z@l!Mj2O#R8f3}p``r78umfpiy23a`p(!Qps{rKgp`hIw0svV^MDHin)c-yrv)T^7} zp7VpN;7GVKeBpFeeWJ^`tP#O#Jd#WM_^nH~Tcok}lM{~LCMt8y@=^`Zpi^uKZ0o^i 
z!vPmwx!CnAXF?`MIp8%wt=+VqR;<)au-UY=Zl-OYGfnGyk!BL@(5o~|lAlDr1*7pg z%+iw5`!nOIC9K2IlmRe%3mt*AdQ`A$gB1J>7GLuuf5h!nB%#1OopUpQ5+)Lu=GomT z!BA(fs>SeZ8CtsiR=5)tSq-{mls2Sn8Ng^Bp7lP21ykj<7gNrNY;SKL32y1pE!T%q zB~IwG*|3b!6j1nS+BBd7n{vOb{8P33FxE^we#Q%ds9*E3BIEjxoXa&u z@aG^W`(P&Vs_Rdjd%YjYe(UmB9cPVqh9t!C#-#5caj{ghWqn?`EKT^6C#&*99zPA8 zQ6+u?U?)%n!|$!RmlN+bT6VxKa29lWs=u#PXwCe01i8LeaY zWDRll_K4M^EA2wVl*fPCe#$o@gKULQj99z;ZL>BK(sMM0O+!_@CU>>M5HaDdC%QK8+LetW3 z9y_9~h4c-lg(}BbO%5^Su%0=p4qOD#FmY%(Lk)B;Ox`8R66kM}BjZW_90;ArJ0W;O z2yt#fup4S2P8N#oJ*w+tpYeaZ=~6+n-(pE~XlmP^?vL6>`F$s2?Fm zu%B3&xqg7sYbZt8*+1tm8}DOtch6*mTczS+w?RLNbo^IHW;FgH|JuIh)hX}%P9V9* z3YRKd)88{=3HwD~3mHPu`86v$pr*&yqKJWJjD z*Ff*P3}q@Dn=lc!*6tok%XD`qWt6wL4FrsasXdeKk?x(+^AZLBXmWPGx@}rXP-6?; zd>$H2i33!`88(K-MSU76vzDmxNRL6?mit zY+v_XR|h3zUaVDrnqc?KUwK~;5NEV`I+HQ#534wK#g;zt;Y`ESwrJ1>rNFO9m0hOoFyyysh|>>V~8O7`+JAf|G># zgT4jM6aFhBb3@+0KcVOz){G3c0d(cPoXc*cW_95s3(V~L z`*XqgGmf1J{2)Vz$S-I4#TIAW3$IK6w2Gd)@fFuHm zbF_-n)9}$6I5KGRVX#dLch~y>naZs6sB{2xX~oJ3^P&?Ia2%K_cdzEYCu)O>B?4=F zr=pT58ihOT$!!9!2D(t6sT(xro+eB7Gxn~Kb(@~othPlcS(%g4@}uuUSZ{V2F!`US zl{$lwr@DTVQXCaxdvjr?m{}DL%nj^rEgP_*r}+jj>KE?4g~EKD@}@r(o~CP2YhoRob(}Y)LCPsGfQ>6L;0WahC`)( ziVIB50W^in18d^rE!UM^*v~7(q!^<~&@-|!!Wj((mF~om5}9H<1CP7mX+3d&G1+V> zFFryT@vw?nTv61>Hp*xz)49DpY46}MoNL=qt|V+D&}PTc4f0VQ7b{Xm>^TY(X~bsA znx@VIRDzChHpJ!hK_jji7&Xb`G?~otxV}YVpt&3VB7Endmhb@KDEO=7iu_Mgw%;r@ zuNL(1;I)Fjn40++)2I9|x5@yj)Lfhu0(df#dchC$#C49Eb;#OG-RCbH*G@zU9mJBo zM`ek+=8$Van0;aF7E&(Mi0dGuicvC~!#RR0XUZHzBq`CM)e@ns+G?_Wwz9NPV>@hT z=4d{2PVUbSmozHxc;XdbHWeAJW5>gz`X&9v+1_AFP^s#DD(YBpsh5J-0#D1e^Ts)~{ukVxwYY$h<`3@Csz0`3OODm5Q zv;{a~@}og{gO@tQ&`}5ya}eFctaW(NJ9#H|ndL{4J9ZGl!_VVdhn~)y9kTGHkzu>( zj^EQWup;=b;75H703fbSm*_~?w_PHEd71R>6x~B(=s~3YV>&QE0j3qLs@!q1aU(!4 z*^tDB+<~kz_}R&8-Bi`e!uVvSL$scSTYDwS9h15>U%%qtiEc)GOylBMs17J*+ zwx37y)iaYtwEQ$%62L298r+zSxhPp|hbf z>pR((Zd0UHe!Rsb@bu%^7s7A7Adxo(lXTHlYx^vz2W7i{+>8p+jhkR z>3Q`I)))Vt1BUuHyik0zLAKtm3W$l}2e`N_R~s6EeN6a#-?JU6rKwq2UzGs`^C(4o 
zQ(pcNt}QLH!c#-9QMwGJgg(?R^Ml@H;#p}+2I~I{8s2HdE57n=6v!hdR=AGC(TXpV zk(5Ug22;VOq=0ilolSdtE|e-|Jo`JCrSBt;GedU66f-RiF#1ufNRU)$T}&2z;Aq7b z=OY@`Nf@1&ml`1eNk}FrBdyz*`_*0v5S`AMHB)kZxVtE)oPM;MG$(U6AD7IT=jVuc zLKLbvs(jKdpFFwUe#qX`_B5y~87VfPrqALhmCVBV6p45dnJbqJ?zeb&G9zgO27lyM z6}1HDth>Y3P5or(AvQb6%N7eR8x*Hcs-a*$Cu9iTj`*WlV7r?27&nUuK)9~#{#C~> zp5O9~Z9qD#zXVzQWV;Az>#@fDWzqW51livd;yuuT7Az-ckEsG)xIF)1L<5jqFxT5x zfC**`x%6*Rh4!I9d@H(IIkepYP$#6*`J{?OtR`|=<;aRBUQ^CDuVSp0MGF#@1G8Dl zAa)go+ocku+2}{b?Bdl1_IPcEDMk_%y)qPLuZ1aT%UHQ2(+SH&Wb?F%-qRM+-wI*lv)h+kri7*|%Xh!D8tNLu{0)hm;V-Xt6#2n|{GK zbhmhJz(N<-OZ&V+Z$kEaopjP-GIeEZ_&U_LQFl2gzyGVx#|p^p92WkKxO^tQ5TXY| zyi zEgtDlp%FIcK6vp+PT&Y}EUN0p_2p(@F}V-uuXC}YFo>Nlt|$rac|RGZ`K+CijOoP# z^AEXv6CuF46lMUsCFgYY@jKc1Br#V8MR;GNDs)_S*|m!CV2gg?W=POk9I3O>uvlE!b(=R(_TBA$-4|jMFhzY4|{#VlKH`0+wt0 z&BdL7T?cv07HL{xMyo;5wAryM7|OYjz>yxYhXb2=S>ase&@GE>*_&)ZL@X|Ln6GuG z4Oe|TKc%%D#lqt36Fs-Dac1<^zmIe>Cxy9a0>GD;pXv{s!0tSK!C4QW5R%pTiy%3) zO}G(tOVBl#ez-#qjj_j!9DJd@f0~IKj`Wiq0X<3SfLd6|THCQVA3vf=x`6mS3I=t; z2+P;PVA@JsSQ;+Ls3jyYx_LIx9KdSV_qVa&Ap*yB3T>Ycm^8af*H@7})~TI)Q8oWS zGuPv24h&HC<+HRJ`N=Zz9G7(O_x|z(2A}t|4)<%-&hzHm(WR6||Em}q+l0cZVd?6s zup(BtUnQEetX=FvZD1|zT#CtVcvLt+8(c(WSsnljomn=f8<)ZIvWJjL0Nx)iU6;mW zH`TAD(1z*?VMy$lC~A}e%AqKm&LoO zSsit&v@`d=iF}b;@SdUTSc_wBw8LLi-O>GR#gk%hZo*LHSTQ={8Zw8F{^Y%x+*&DlwxoDvU@M z9~y-)6k%z?L-s>sl~>zN>XKkK#iMO@U!-UyZqlmgL&<_HA<9d&7vMP{Cb0Q=WCy?$ z^qD-S>$GoQDKQ7i7bn7-UK+u+UPWI@u$8r0(ZRPtn;rkU`NAo%ygW1{ajY|jz=2W% zsF_k znMd!XIPo^eEFbfPmgThcByZ*Vl4fVRAL8>s9iaRIzzbkiQyUa*sW^&|wmg{oX>gAh z=acYjf;M*<{zU$JdFuO{C}`mO9U4(-l_xs(b2}1Qz(%!`-8!`u$FT=Z3R@Hk{f!DS zjSKZ!KfX%wgAcqaQ1@cKIHZ46{6n?tKU4FfN|E*a-G9GN&N^(iNcW*5uYGI%hOosu z)}`#*`PvS$ay)f%e%70&=WZmY3P3Jd;xq@{FPiqo4*1<>amC!fzei|79)&K`1k|pLO;|{P* zphq)aBz8CqX2B`bqsg-Gt0#AKi00oB@%~wjh09(rCzC{eGJ?|Lh8M-#mqUhLASxDx z?vo8)C$9M5J;U7c}Qu4_Hwqr67|dKs5>duZmy z#U2j8z*TkxB?x?$&j-e0JNoR>xG)x+TL72$DwyZ(<*)=GyBoSToh?BXy8<@N0 z$x2bvZO0~WTW-K}x*YhizG`Zw4nDN*|F}@L6K(=@6UdEjQQFju;OQ+i%=k{US)%gU 
z3=;**mSUo?>P}7n_T8wKtI1Yjc&fHkIl?m7VfxQe@LbbEi);@^Ea0r%%T zJ`!NsrRB)VmcF~&oaaz7Q9oe2puVfN{;SPf`dIB%k=(J&!s-2qjm{qeOLs!l z@4@^Fe&h1kw^_mi&~*o-obA0B;XP4mnBa}E#i{_}7M^$CxDB0d zO%q@~yZ^#hu!t!1UbX-oTq9D3_3hS;E7@cpd)Ms_w#NZRr;I582K@h4umWoDRnJ+2 zr>x3k;U&&h8=SQqS+X}L7l?zo(4b}75r!;r+BVo$(U%pIPddc2ELCQ;v%0k&6P6vo zg&Ur_qEmG7RzX{4Z~?4R4z5=IkE`OFLpx=B-Z;xV=pmLwukPan0&tkRwT_k z*BazxUVq=a*EJ2@9W6d2ibsStC83$E>IC6W;oyE@6&sgEfAD)SJLaI?lu@ZvtGdHW zRTgBv=T=by6NctEnle~|LI%J&<;zC;E)})r*@4{vGKrt8DL^j`2o;N{v1{dV`5mMR z>}Za(N&xyOdo0N(M*pfN0+UFoqipe~X;g6>Ks>7VNZl4GAOQT*Nq5hI!NRNgWA4x2 zUL7vEH?2-8@%2Tw%RR*(&w@S02Jef0^ zXcPTvEa#Ae30-N5uz4;z@#$IrS&7&*TIf$WY-Vspmyv%gH@T6QTgsz!Bwj`ZsT^ev zG*2(|D^s)gPT!yFD(~;P6|HlQEzfw+hkj4hJ!Amfx7VnSPe6Ga0ns|+0DE;$8xc`r zxd)=^B9~|E`IrWhq_0$Ym<|hAmKzd}Ec7_LwGkMXjCP-_e6Wuf`t)H#vj4QzzZ|~;hvsj zp52$Bu-$l8(#o#>8_ghNOt9+dOq++}PD=h%-J9oHXc8xP*tcPE%JlbI$qOkFU<{r( zQx#q+RQQQN7;i*#7}_)+d3@_VzG0pEJ42cPgh12)dGd_;C!v#K&aGR#qHi5V-ER`~ zk+htYwCs&JRX(y&d{O?jHsF85Dp3CV7H868FRmFN%Ys{k=4G}nM>=jvTY#SQ2dcVS zG8Z7rLpX@Xr|l$W)-@zF7kCq-L!Wi#g6c*?q`Hb^@|-+|yb1I^Rk`G09<8g#0PW5~ zBvH-P%7#F9qcrDpsa1yR>1?_br2C^R2HJXm_$&eu=Ye|$OjQMlFp^elQ#O?|<=T$& z7-Y~DZIVAbCR=|sjLVDGtn{M>YvBT&j=X5_v|e1?9<9M3F@HfHW{^dfmm^2L(4#B; zJg%nS{T6UO;-Is+iGXL_m+?-2E@*7j4KH;VJX$vEHbb2p>OQ$~ZT>t~=Gp%NbyL~& ziFWN<9f!=hGS_j7+dz4f;9*D3?h3l2l>)lPwQtv3w2D3}=y28i1{akdp#uPrsJ!{^ zvLt$q*0@z~V%xs}y2sJUpAtn_3X8WO0 z$!K8co{c;rzv?9g=AG537EJC~AvQ}&^_P+{YItEjpNT%G%>1vB9>4`ZCmFFjO69}q zMS#+O>lYy+zc_gao0e@z7~4waT~$^zhp~i&3d1LGBYo)MuyH(L3?LdUKMQ6CyKA2M z+7&%N+ow(mj@}!`_AT*S?Z_sbD2frc+{_X_Zc~B+vv|4?M zuZ=2$CKJi7^?G92g3BGr4$3_wSh}BEYHCj-9 z>AQl-TZB#nCun*Fxz}r~?T|i|T45|E7={iuRwX$ZoS?fZqX0JH2*)UMdA8D;gaCa@ z7+08-%dE&mhUgwHvE4X(6!c=UQ=MrKWxcy$i7lvLPtlJ^ZmDrYPfiNCidCL}qb%H4-DN0eQs6>doG8KZELhkQvV}`9$ZWHr zM+gk)crHT5--k#IhYbFpm2`e68|~TiG5Q*b7ds|7 zs{`(Sq8I1u*BIEq`I*)wgk?@$TeY3{hzQVm=+KqMZYLvO48;B@9Q!VKI+$0VUGc!+ zM&&mzC+FF#eI%a%oT>R~oXm8D8;vrMP}9GobCQ+Ix7OgY@k>AvC%qEuH%E&C`D}pK 
zE7SatXgZ=k>7~MZy2pqTSQ!J%*4};QX>*HaubGa&7kSKBN6gko1o|?g)I^>6$=~_% zPk}}khC=^*dgMfV(QGS8rems$JI*E%~egsMCPu zSVLf)ger;fsMf+B%yo|^mguhm*odiQqH?!W#p?4Wh{pu?|8Z~Dpr*_~gH0i-3B zx)vdLPwxW7E8KDJ(TvgPES8e|y-)dLYAx|B4`z~ZPxK0q5y*Cck+@8m)tsyVOPJ}y z;n%{g_4Bp_S~0o}|0>tV+iqxK^r5DB7GEJhIp`!zt7G%oDWR?nA@Bo`Om7%qcEREe zV9C<%Xd3RlxepTKpsPL!!A%6rPdCp6@oLS_G-C{qFd5y)WV^ZtWDj%hHj4^|=A{w~{4C7k27yOA-g}z-fmuZfj<5+Hxye zvH|EaXf|@74a~@7Wpu*k#l|244E0L#sEr@Mz4n|gP%u99eYQ=pl>gA2$%{(g47ze3MV$n5Cfza)(? zPd|>O0Ds1?45=lEBfyr*LEr*vh+GEL&~1RRrc}yZhQ9L^a&te>udsV`b#LNcw|yCQ zmK{iw)treu~;^or* zbw{7vTl@GIm&dpr<)vPc-}A`G`DS-nweX3=E%H*~&$RaVjGVV>S6vOHu`Wz(Q{euu-PY2g*3k^IQb1wHHcVJmRG1!An>ysQ4{Sr5G67 zzd!=D6rZz5BW~^VGe4;a2jQ$&i*XHB;Jk)Uc7(A>sA@or;?TSML5cOLOg57mhLha& zo7x|n38P$sX`I9zlAghfaOVSj34p7O;AIRss) z1nH^H^l!f<_DIaDK~Bsbj-q%>LXBIm`%Cs#&Rf#POU5a;cQaJB?Fsck(ZH~C4mkS6 zSDXokXRRJ7fK6j1{UwikeH33y4f>J7BwV^FZfLmstZdZu94YK?b9^8CS+j@26EnJv z<8SxRPaOKzi32=sk5Y#!6yZ~7^ZC*hCR+>xSVQTLnDQcICJWB@5o7^w@$75UcfTTi zCZfF2e+x4SElKn~^h5QwN@;;)tcrGn6;72BO+#y^3|(AAWZ}w9;Rf^&DD5&biMgVR z(5?Vy*urG}%TbHZQ(a1(hDG+J7xFhEJ89+oQ4bF>BYbADFzPG`ak$i zni9+rET@lxjX~^xN#2Iv(E1&+(z_7b2b@olh3y9FXlWmuR}5Y~pYp>d&2f}0;?Abm zK(Hp-`+U?6v-R)5k?_|fuVY2(*sfu(jNXKOR(oDr009(9OgTOO5A`ddtq%xh^|MDH zzpVMH_A2D`js6aLG;?0)p`JTyZdw-_WPy+h0ctZX6FBh>o}M0b^Lmk|`&P@U`VeTf z_ksZA2iU2lM___Qz#Bf|v4DjZfCQ+K1@uJV3Q+pSv%03M}DBbj4L=@BAU&{C>n8TVw5N%F$Pf( zZKuDftx>ERHSW{6MG=i67&S;bsY_g-E!03}U8u8(B^Y(z4e$}F3Fr$H^KMtT^ef_XH5feUfAs$tFH3FA^W z+aJ@jRJa4#M{YMe^a*#Wi&0p1gG_pbcz>qkZ|eNmJ%`ul5t}dBoK!NslR(! 
z54jB~pYL6d%Wc?xbt~hRLs{dh+yLay_J8+&V#Orru|j;ZVFo-e8xh|`n!rnC1I^>;Dx;-L zdB#ynER#Q4J0{vN57JJmE-h?99{%fUH-W8p0+^D1%cR(LiK1`xygob&`rk;1=B>Ba zDd7QRK%6%w{*qkG31eNzSYns4m02K0n8Rgxu-s{Wa0dfxV-+@D( zo}eCqY_}zpuL3!eaIN`K277cvC`kJqAsk)+(|uhQ=!Qf*Wk;4o+tKaE&X|;&FNAo!}7b#7cR>2 z!$5}>yTh_fEaHYX^n<1rYR_%2O0Omcz)1D&TY{kHfn$j5H9c~SpM*p+^gWY(L8;Q0 zJh2Z%D#_!~>^t0n1V8V{<4$(KE{F=(H3DI9D)Fx72j{pNKmvUAQBDKA+@2v@vXMMJ zSEZ)uzk!lKo^P>+z(dBU@C@ln2-tm2hk=TIR@e7J3SF&MeB2Z=_*(94@+CzZe0Y=R z`U%;FC+v0c`UoMPf5WF?BtLe@9slkH8^*ReI*9N#c9Ej~=_J!WGHpnMyKvQa)wfr& zJ!q!@ooY6njj=;G8JqmerKcGX26MRCv@`XkGPH546}h#=o<3O)h=5wTrV*)RUS_!B z01cry&pYAVP%bEI!F$#B0@*rnMX^@88Ozc0@v+@*wK!1v6Ksj|H8!(C)s^~{lNz#1 zmU*ruQKMdL9MM$!DDp&(;cne~xjzobVu6ARRU?6KOlIYKtl2*m9|MA4EhKL~Q-x(n zgOwpX^6Xm`->z0eQ6|?a?!K#h&Y8?{Bs@Loj0e7ux7`PnD#K|sL6mxsY~AsGe9cQW zct1kF^Q8hXDGdGMNw#NJ;i~f+54!MZ9wKAxR-GUL4EyYse^)H$Ru*@q*y!ojOX;%x z>frRZvGJ79I!KI|QwmnSV-OTWa10%Zr@co_4ZuX0O!$-EuuES}%HvLhJHua^j-4$@ z=OKL@IL*rYg+o3>a@w`*#?CEbXCA{u*#RnbI3ibIO1xX4m)}cVRr)iU%bXC~?BxOI zUPi~h=%#EOOI8u6w($Fz4>0Dv^*&dcpM3rMQ~u?^`ebMRF|t9JxOlrlpuN8w43`q; zcGa(SkpR-JZ&LH)IZ$TZiBCs`r=-G6wZvJBV%7b@=zdP(`)Oo&_MkdSID@9X6tL$UU>uY8Q_We!Z7dz(EfJ-7~XU$$pV%B@pV&&~{_9mXDX#~XfpT1qM z;uI&g;<_n2@<-ej3R~!D2ipApzRFm1c|GBT^kdB|2>HyAaO<}G{5lARYsID2gMqf}DrW`7Y00M_4a z--g6M)$NwO9AAH7E_G|4)3jUSxzh@m2!F7~)L`}9h-H$DWp#BO6Q7F@8V{v##AyVQ zXVA9-O_u3!_Y`IjrVlIQ2LU{+bS>puRn`y#K>Q2TsOLsXDUXUJP;GT5W!)Bn=Uix@ zVs#6E3=*k-g>RJmU}&FJ1z zKYR|xTPbc?vxTNh7}~7fIb|eYT`#+YYOCBVNv+R~1<|i}Kt@*sHi4%SdQM$dex$5E;{3zOJ{F-V1`Cb zcvAQSe_<6#8F_s{>8sIJYhjG}U@i0(D|Md+dR1HfQVuTK7!7fJrF7FrGZ-Etqap;H zR#rv2t8wy6S_~gq(dPq9AvVtQHJ1ozfz~sz2uRB;JIlBxAB01+_+#EUI~cpJ=>}y4 z={-NlA!haNr@37UXusD9B@OlP7kp_Ap;~XKg*N0flgvG|K^AS3e2I4 zhgRsOpE(4irW1BcAYQumjuA7Gs%X`qT>M-01$ zudg!I9fwpUdeGdYbHQxVhmgLe(5-p&2n0+Qn3FQoxqWzK1Qgg;$-yz`Z5}f)F+h^) z7wa^-Gbc9CoO6R&FC(QJo{RprK;~(NKPRCFy|onF&Ckp zK{6`YB=Z`P!GN(jb`qWbQ8Sa_C^{fyM1{5CX*t`5k@FF;K{dynqV*Ayr}EIa0)=3Q zlmD<(TDAuVRMBw74Fa!VJA=p?KTPy>Zs4bpxH7&P6P<-LOwjGZcIIr;==!08j{VNf 
zO{F6%@70Hmdh|L(K1Sptd48y4Z?AGS9M$7^(`2`Odc@t6T0v_3F1Dwowi6M=4FLyq z(A9zlXVH784jIf`j^6E4expz&!FK448a|TLqPHacKk}Jw`NtL@W>VWusVkjn959D)Bs;1k36L(SWJ`Z9z!MRS#&@4n|93 z^z4ccgR)u8K_CMmd!tEFvc23F+*j#wBQirlCj>YA5QDu;Ty;&tRp_<2Mr7G~zQVtU zGC~WajY-e`@ff5m`IlBEX{SnKjcu!ab}FxHXE&Ji^Yrs3Eq+#IaG;nBs$$7L(qq#Z zi1EIAq9>rWYpE4KbL{Iucg4m}4GGunV0z=KW6$dvZ62>J5SaeoxpXgd*sdVbQ|S`$ zW)`^C48?A;aN zmA8e*DdV+)h9BvWR`V3g&B5S@MOJqr7;dMgk48pAZW1|;7}dOZjO0>i+% z>Uw%G#WZyKWptDuc|cg_9@Iv3y_W4ds#^65Jie-8I@Hj;p8BMF#e~V-2!EhF zW_3o|(n(E|_WnlcfTuF$n(aMh|FY@-6|*x%$g|fjm)9jZ6cDWyy72V$OHHJq!Aagj z2$k0?S^{|*+F5LLm~Mg?M!bg3?!ByCsR)#~uhtrvnn=io4h}gz>QInhQ^u6s6$44I zr7J5k#IyaNUD2`Sm433*W@f3fD>7uk74%Y8WbPUjK7B& z4Etrb>)KQl;PyhR(&~-}mAQSD>%l4lZB?Y6d;EY*%bu0T!!&Wtq{|;6>RDl|(949ah2RWN zabn@L7jUz#o}YwZpoLDT=Z#J-z3Vt5FA}e*K+EbEHVVG#&Z#a3gK1}tN}Q8*Q8+6V zzXdHwun4Y7)s`7o{7y0|m%1?eP{?g-LNk+q1xYunWCl({8cp|KQJF*EbzVvHNuyTq zsYyMukOE>-XlDCMSx3^g;|-mz4U4-Knh~tIHoFpcURYg|gwA=hCl<>&{-MhF56kMxWMc=sR$`*!OgMSwU)2utb z7MFKiKc1gdkWuHAouA6xTAze4ptTBJ8y3Hnx*du6F{EA#Vw9)O&Ef-@AvMZ@?lqaV z@;{?EN=#So3!{8Ml|HC@6yvo*n8zH1^){gslSE!&AylsI1MEyl$W57i&h)R9`|;*N zrmiq;+@#WUXgz2dev!lZ!CO`5B>zr*UN7Bo7KJNlH4UbXnvgel)Tg1MBVZ`f%}7s# z`AWK~qz)8jjG7rTF{nT})9e>S>Tt0l$Ufs$jrpjTbx`Ybau<(0of>iuLwlOyWC6pVppJF~c2!8S; zjcoVtBTS}P1A025^h&uLXww{qT-C46t!K`7RnDt2I}R8ak!jl5DDV1~sp7G4uO?70 zC1(#I{YhS{CpMc!e@m}X@S)blyb$IjY? 
zfY(~=B`tpu5+`V?)0M}cW3nd=XWP?UCg=|4Lb^okFwG-{Ew{HR;KEsqt*XsL%6cCx z`D*xb)14I)LpS+pU>dvt?Pcuv9zQ|oyAR=xn_{Ty#mmfg;|>iaPB)K|w5L2=$=0Sz zx|dyGO-|T~f#dbRd%o5QUpBFRRCI~bT<_W#T6R9W-G?YrYa>C$sm@nd6XWqDqTt9M zgzCPn9TpGrM~(e*J13pVHKc6$;T+vhnN4!@$U&F-t?c~xg1sUkGz?e3>{#10iS<4c$I*V z`vJOrukq}sVaCbNdQ@8$X^Glzbhqa#IvWSu6h4SLAK`l7YGruUO4yhq5iCO;c$;9b z+jhQEmweUA;`6%d3=yEQbBqi(Sadk+B*+$*JAEaq#;Tr1T7dI1=;c|DS%&!cnQai& z7)vNNeFS&k%i^xv5=)d{K3csx1Y!uW{1@K_EWBf{t;U{qO4!2Q)7WE-%1(_Md=1ZsmvD789SKRd2BxCWM701gq3+t2-d11K z9*^j?7>G~Mi8^!mDnMZ%$BA6RR8r*$fzqX68l)@=cOYdezr&H{<$#W^2Bdsn&Ct1CI}#?J zWHV<(Q3k{J=b{W$BL+poMjNip&Qu<&gh|`bq^2(*ep$o@$H-%cgIMd`+CFD~xa#?B zOix(jM~wO75UJWW{XLMB9FMeH6vhgJf>g*H=nqBOa=IaVU|X}Rmfp*EXMW${7?u$K zmW6eJ@*+t@s5TUSfHt%-x^1wtCNkw*dh>o}Wt*2rtQJ>weF8?RK|6JcDRsS2T6scr zXl9$u`bLZQty*I1vqHY6b=OYabx8j#^&_boNO^#>$|I8*td;iSA_6yHdhQ|4q1srd zNUJ(#0>b_!6o&-vL9uUHEQWTdd~{xzrFgswmwkcSmnVow*-kE*hvBzNhi<@-l7&Yt zps#Wn@qN^+E`06vAhPqz*HgjkPriCRTJ_z(+t95%)4bvFct+bK=BDk<)57dFXBKz) zf8g0Mxv?qF-?a!q1$vajc_rQKSPoGr%5+{HCB~VTPDF0VLQ9M8AioRrd-Ym3@ zHGD&OOt)Gkj+2Ss(s#z7$|&lcjXTDsq;9{nxcW(_#1$VA9$p(G+@d(;;wO-ejTr7+ z_X2GWYsZsh{f`TYVe@@_LqC0Z1pBFR@$uGrE0~VCH(Mfqydk9EtBQGJ;7Fs!d2(qXq%eAS)cFeOT4j*3v)SVVO`Ub5 zAjz+@&S4ZJ_%^w_H*xqoV$SAdeJG4RkzQ$S_<-_=4{aMwW|7v(`2ps&p#?9c#@B07 z<3HcBxOcwO5~6<+h3{9^DIgR? 
zZ4Z)^GJA2@qp$5G8lG)s@%LOMha`Hpz>r5r=hBHEb$U7U713?nJPxBvrKi2Ly}0XM zw|bwR79;g5U$q<=y=8;$lAvYlg8`nL`K6%rFj)S(4x}>!Wd|DccQir2Efjk(z~$rNZHW^&NxOSce&g$X`Z=WP>abj1$9cNN+vCQEJs?0R9FEG(Gk zJ2CEfW(&^iCV)*kIK%)M%0Z>SAP3aVoKZ%S3`4c-#$1> z_wymf6ilOYnmy-&s$2GvNPXc#|E@_oa5?!Rs8G?a?In6QAga+qDAzGAhip5RB&58c zDzuOqc*D8H*}Ls#I+%=+1gq&p5c+P?|I5PFeiF5)`~pzqD(I((F)DL(c|AWqz4{Eh z*zi3ya)iEL_hU>Kmv7XRQHpzIu@RZ{=!8Yjb-lavy50rsB$CT`<>?8F7uyz(j*M6u zl|O20u+Q#|I@+KScE=7nN88SwGL#X2{2K?<(zQ8cz0MpesUa z#@2Y@W-mwW2|h-s>~G0wbG||DTWcq7@qDdh<)@KQ_xZeL5A)RA!z=UuEU2>5PkyHu z^+V9Y**Qk4uxaU=mLvXM#fD`FdU#6e zxNcXhtQBIsnsa1=73p_C z35!svoFTOF_Zm&2M$1_7>7R7k$+H*u;xmHdFqK4@ zbV3l%E$)mt({v9PkJ{qZ!dljpJxFK-5@H zMB5&X6K?TZzV>LqR2>r271#ugyJah1(@~iYRuzSjKF(wG6YeAj)PZbNE*(wTF-AFN z8$`0Q_>9b8?~4vUc7b>#2`#{w-<%4)H+f5pM2egfs!1vJt<{oRZByH2ZA>xw`+a86r!hKJ=41SLJW zmWP9~=ec2kA3j|J)e{(R0n^NalnBf9N+MYHe4blJnl4_N)VaKUH#`VT|FwZ!UGib2ZaYD(t6 ze?pceu=&`|*wgJwcW;Z&r{qL7TxdF-O&(3CHuP7|Ycr%5l{;vAr~ z39R-kVy+-8>_&ZbE|6HO*OKsO!;mcQ2jcIk=Dx(Azz1V$jj`y6%(lsQf?^9YRz@EW zm83lWo8rhzv}g`N##6Kx$8xDyfD^cM(67P?SfzQ&h-g)ve|DWJ2yP9N4M8pfIFyk& z?ukc|Yga}0oOO4U{EtU2ud|KkTB`YLY%}_0f|C`ZRtv<`?;Q-+=v+__p#e zom4AXSb;u|?r{k$Gx)uzGFap_&Bcd{KgJ2-t0@VfO^p<>Bd5!jiS*dgk9c@CNA6j&yrGp;B zxs+H#!-N|rVPtXV|2pkrhibK;TQNh#UU})gAX%egdn40Xj2Y3+%^{2 zs8np*Y-Ymg(Z|*)bJteUP(rn$|HrB%*wK?qsN6N#6@XM;q%%FNJJ>=)7_gZyHLuOP znRNHxJ2|}tmh*o0_qi>8ec>j>T#I?}aS*|{&kRMNFo#F!Yk*m#SFJ|mUQM$5ygg*$ z?}G4be#SK1#|SJ>M?e?5?oB;?U~d%ypZ}t-ZPEu(b?<0VOq|wsY65X8Mz|e`Y68+S zAThe)Eer2aBd@@a&De@+c%f$KXG~2i2Q~GsqsAImB zD+2L|K%uLSbUPZ)aNI}AdKhjVWY$3&tql$1r)ha3_vDs)7PR7t~2hh z$XctBv6#1rA6`~{_k)AgWO=UsTR_+Vmb;jwKMA#fslJ&1@X;FPP6}-u5f0fi1=yRt zGjsP=5l0tgmRML&uCpBp>4F1_G2_SusY44307e*rIHHovf zhP}Pr1-;)ngH#D8kBxcD%nLu4siid;GM4Ax+I1k6#p+%S@CHe@>I-b&F5CW)VYwR_ z+gQ>^_r})3cNE!MrP0xuPO>YQ&@uX>21iPh*3#n1o$GccfqE5o5B#UibZ@%#PE}(FXP6! 
z!Fii39#e5gz?_-e6XKhO;tv$fD+GS+t&Sqk+wH1AL!4}j;&_x@RD>dlhM7TvSd#9C z$IXSab1oZRJea=rlZyt32UBQ!+5sqEG)!x|a`5CZKjX!KR+zEkt!OupN0VSf+>^o_ zy>Msqr6gNz?2zY`Td^Qd_aOs}&I06{9Bbx;mpfTu3X{->tiJdU3tWuKBhy~zzfyYa zs~8W_k>MKkI<pK;T^P$}pH=S+L6KWYW-{)XF=Yp0m{F>SpIrYAS@&T2mGIc%d+Q z$T5Tsv3MWtjKA>qWK&~yIFsbAJ~}>ty&i22Y$&RxBai|Mi!vJSNCADfQyTMQJyAEH zTK7aOn1MehMPbd>c=(FTSIx<}b?%_w#bGq=hvE=q#9sxr2z>BeSN(&}`0>pky$jJM z4?0+RRM?C)ESd*QH^`A86Ic?keX0rHl6Btc`AKpR6H(E0hEG6{Z1sAAc`XJ8 zCRRQFeRrB-d20+fkbmo*!x8n+{ZI1+fc#w%vbjNzKzBObIJ7MwYUorKa3C@DXGY@N zCo#+Bb}zem1(GY@almjG?N6byNHL&!w9?Mv?wR!Ee55&Jv zrSEz!{M#qxIi-7iR7nQ0$eU=WF{XWq}eeU*i>aLuU1y!blfF9|T85M5(pZ{f1ann@vkBOWtS|q$$kzCm} zD_&w&Z^Ya7{Nj&4V7N|w=j?g}^54>Qn?G7Saca{goDggzxTbyib29RKp^>qf(=+e5 zvQLpfEQ-v?0jBvn?7wuIQvY(~+XwE7j%WV`1I4E!qCr5V>%Uz3pP$Qi=rFJO?`Bew z&u8KIj)nB4N|WT?210;Mzt&&wr+L6j$7a&sk;!=fO7AnD+%goye*ehvb(H}g{(87$ zl|a4`n6Y+8@sUuqq!)&H(NfXKY$T(mJi^ru_T*Wx}p2YBMf zsM()x{okMcA4)c;;s2=Q{}^Y36#h?^tdHdW|AU-dkL+zg@}%96))RZUext&3{o4l^ zoiURW>i_<0;DYjYrC~F4tgMD`o$wUq0>G_sPMqjW3^3TZ!ArFZ>I?{>9b5 zX*t4Hv~ZWS@qIsR7b9z?=HNTOxN4}M4(L2z{{O2_mtBzi(-&&QfZQzmFX*Nvx{-!4S%`y+A{#@Pmp_6j(|5kWt!7h>dZ#-jM-ReZZ2wl3COF@ zEwzTD-1KF!Nya3;h9CCK99Ynsejl~?oRw}A8*^;AFOipWE;^$3O<2Ee{c~*g^NU&z zh}FN?oUgww8b@8C^&1+#oF)6Pjr|$*#jmYV59u~gNgg#!F^f#}i^S!=3KwC z%jN%K^z9#D4*nxm`sVjVs$Gr;mDz(Tid$=)%1a(hZWRF&3>yqUGMBnv=0>4Rv1qh*t=Ehi_6Z58O7+5L^HJun42+ zEA%&SzD$i+-TbpI1gp(Wcc;GpAwYK)X4l)uCw%rib2MY*@uV9DX-#}g)!j8#_@#Wi zIBYeiU7e!9*Lm=s=O$jc8PL8|zkHf_0UO^~abvXE|NQ0S3|g`T`MoIZCxG-FG@j{y zbcFhd%48dd$YRpWob@9dUMwlg?M8u#4!S@6TK@Fs%&uZFWj|^Sxa%`-^#rH@aEn61 zr&nvjBPwpSZ-?1wPov5HRavr~wpSqe47U{ZLKb&qME&%1MexHQeWetf%2g5~vDh?a zHz}p~M07rPgd4{9=xe-~D=s2e))N%stIQ(3rk>X_s=DrZ?Qi6W-UFk$gJ7(9#T!)7 za_$-?k3{vzAp!RUBN^7h>JXg!D7!1-(qn;)eO{e3o|^$7Ol!Z&2yTwXoE4?U)iWtw zFUEHqMyZFat3!JHhLF-ej%6c3G9Al&x>9IJd{%N-SDfrjFqp=-ZvW)67eQhekqw3$ zTvE9;X+$YjF7-5WI;Nu}BdKqM9fDTwmOkgjWtS&0ooF#E1xQx6ZGW zQu7!pS?lVbJvG04fBOD@395mAz_72cl+*!EdY4B+{BlZD!$&(~Y?d3{@)>G*lkuDW 
z1v!ybHq2pj7+TCy*(sy`nJn17;v@GC*(%gq!SGL2;G3Yn*7wk3{{7XS9c=v{?YJHK zPcQirKTB`sa^(**>%Bps9RiYd_IY^-n@5`{M4n?|r_dOH^SayNlH}!F1ZFWU^T}mmh2Hw0 zIc*o%{pZ`k?myzSkP|h$8CFqC=3>|oK%ztPuZ^1c;V>&zbw(T*!pWFALI{?SEGM1WZ#uz(4Z!RGXZtyFTTFKFB!D!Gdo&}Jepu1wC*AB{!uxwE?$i8 z+MPM`_IcfVeZ@Us=$>J6_bd38J-E3w!S>0)^Gxg&mI(JA;}n_Y=T0v@;+jGb!O%J$m6;sxo_ozYIF) z74(5&(&Fo^!Om)$Klhm^B&6J<8TP@oLw;eFq-Yg>VO`uXNer~fTvDy_8O1pQ9!kUc zzEbNO|9D31Py?cXTwW!LKcqi8b%AWRB#u_S)mbd4Coa5e1*uJY&!JcP=w{0)Fmr2D z=}%xLQk$l=VloG`XxNOpgvs6tli1S^Rg{4_WOK#xY#itzrqD2!+EBFN{5bsABRmDjW1iplG?``=M1E?<0p8sER#;zD)!kt+`R zjIR3@yM~u_AS(+QDIJC{hASUS@>A5CI)(D#^x^Y;E$x+os3|G!&>CoMW~XVYi=?Mq z@>zYk_QKcW)!0^upRjT>nWvcK*}vlkMla=64gk>B(G z`#Z~%zEDXcR&w$X+E3wldB)yUw&&6J^JOa zFy%4A4z$j8274TPCKQ4^-!)ag9GZd_kpm}+Mb?eselLpo2z#OWU>G&Cpu>RQ^?^hU z+F}42yiupL!uuNappowN(0@+cTC_Q(KfI0^pkaT0tthKAkfR?TU*G=TRaM7%_<~?{ z&X#z^5#S_kESIi!X0>@G(=eY#s+yh zR|YkgJ2Ozccl%m#*0>78%)J5qNdu_hY1C)!tdS#|kI=kkBz_cfiMuy}h>y+E>hBJV zjyo85J8!tuEK*tQ@btmsxW+nBce{azKU5^ANCe^+)Z%e`RV2~9Dy`g&TfC?hYcI|$ z$ra!fmUKjeRNbm9Yp178_DOG=ls4xlk;}^s*t=j~r}w=78}{YA?PVeKF`R;b08$@2 ztE}c~=sg3Lkeq7y5lz)no7`rw!Sc9{JANUiX)Gzoz~rf_P>NW?-d0}Uae}ZQJ)(d? zYz)cRU7wb)t3GUDJhc{xQvOcK)Hm`lOlDO~&UcNu$E+BUlhkVrbRzu-lE*8L3=m8d zR2fbQHuG89H5T6%z_YokV(n@?2sm73D~g70VT% z;P%lcqqu|fUAKS`XLkL{VSLe*Z(HGO#b1|K+a*fNiGh!k>rEq;YJcIB^h+Xj9uly8 zJ)$XE6%2>mUCjuqIMyB0MK+3Mx{+mu!1PVE+A&)w+OZ!Vg}D!>Jj@GVx*pvN@bLqX zPUhkkkis3vTeA0+#n)O9P&Kvf;W;7BwqBf~uD&ZXDHo``x)&-m7;t}1+p8i?S3D9W zuuE*}j#@fsiBT)wD1Dq+tG_;zJXQk9>tYM1KvI0{|GeiU`zy^0b%ZdDzuu^wr8IrH zIWh<}ya&u$Gc)Ab{46S}ZG1`)g9O3 z{L7~vVN*28T0zmxX&*0-X{kVnz#xaTGv7FNxV5kM5 zx1hTk|Idk=zm4gi&wcz&RkfgJq?+DSY#Gs9tNUE)wKC zNjT{dH&?x%=CQiNgzH%%cg*hk-U8WUP$(XkuGN3wvYXhK*L`EvH0?U|%0GPh52nZb z)8cgex1G16an{K|w z#{z`k?wB(+kgrC%Wocubz6NkM$FQysQfN>Ckr)anA<_s+YDD3sQ**&Qxdwx=)3^%L z3e2AF+}mj0sn`g)=DTM;UfzH}Sb|gY9}Bt{jXtC-4ahOE+1`BVShTQOOO>@okGK1~ zWb<>uA!lugDCK>ZUB3x|w-5_gg_)LMa^uv{m#Ge|xYLN4rL=~0n7_VIPt<-fTy>#? 
zT|kS~vi@0>m_jp_^JT()INPysMWF0?tZ|$Uh-577249~2;j*qSUw_Veeo&Oq7R^1& zKhD_~EtQA)-SC&p;pa6H9};OK)6^L3g_PAMIjLzyj0MCw!k&67PF%!5Xwh+)#8W{n zDOQU@K&?}FxpZ!!K;dtA7Rx9G0`gMy1fJ-Ds9RLexi|1-eW&eWwXx-A+zJC!bMEQt z`(fw^s*CrDdfau$A_;YzFr9W?bouCSP#KB5hiJ0xgHwtde9Mo6YIL7p^6y=!({~mz zB3;!FW>3SFiBXgiM&i@Cl`wQVa&$TQ@_j9LfFt|(ktmrGk}-b^DXpUeAyKSF3`Jx9 z1RoN|H7!PAC~@V*MGWX>Sl^t~T^QZW&5i7e%!1nrkAsR-d!tY`#*t@>Ig#a$L@k?P zylMWTmN+{hD4m|V!Wb#3C@u|8r$>{p7|={nN>bB7M~DMhmYlgq!MrsrS|}S9;a%O2 zV=CIm>Eq?Y;Sq3diA@zF!E0=<+qb_-Y}`}KeVL9Pc!(ufi8fQQ=d(G{ps+#W2qA_g@U5QDGL9e=o%-ou#|0LxHz6kLEOJT3d17vv=P?DX0os0P*bQ1 zk9y&Q2YVqemvQ7RO~Fo8k@s6oT9WROGXs}Ojbz{sBB?qzgr2YqEZzZ)!uR1;U%1f3 zFKu_2K2%<<3jW_MVccGbXTELgx|BJ}5`BzwvU*ft`D{Q-uzClMw7qo-3->4;Ss>kX zEG6#4o=B5^(OX~$y+Dpug}S=Kf`sA%Mw-oVxZ#B=s5J=J<38cxf{d-x_DrffUaXwP zW|9+edxB;!V<-VZtuCUTeT2U2QQ2}E**=D2=TR|8g5F&(I&F8}Fm)2>#-SA49Mdtho+(FxzsW@HV_!fNh05`UNjPij&!sSs^smCvD4T-95JpGQ`Dv_UCPu6*5M#3O(}{j z%#&)v3H9FZ|1qyOcrku{OHR=u=?H+aXp#Dzg(&D6b}tcM+=$%T5Tx%ukC&gKpwlB^A)i zfB8lpj5!!hZxaRO|I#=9q@sLr;k3Sg{cd$R#Y*LU)5;u8ym?Ey6$yRPeV~Sd)T$fM z*2_O8?sDR}QQ^Z}M|ZsS>Kex@sJ!Tgz_Q_D)KpfnB)CPN9Iq&H>Z8ON8bhkK#|#b; zTMGi1j|aHgA)kgx0hdBNhVPD_Ji;2EjGZnXg6^eD%N1VuYcp7VHntK1M+|0{jn#|# z9Yun4SYncRaTn)EcR^bYz8!PjD?OSV;UQ@9uxoES%?9HbBbdnEj^`bzyMN18BJ4I- zID5p{^vuC#gKFBkTwBEi=)dHTxEXiI+#sjKawilg&L+P~BA9zYrE)5$#S9{>>5Fsr zl200f8le>kL(Pg#bW;Hw?ql|7zF@CY&WVPIxF{3>9;2uWG~7ApMUHQ9x^}o0k8ED1 z7+pp)tcSTBZJo{gmwP+gwTjJ>>kTf~-i}u=t1h%;SNdvhIPF&)K#O8^=NN`mu*%vv zSOcyt3Raujxa{jMU87;sPX zMW}I`)1mW=X~DdcAuvaERUdg&P}CzgUTD?4AIr8ZMIPN)?v3^3=0h87_%MXzOu`;Y zV)_qRiEf-j>m*xy#7FXXM=uq{+jU6-_r+n#Fgl~G6zibJks2Wkz4tiB7;Rd*CVp>EBrCPH^>=S}7i z;*a7Z&ipFal9Dvq=QM9$x^_1&i4HyB`QnQSu4P_NBXDo}Ejw^SB3LuE9g7Ns*u_;m z8p5&~1F6_U)|2L{LG}ou6ChKu=5|AjwqfYf{Ed`71 z4m4KRI#b$oi8||hPck}My-vywb?3H;E=AK>-*LOc63*7)k$P^)+WID1Fl4@aywjpm zy(V7KFFDfBtFSO4hRgWmx2{Zo?otT-mgb$rhOn&EZwb6=#X-DRC^|~^@%V+lIEM|9 zL%X^dx-%>-dH_^8U8r|K7*Lv&WDqdykxqBeB3Rl@z^bB)isR7d6Luj6*{1`-*HYzgpWW}-Vs1_=^TvCWVY3c9Qq 
z$gEScl0yn8-g*=&os_HzSK%sGLgII?vE}t;j<3Su@`t0_PVKl+e2Ajs?D|z?6uFn^ znGbDUzZ-*;RsFllZnu)y=JsMF?o0z>2sG)Bv$?^y`zYRq%CQS&g-V7(oDl}|T*vXw zmBu()Bu%7P;V!2gUt6Xdfq8Q#n(L9@X1X0G%T1jYoYdT)#3^{>)>ylO8JMhgCP)Th zHp_G9tQrpS%p<^kGz@}gUR@oE7I~z~_#hL#0x%=rZ6qeth5;S9zKN|iKy@ZfYVH+O zM-7-fO85qRG@XR_O)3(|Rc-3Qj=-vIMpsA9!RWNb z4*dv%Zz4`_nzZK&NF2OAE01EN={-G5#_5+d3uQ#2Qo|PabG7f`I_NC9nsPj>0LT}V z*qYKsTX9)@Hek9!X4^uNV6A$=0e%QkQjjDsxC0h?M+OE8jmWbqCp2^ zTxE_@!UjAEmbY+^Qx?_~jS?!qr5OExo0Q4GQK5wgz4N4-sZjj=^WM-0@<;8U6V;l| z-@`dbsz8yweuR{ae^uOGJs1Oq;X(Q8inf9J~L;dJewi+-MYG$w0z zcF#~D*AScc^Wf>%U-whY1(+%kUmG|=wY z8jbEINK_+_LK z?24WpI&S@ln`BEVEd-25&h8m$g!XBVRvD4%Y;2w2go6WXJ^=HqWKxRAiPogxqU~zY zJNU3l8f$##B)gLDuF9dyNPUz&hj?R6BSh*^VD7w)CS3jvEI8W-1brse?#J4Y9zGt& z;#$^Gy^c-jA7A5UR3iY529RA#LBtTA4TcWSTZbfswzIyQx!a zaas~@z*x@eYuHQRNwTUtPdLOBPh~br)9I{odn~L_0aQL*JD3|fX;*{Y>Y(KyNAWkd z%WV10t7}xvx#hKDMK;6S<6D4{wuRuEo}Off*1W~gtnQbDM^-8X~kdQ#dwJQ03z+Lbt9QZ<3in zr_abc?sA%K&^$OHlT1Pk_qRGD(5{{7@F-d8j`?>l@BIw~YpCj?@JY%HNo$47;|5tj;uuG` zrUUTyif*-rs?b;5fwnw=CPfFOOlh493Il=xO)rJ|#TXnj@m>E=n4)_mQ%t zvgIfAy-vK2b~e8C|6}jXqnfnbb-wuijoJt)jTBQyQ3AMJ2R+J$~B2bGE z34{m;k%T1nYg>C1f+(N}B(_W@Ay5fP1WaOU1qCEBB$$LGDl#P`ks*mBB)K~bw&%3# zch~*nuDkA9U8}3M@y(n4KJRCLp6B^I&)yqb2MZ9g8HVKh+dD8w0H&@ue<~bahZuWO$^w(vKfztT;Z*98wu7mAk4=qm#<5h@yv{3Bex>j zp4`o3i4;!#zjcq7hXinre29?6m3bxFgU1sHj`WlCH>+@N<}CGP=lcp`eOHQ3(bU&2 zZh&YW`dGivL6r<_qAf+V8nR1Sp45LgjZu;|8*GrKC(2O%#b2T~J@v3jY~v)*tQ2aU zvWaXydHJ2^ffg2`E`RK&z2s*eVKe@e%4jc5KMwgrDwF>j-miGJv(|B@XA(qB1Y`pj zk`Pyp7RJgaG45ef(|x_DGZsmYTEonKdRhvOppr}L@7(oD|Bz8P2t8fT$kbC9a9ig26|^l0t+NACVBnUbr`_Ghq<5hkD?pIS&rd@;b;{t zGF+B5{2ap`*~e@{4pr99N6;HDUjJ`1J>75tO8lMeS^9&IOB^-Oiz--v-HX43Z~h$rJ#Z=46z$dVKK28|@@_ zn>Blj#<~6qbI?#%XZFnGC6!5V2F+s7a2%1)N@Qu1Y{%~ z&X=P03OVmmcfga-aScTc+G%5e@Vgcl?u81pPlF9i(3WO2Ep`Sor0m`45^d)z#w_;L z=pCEf=bWg|uSfsXGZ=lP$hNxh+`MYwIlInPS33);aC`X`t}Ck(qNGwS4;}rZaqk^U z3OYm=K;=bW@0u*N1hCb=8ckRAq`pKu3B-EDTAr0cHYOiqBfpcs)<|K(7HpvZMS zjN!Ls!1R`W{0udK^>sNy)&tgR(M5P)_{iOH%T3d&RFFL*)Jl|b0XvGR$0_`@!yTN) 
zuKu^H47nCIdut+x)SAr|bsY?7;O6^>6@v}Jto-Nx94TTvA=wrqa%JwmM5ersH$SVb zP!omrnActHRm!xE95ZkxRD5)+NzIotM7?}PMus~g6I_dEV?I?4!qSiW=?@1WWIpr_ zNi(0VDuHxeoFf{@?LWWc85Hcx_Tafl$>+&d^GT%Itp&vNE})jehW>M3N9~EDEg_{y z!tt5Zr-L1o6vDM8oP{CIetNqXq8s^2t;|o<&0gkFdz)dd9ma8ujhCW_C{xU_;6;0m z+%74T*CQLA#H&lpNen`!bA^VQ-KBBn#zHhGO$AFLr7)X$B*9u*=tu2PDM++i8vLZv zOUQ9FJ~-MaCXR1wu)%3H7MY^bfnrXfAAx3$2aG6NCsRi8z%zct`nMd7E3Th76Sx6n|`C~y9VM)uL}%l z%3)qtm^(8*#9K^jmBh0$iJb$(%&GbjUrIsW_g`a_Xf|OX!P`|2qLZ&9+qaGJ$cmzb zsBtPYV~7wlWK+rvks6OpnZ4fmyL;>0%%l4_oOnj6N%o}FtCu^_(jGYNS;rA=N8uEs00yf3N(?GfpTg@G3w#>>#IJu21AU`S#ZTzB!uEx&pj1M#y zg_@1FDVU5?C~%dy+F5BF$1%v?MshLgHNLo;PeUm?H+B3OCQazXPymw$AfU#7pN~EI zuc4*4zgFLFqt7A^TD#7Cn;e$E$NSApCDrm)FJzO2|429c4#k&;LKUFV(KUGH;cmjWhoQ%bN7yN`WJrC>uV=} z8y_|pcJt1o2emu9;(h|Z`)zA$D(!puo0ZXZ_zw$0Dkgw60@uxHj9S5q|QMCx>_Z)o`uN0{~U|Sn%{Nt1rI)`O|*` zu-pYmO>-Ia`fsRB_7AJ(P4ag3iA6iE<=*GEX6KIlDFcn8G8P6kXWFBwu>7f&!v99Y z+u1|1iw!w97ZKU8p7B?A`yuBk;y*6i$!hzHAN?WasD2(>^TWy6!1~L(ebRthcUq~L^Thx0vbuYczr3uxWXGnCHlIav^;cud+DfsdEpw;$w{Le^TsZNR0@`|Jg!bhWN}I;<&o6N8hV&DE zM#`~Rc{LZ^wZ20qg{$bx;ZJW344rxkfGn|=2(wt4T zmw10^tan&Go7AWOa9!yqe+A5=d&mpSGH)%*6!b4IJ7V}F$ev;+{m6fXvgp!J{`iUn zEB?YbaqZ^HkNzB7&pplDzgEq-Zb&G}UAOq6|MX`M`FC^u=`a7k`~WMoO_GeZ=L=_} zEUfSeHC)-hgAy^n_tnB@kAh-*X{{H}&Tlxi@S*R$WN*>Qu!XI#=RZ8PA^XdkRiT>~ zmgUZW`0T^_v53K-JGK$9vSohbmxWt^HM1Gj^S7??U-)ePkJ>1l*+rqhm4#EOOe*IJV+$#Cw%aUf( zIloWV-$ZY`wm=ilJ{hw-4_5Hz{ug_`bt^LD_o;jM{?*0PNRxG&xzh}kYv*K@G@wcCyr^Ua6ip6~C?+6NPqW*XEIX5l-o)v%3iocVn|DPsN z$6Nhuvl**tgafl!o7uM47pbtg+Jv_L8{@$=XV&Z`o8I=sFBLnHhYmqs3r6&8cX0T5 zU0z&Q@$f!NHnIF-gzt_gs8_yQ*wN?Ym1$zmI-O$4?m_CuGfy^y`xd^sXRItcYWRoieomhA0rY&KMhe|+jsZLe$_u}Pt+Oc?=&ZdJ_uH?(nDz}UzFmDc2;J= zl*g0>Q@5V_oKja#bVz#2Xnq!3S>+Sj+fkky-mI&U*@WvPHyx)Howi$E%>}XK?=~dNLa3Gam(Jw2~maD zU63w=T>O|%@Y{#J@oVnC9cifvpS>RihrI3X#H$XwkC}n_Jv8we(m#XI%EdGL1uq}x zeM0m?T=CS8KtJ991k2w8UfRe~d*!3g!SN|tLjIIuMqDjJVeRc4~S%+8=zK1$_cO?auzlS1v^;g*H8Do8a*V>)|np^ zPZDk8KD`P~8NsM?{`i>(Ewm{Z<(gbIctHMGkv?2{6T|Ug+JfDbYpQWY_m=0#3W9bi 
z(}$abGwZhZY#mRs#719NRE*rAqhGlix`?o?m*){C@CMdepSb^5X_$EM@u_0*ZA}g4 zP2UATfOAG%cy3|L;J91ds;OtMA%&I*b+Ef>3TA73(lXLW33V60e#>joFqQ?w zSfp2vH${`;*NSTz_B+0oGG>_|oLRDBQ{P~M z2*C;e(>hf(7#l;tNLp5K_)4=Fo0TLNb*gJdO7F1RJVf;Amvef%GkBXxab4#0@{|~} zUfHhH=sl<$6}u81RZ4K?8U`fz-Mwj&&w2bHD$>a&8?rd{`Q4m~#Np-1u0=f(R|?76XJeg?}n5^9VS zOzZ?tmnh`RjBl=dUffp`;psOpOzBChuj6wocfO~;nId=hQ!~$t5zJ;{JL?O$vc*&A zO50zx^%0_H#+dYlvXFSOQsATFb=5@ohbK)HSReJ)>ba&zo|XgYM_20)Iv5M*1xM?^ z>fsOH3(dPB-TKCBB@y1`Ibud4i!DYaW$%SWI8u>_HmNufMXT`JS_Zbv06LBmbzsP2nBuNsa_3=n8uMmJCEKP6E=xB*4193!OZV{m#kpbKd}6 zwrX~MCN5YG{h7ERU(MIt4LEH#9Px)y=Vw#;*rnucjLIqIWWm%N?j*XDFF$(jyP;PZ zq1;XC#W9N1e(8=u9h#sWn+Z#lKWECL1Sedo4N+0X5D1e#}xKyHNb{BbT1% zLn%pJeN^-9c}i}Xx_VrA``)gZZ*FfmJuz>o<6G^NDfr|AHCgv0pTAc_*o8Oj{akm7 zT5t3|o*TWc98-iTq-=FIs^~=sdc7I;Mi3O<>}DaDT2T6(m6@;vpPuckLUf3liQ_^<*I<~}8!6|Anl<}m z2MF;9G89Q3h=LOy1ZsHZPuWi>|NO&T_UCk| zp~QjtQ9BI8;r#eL^?*wz)8q$-Ab5g~BGy}>L<|qKqfOrH#+pSi+10YuV`Y+_49aKu zC-{+qXXRmxd_NVU|6#77cooEWDIy-M_hNx%I_jK{><>^>T(XW?4<}d}wQfAn4s+1* zS==p5a!i{b>&4oOzc+UBRNY-50DKW4CtS#fAaN zvVZ^dQH$~Fh2n(><5BF~d;xw-UBPn;&MUx9Cd>$ROL~;(G5PleoXM1-fxwZ@;&g45 z_s5&dF(NK>m0x@-WL>lf6DLtL4uIu$$Z<^#llZfUT6c3rjXsi5PZehw6}!UnR9%E6 z!Urlhyl{k31;(2VVH=XuIQc0eXgF6?|DMV1l=8(`xBfm>+xV}kEX*|3afQHe;MI8( zKjQ}L+M=4xASiI{*OXZJH;JDM4c2mq`#nXY3oB>F+7;{9WKjli&`kn^L-pf8j@ z(~k4e&Mbv;PXHxP|Am8KF-B`bb_Q*xtV(11y!o2(P279#nCU8r%ZCdvAO_fBAm4Db z2&)*2F~7}oonrQxo;~X=mv-@{iKUatu4|`!dS+nqF?^ND5`h|djaAo#VQ1?ORSBqI zMM`c&%dzrvOmhZ2@%zR)lwRN4oIhRXHxJsn$8q>Kg8^vGIFv@V)}z0NE}QZh9nDAs zA(A8-ESAM5Z6h8WRt#Zqq?;31wHLQ%QrUw-M#>PfaooI7uLj!R`_#9!Vjr#tc6PMx zrsF=eJiQIQyCNk(*rJF#iIK7(Nizo%EJ-{B@d$OY6g)>!^1nPXiuziyI>G~(4N-4t z%(;MClQz9dsYV9pNfndWq?w;G%K$M{=?%e|U^U$l^^NKT|0;b%fQ$f3#&f$E9yEV8 z4)G|E8f{(gfe!%m_FjKDs;xy~{ppx~Pe6W@#5HLKD>yN}rz+FRi(AN3(|n_^ zxo!%fP8vhE)p(~~LQxCE;GU+g)NmRh(JxFQPt75)@`u2{f6*KiaNEqt_HkUvcma#l89b zy(E0%CgD*}Qfc8xTeMIU$sq8A60lh}>-l|$oH=q(e_P9nhtkB()2Tv<)rL?@(hWDz zn+7vIrI%3~sw~uwVttK3BQk4o2L1qRlIJZf>Z($aM1093a!KtI3Io;k^~Xu4&Fp!sPBPeEh8%6JdXowvJ*FtJQ*KR 
zycZ^8fH1*L_)Ag~ggGek-X9>X)Gc|AUW~3FM;Ai&gwMoVeV-D%CqMQ4diuyl(Y_uW zj(lWB=GiuzQ4Y>f#)swo#I^9izG?+IF6SowY6L#XhNW2&N0w~&TK3xxCn?iwp1JoJ zayJU&k(4|5A|$_o()Ya!lVg3N`|Ol^CA9I1MMmnY*IQ_LIxUC95I^sXku4W&iNOfg zqJ_?Xv|8T=&OnoJ+*#<|;-Q2KYr-~&s0q_EP|-eUf3Hd2%|QC#@`wH@pzd}9_=yqH zBpQ^Sz$2unn?pMeuVdG5mK797f4~%sLc~giG07Ql>jvgj3_LNEey~aKY&k^uBa1?a z0*meInkiuj#eIIiM&tvwulI2fY+VFQ{sOhu?7M3kc6{K}emhu7miW7z4dZ#}I`pXs zuPlh;l*sHY9E}-K@W&xYyelO<&m*%Wa^&G%FTJ*NTbu{`ofoDlJ4m_HHLJaH10;a~Tzd z-b5k|uNhB!niC`c9!vB~^Y^|)7mSr{wj7f#i-ae3C^AWNHgO&sOnlg~tV)29Fm~Z> zHJK3%vcACFUJ`#v6@m4^WY@vsZq}3;u}=6P4FiD1U~2t|t30OvV(d_%O^gaiyEORE zTbCD;^jNUPlRZh-?%sHKR`OlVfMw64ySF=EW09T`Kw@Sfn7GAKj(F*l8c*4Do$*lR zsz{6sFYJwo?~n{WaA!~bfMM%$`(l5$%phIqcCSR4${f*p?pR{=r#Uj)d24|$iBp@y zS6}Q-V@8w7Npj82Q#FD8GGNP$WkgR0s-2N3+doiiEeX1v<#Gy!S#wf_{YL_|(gsDK zMp``ke~F$q3;!{BBpHP)vJ&TvqS=qG?V9f884Mx`NP27~LU7VC`^h0!UQJ~JBCNdK z@*U>N#2Zmvl5}3@O>n~J&Pafi1Rf0%iJ{6=`3fUp#^$OK)Oja5bY5Bv{6ZIksowQR z-y8a6i0t@sfa@{Xn0&(=DwxY!J@k7T8n;@^6r;fqtmibk{EE-lw8u|)=x^w~Gbn(9 z2{5Z!GPPx<*3rQ?e4c;FyQgY-Vd@!Nh`Oo8=(YM0rU>9B#g*4S$1(`uyo;qp1Ak>3 z?Q<Ro-Yaha%0kmO3OykyW!Q%H8}in{66~8qELp>}i@_j`fWT_hC#M5%7Kj zd>BYxkqlDlk7dh}LU@hzHl2huZRvt; zr4~pJ#AHI)+AqOI01Snb(%0Xi_JwW27X!@=NAKgVnhmE1iY1QMkkin>>QxATj?S{D z!T7<%_;pQ;5Wo7ONV9)OUDoiKAYFF)31_7@#^Dq)P`C_GXlTbM3wIDT4%u5oVvrD& z$6-t?lG+9r5|HYbX3=4;@EHBt^p1Jcv~#LfDSM<`oi;1>R(Pda1I}}6I$dPUi>PmI z4qe8n0+AWyh}|cc^}ZeNGg^OwmPdz1uNRrB74a0*VJ3SyuPyysWoTJJR~6>Q;0#<^ zD1}c?vzS)>IPb`JeSFnw!49zIrhBSFWUNl;3S=`1&>KerCEtIoyE*$wLsvxoDu~PR z)^?;Q9nBm+2Z)GKR|69qvO+w#Rgv|Vh_3*4Sj-#S7xG5nQJ)A?%TW5g1C&0B8%KKB ziWlbd=ty(#B*NcC+uo2_&U2j5br?n^*Z`ML@LYk>ci#?N(azNJkTG5U^IFjbRLU6P zmQGHNSn6;!O6^oRi-&6wIBNYl=16O7)qb4TMY4fufmFyHNT&fk^8{^CW;WlCohB^O zglZ(|f21LBDpw^=bXT|LE2dq%Y2o3wT4GgP!|Z2Nu}bG>=jrQ|h>;(9IF0;CW$)#~ zA)&I~NuEJUwOW4=!gc;C!{qNcGn#&;@aeN?`b1rgBAAAb920(Ue4R-=yvDq znWj*rOAZRrB74xA<#5V_IdY3Z^$IKH)S{AUW@}lob@-atYHii|oZ1jbI41JUnqDpt z8*6rIn@}Vpc`4u~2P;z)SBF#BN-HgJiQssgJ=I@-ln|ii8^^|V-;2~^%(yj#om$`Yk6vh%U*sI!EvnpWP 
zdNOc_VY)Bwy3$Wyd;8Ou9X{Txo~f{-X*JneyqSa@-W6Eiu+_i6SE^J#d~?n?nOtSZ zoNYKc_|@&EFdxIpfN#t#Q<#f2fo66C=`nwfxIg|ty`nV=Zsdg`xk-9lSRMq)SsV@c z;X3fcWi#>~eBO-_Rst31<|>mq~vf z>8Ch7!kp+=t`EInNKUZ*O0W}>TqDP;t>o!`{#l;-b;{`yO$tn^PT1%`qxm#)N=}4U`ugAnFvUDk~mo#Z; z#2xYGFgfN*wzHke^|$s_33ki+wN9EdVfR&ZHH}ThnSXoF922+n*+(xT=Y$1a^4D)` zzt#mj+C214!qDj$kkw_LaCIge3+`~)Vfv~Vl{yv^MZZ2Yx;IsK8?DPOLN%5_u;EM( zh}k+i6FqIcaq^N19n`VZVfq+k8QHf#HxNDZiK&zLo!IJPvApsOoGcFvWL_{(PO=Z& zoYAory6cv5xK8jTyY)M>J>LNC9)v>b@tw@Wp>Sd-J*}_X*cd=H_cnMS^Y_r% zL!G+fp1O$Sb9ks-*)tU>pAMOJOO<7roUKmiC<%w9hOT9@;1XS>{ymnkNo@JVqt{I| z`+Wm7(UsarQAL?vvZhwme+2Kh9CRaK=uvY{Er3U8ueHnuk(NGNvuGU4LRw*I|F-rg z->)nZcHFMX!A&CBc9R=Ze+%YJ_zKu(+u%2~#zQr$06xG!L3aD(e-{xiyt^$>Z>U^# zuX~DhNFT52y$e$Bv*L_Lqn=i$0Lqs@5sd@ulqfm}a?l)jewznR6^^CP72Y(9kjtKz z4oq4)}_zJ6VS+3$t%cPMeE@nCAM+%8S9i-q6 z=K@rD0Se>Qtb;)!43vw!HgC~Guq99x4kTig`qNKhBve}K+cZUSck-JKSM_UlPae+j z&*ledo);EdUS?V}GjBe+4BQtMf6rh~F5*S2R!3U~9!M|Yfy;NWiJM!ajN3@38{^!~ zE41z)#kluvt!`9a$C5RoAVWIvO8oPKgjtnCepN$;+(ti<>>_G>4D!BqW=`^~6Wqhs zQ=R3KZMzT~)6c@121Em1z=aAW_XYVBIGP`cUu0ps$FTT_5=uRq2dmgfRdhuBTS z?#%h8T0)uR(pqaO94~$WCx+;*YvrM|R!YI(c@+?(FHi`i>666GvT`u9b~rfT8YxSK z(EhT_r%fk@JJ=LKf^Gg)qu>&gFQs%c-)P8 z;NRE1)4;F5wsfWT()H8gFL&UkYqxrUyoif@S{7_aEIwEkR_>7yS3R#yI+HFMb8Ops zYw;1ZUfBvvN%z*uW)YpZfvFCr|j@VBa3!cXQl-0 zfL0A%Pz-kCt$OIS>Qw+YAXE9k%+<1*F=6I*QoUKfg1Ae>agkKVJ#?un0!6qr1l$3cNOq}!Ye$B zrKUR`IC^(edABC!$kT__IFY=LTHLr?RF8gr4ZB|_2M+yfO!zP|dB!{s0KYi>Mo$_~jU2BS}6hn>{>;bwC`*YUYC^HM}o z3D}8=0V7>5rfCiMnR&}XS2T7}-;))J!<~~ZrM-JY03$`$>oGy-jf8tJ*C;DKc{QB} zrS%1MfbWBc0JjhxlZV3Ft+AP|a_?gN$dROuyE~cE_la|Y|2rV2GE>r8yYc>+ocu#< zO5=FU8p-#K1C)rtYY~`G`t=&)s+sT9rm zO0WI2SoTj!CyU4RU+ESsS6#CXy;q+sdkI3G4&fP3MN~9D!Fi`+K=CI@R5p`B1tlKq zVyM0-IO3(>)`sfaCJWARTmp#Cs)w{+58sA~%n$It6)fwfXYRvweFGMbQ?lOoB>gLA zmF1W7DDb*C(`!yEfQ<1f4+9AZ z)HGc17QIt~O~|KeoqHSWqGb`P0gdF8dM?PAkmH+U4LQJ${yN@Zit=X^BuN4`d0U>; zGwzhC%v(rS8~?0pcJn6W&|5&FTHGzi+**nG;2#^XUv-Hhrt~hFZ1n_!tQ1K&7x~P$_?%=seXDb11Jfjw=wi4 z;LmzmsN~d+60nqNiNmM}_vR)Wr5N!6( 
zlw4CKopYul^Ys~V7Q0of=np3oOr=leDUmpI*K(w~?|aoTB`cvm6s5;6;xhl3^WZ_2 z7IG&Rp%mT`Pk1K}bDo)BM6(nNh@e=OU+MbF1l^OnJ1d7cxs*`aFa^ve9dFbEeZ{kC zirE}4D3eCr$Gsvm=Y5>)j9jDH+SRyCL<45Tn~f+WcL!k4$4$dA((hIMoR@KI3J-7o zVGfTdy#Q&tC5HOoLcMYMy^c#}DT_wrFBs9vc{N%`h6bi2L4K}G&_20)8)he*ZF|+f zOvuGq`o2RG$IWARt)xq+6eq&7FLxLLqr?5STxcNq8KY;Ycq2Y@c;!-u51Atc7znly zSD=Nn9@EimqKF+*$kOfxslRhFi!nQP7&n~|El+c_?3 zir{5!jVO+9+@m}%Js{%a>y;8#{|2_Hzp)d%T~NXDKpG?Kqb(N2%%T3Yr?I=AqzeCe zc*BAWD4hPlE?a2v)GouT4DrQc9@u2$e6egh4ihvtWSlNQNvwXNE9l~)#GcY?Zo-qE zl_rrhWdd##d0IZFhu_y?d%Wq>M*CKOAsvmC-lCW~YDLe^2m#0A0YiUSj9*^4YN5K& zxGzL)3>FfmMl-JS|M`f!*j{7IT_~UI3cX8j-}@_dub8Q?*yWM2g2tovqyu;3gnMNV z*JW+K%OJ}!*IreHIJSI2zn(Dt<1W#_MHKj1Wuf#R4#)k~UdIjR8ax6hCB5l_#Nn&k zZ@od=N{7jp!U;#C9??i72Eh}n7wz&){D|n z>}0wBcd|5&>xJ%}qf7$#AxFjy%63ELoFVn{pn?&>;rSc1uuEtK-@x`FD zpZu>UqZ1IH^WC@`mOk^(AlY#uJemWh2XCp&Q=V|@O^1`;gV0zq(h-YIYC|zK)3wWT zWQvow=_|sLVnp4x7LF%dA@Yh|)L;@vGlW{U%#$DE$}vM--dAkrZxv zt|0Pf3G}F{1VvQ>*e6Hc(7#InirGd9GOyV+f82;_R4NfsG#)pGi5`=XC%M!e&&_j1 zJV2Vl%y7BIG$ya^t66h>jxz%_mO?u&L+5eXzAYq^bA$Uc$XMy!>SM(`5XN6(~SY!=!va{o4~MK-BPBqC#mE^U{#mFG5uUYRNz?$Bsf zWx`C$oFVnV`#=o?mYI4kbxh;Vh*Sr4D1PZ4q!i1yp1C-4; z(BeV!*WXts)#umuqzBYCV_cFl(k_{|JIEHDx%syj)k8lo)4wLRh-f&KJnhjIc1DNl zOrQ9x))wJ1p7=C!N5k@f{)2jGu3fSOyO{#_v-_v<)1LhUP7GnF!y_GVKeOTtBHY+g z^TziagE5@*&T|i%A3f3LB3CcNYrT?YKs@pz$*%X;a>xghbDnnYto9a_WUWMe12z=9 z1Tp^GDQvZ&QzZ;3j}QcRtSM4OvW%EypR?WkrcB$WE|*K$~Knl9aJ8Ju|JWG^-k?aD?WcBm=c zDpy@P2BmifaU)&^Q9mceI|s0}^*jQaevp8d`GcUHh(=tEVqi49S3qL{qQAEQdB1?u zXzDF6CniPFXU4V|6CD~Br@G7UQ#};!xG{(x?o8OYiLXE3?*>e{OG*`&=2$=W!>z%Q z9thMBEY9`w-!VDvl~JDe?17_0Z6A)~l-)XBAhu{X2|ou(p2YFSXADemM_G|73PSh_ z&Tp2j6wNzuamF6W_tVBm-L|B>@2V2c@`mQ~KGjyP=_&~LIi2Gj2m%T|d@Z8>3Xg;! 
z%+wN_*c5vi-`iE0=VB9Yvl*I%mIYLr3)Gh0a_SPispLsY918`3v<;HmJ^-UXW)7bV ziNu7b7tOEPm4)UIzw=U99<=wp0Mmezy5vWZ*gm(-zt#>?QBg{*j6BT)!4!DqWMemE zAtVy15O^~^qQcawKQZm)>4A?(rsfUo%I63IJC^?5V`y1u2P-+Rr#8AAxDZjJVqQWW z-o#Gp*yR$2H;kCm$CMY12{^rrT5(2h4~MP?+T9`o`rt8?tMYZT4wz4YWIeu|&Ge`rI8gtvh z0CS0I+>$85*>_%x3PulXm-g4pO>SO)YfSyC#5h2bEwk&D_|&pAbY(NOYkCB!TY@*3 zCOq31nQ?&~(}!YgxqW0T1ndeMCm%9-ZJrC#f7NX!w4UX&5_Hh;$^?*;zUYC1Tg;G2BF?Ydb(0^U$}e-bmKQ*BR@J2)^JDQWG3njzpqh?k0Iz5qbY ze(OfC8k-ESj-tot6_~I*6bz4?vCHb$r9S)Z9Z{n0!R_f=$RE9axLy=zP`LY7?aYC9 zSHnnP!@tcUMq+A5Ea`A%f`6kSx}aFr>+O23#8@mg&BA6|=EC+FIi5k1q_-uH2Kr5D z$9LAkg#~=0Tu$DD8Z7R%LhbBs*DpMEGtahwwwxH^MXu?$#?FHQx`QAT=d~hrw}n|e z9~P+ieIm8;EIxSU8j&{@XOaOZhz#XLbJY2G7ta`=%+V(4UqyD=B5L5EiHI{41Lp@M z$MB5zD>L)y(|^?=2%2y#k|JC8E|+JjX0rF>Ti_wmxo0l)wR$K~=I&);9KSHDie&r5 z^h`4KubAn&t)hq9aUacP8$jk5zO$A*;8B#=i?Gf>XU;1_4FPT9-Aco=c4CnGa z-OUll%Zx%RooECEU;lBWArwvuvqCM#P){jw`bU-u(gli*-9R1!WI)A%zjkqWR0kJ& z1rR+u&xZ7ut;oNj1Td{CJ{4%XA7Wvu=cE0Ri_;ASS)3?GRvC|+G4 zK1eXnj?A^WX{~}c*dpP0^vt*%VYb6IMHW?8V?0zlVB>|EIl0#2IqJf<6Ti&lJV*a# zZx+>_P*fRs>ZIf*E6ng6o*iN%R>z^)*r$h_CHBT886UfKb&~E4rg^+uv#$ddg^919 zS49L!@eavgpfYN%kc%4DM;K(?nrrRbX|Q^X3#&qQt^TDOmljJ#j@qwJLG@xe^TY60wE6l{!rK!3!Erj&DMlI;6^~_U+e}-ogXxu3sqna>d0x;cBU<3{6*ZY4lSxS`&v)mnW7e1q zGeX`R=Mp}HF{mjeAP_h7jXa>>_cEw6CpdDRI=X2&7PmfBBik<3+R5ztNav0MyWUh6 ziEXy#8)AlZVbnulJ3+40xM4_Xuv!A+WEtpwPO1paHpa}zA_jk;?8F3R8l})4g%ezM zSV}227Zx+g;v44{3_i3^P@-C+&*OuHWd$iaK#d1&ej|`|oHOXf!8{N^l>`t-x^wG@eht-*& zuE}D`n_XX8>N#+{BD*g19@(fy@XHn)F4T)=# zHwb%_^`8vx&f}O_vB`c!tL4407KL*Yw|a;kr)KrerLeQbEKBGZ@@zUYHKyL)1+T9| zKD`;LuiI(%PSsBZm>$Fi16PlU z%$@kpbs1@lBu76NAtvFBOAW>P+<9wC77g=_Yz$y3bKrS%^lgI<`phu$li2O z3}@=v0`wCSl+d+o{j~^PFy2_pf)hs1X1*Vwi)4r$Z>24ki3khpBIM`;&+8nqx~|z< zUZ%G$6ee`s?iDW$p3&V=EESiWDUCpOa7oj7<3elz@rLVMGK}{Pdy%x|sN{b7vQRJy zw3Yg@DJJp?i0Y6;#~c-*+UF;btnTc5xc}7KOycIbW;&#q7r`(YR))^^LPQEP_9Tz1 z2`#;XD3;jXWn_=-B)zU6PdLC#2^n(*y$K*rpvCTpFNG6Ga|$9Ixf(GN4^QklE#OcS8|t+X z?rj3c9$As!!CaLWP_aLb_jaQtJzxnDiWgwgZuW4zWqcvnoSGMGvbcL5`#}28UQ<_l 
zrrv7!wZt_63eki-S{G%yOmBCo;pOM5JGgBp;dAXQO^Sa7G^lwd zFH!cZwgzZdBP(}CxXXG}e!bi6wUAu2RDI^$ocSVLwkGpFk#b_YL5EuU=40YWihuy3DfP?I3Zwm$Z7baRomilA7IDn*kA7 zIYq{Q|CCuvR%{UNHm$j;d+7#jv2@_LxVmE1yL{6T?GFXZzHLaM&v@`0PXab@cPR_= z!FA?5(m=3y*R0>2;)emQzea5`Ck$^}=CE5h!|DjG3Fdbb zyLDsF_Irw%8z_W9YSm0p@pkRYR0Y_9p}ZN4QrGbKpmjs{Z$)OAh0voSMX5C#>;ttB z=Mwm}BJX4okBkff&@^VKQb>n6>OeuXie$JrM1r#7+D-mC*91_=8@|@;2sPf0E#+7_ z8*K(FH!9irqSy+AIIK*(P?8a#{YlyWYMib>McS*n7?tE)!4ia$H|u+Geq#cMy2TY4 z)3${QS1M>1$Mi&9p7`njs#2=FAZ=U+qR+Eus=LovBc^Hnaynl%aK+Z!)aKQD29T+@g54 zUH|AJ>OHK_)>9-PJMah(uJwJ7N#le+$I49k8s`p(^qk<9^hGAB){h4N4>ya3}t zn9!5#zG{%|zA)@h#zVU3%=*(U?u4(Jia93Mr1D&gZ>h+jWjEbGv@GE>r%5zY8Eu-C zBzNn?KuYd3fmUIb#iz_Zf9wMmfJGf**Iy$!AGw33`@0NPtTAd)F*QogaB#< zWW&;&xP|%Is}nA)jBtGI(z5^cYne=UciEQ+Ed#A##f`JY43}cNT`V6f@xFK=?RTNT zBf*>x{h|Kp2d)k)foxS~8ED$Tw1L7ATa0N2bO&mr%w<2f$C5IqxyRs~RT>>=5=j*l z&1+X(&=r+%e3#2@-t|@S8;_Hi*2xst6F>%JH|XZtZ5KR=t-4vnH9h9x_Y@Qd8M@jc zWexp?-??u8T{_%yi#t8w?k^pHhpMfnq*h2BwwyKH2=NdkG%7>wuMm=Wcxi>~4Top1 zw@<}qXL-Pd2ZogAOh%fE!^ewu$gnLewiSH5F-LTDQ1wm2@aa~`0D$G z97b3>w*c5&wdb+;Tw`>*gQWJC9=G;SwWC)R#RcdfJ?F63waZc#@(V=Nyw~GywaWw_ z^30>9VfZBuclM>g>uS$ILwM)O$OHSukiF3bEyzR-Q^{sM#_GKm)JoX{Z4t7$<_&3kK z&RqSUkyOI!cBo1cn&8Y+gH|cpdIzxjdSq&Pn8foey(~V6x*m)q7g-O** z5dwMdc0j7JwA9R@m7#OJ?D0D$4?k$Gfh9>Q+OLQ1IM-6X;+BAI>L`GY=GysGDOYV> zaQl{)en*fb+sosiYzo|!iqgQf@n6{D3aHE<4gg_n6s_qAloTJ zPopi@0-T{&1Jzh*7tju7+PGwJo~Dy|#b00n|LfJ`&JmF9Z^3L9?dc<#rEppJP0YDp zAlmA(i(@zC+XvUA#&}ZOl{MZD4jw~ZzL2;GuwpKq0kR#lAtO{B{j*V0&lXr*UeYG& zjAJk=yC2)tTwf^_$%$^Zxx_WkMIb)e)^1w#AuR0&0abIL=b4WcETUe#ZJ&J+7(zer zx`C4*f&jtJu;`<4s&8wtRrMbIkq7H|Jazp>Tda-$5uS^D9v5yT3He;5eYu6$;#0by>Sk^r=zf8>FIzq~^V4Q~^|wkyc~a^c2imE3M)1%{ zXsTn#sTPSEoEm*jOE|ELK@DLuS}gw7IsIrAVavmo`mWun_z}h8{5|H{=g$5SF2U;b zMPJNIp4UnB{1LVxMz~Xp?x5q_6@+owh@bHSP>}w6Bfo`#CLUOMs7n8mF`S+zR&?}# zC~|N(mPGg%dnX`{=B(q-?8#JT|7<+{tJoTA`DX1cCz^l`wPn>S%Os>4lW3Rjoq&0T zKyHbD#=BlPhb_5=8#G6$`;O=OHOGL(Ku%#sXLg^gr^bsPkvPCXmne8k6OAoEIXE;nQoQO+qwegX-d)hzRKgn)^9PB^B&DVz 
z=hWBrTW3L6wK76S+O=6!&rc(bPRtdnGDR7`9K^deDDA1GCROm|oZB`k2;9I9&^Hs> zB^5*|j6Ej0;{Rgr-J_bk()M9H{Y{-|t4>=SML}MR)&`0c5IKiw9XSLwHIPISfhhg`Q&%RIBb=~*gd*z<&N_|^R+W6ATQgb7}gS6HBU~~VmF(m3@zqZ!_ zI31%od>_Bm@NP@&=<&MOw)`wgoxH24WL5Tm%7FeS17mYcn||Vnw1H}K=yzF9R{nC= zC=9RgSV_;9oM!Ue7S~0!Hbv{R+g$bH`k;YvVTAnIhCM6H_T&w~Tcmu2`&u>!)Ha1$4`wdRt)0qn_u9YKC2%lBy-5EDu)DnId*-f(6{vKu zy53l2%9J9PXP&QK`mh1?;#aIBQaMd|)hoz=I=v!a#EWB@r@@w{y46uXQkd^`7brA<)QD@tj0YsxV+yNQL`C%`A)!NLV!jV6^bt4s$;z25`Ly}@5%uB~^;PUwH?X(TUV&~F##F$6w&4-PE zRk`Z5F#%`lg_*azx?cO~9*nuG?a^>iC3DxKIMk@v5}5<~5|X%dds5%}%xx&7tvZ`-Vd8+q8Ks$PWnRv8z54zmE@1SJLr7 z(HJJwr_R5%Wfjar?AL<{~i zY_d5>O8+l{M4Z#{)X;)-B)@i*AEbP})r-7{go@fnl7@?SYz;1tU%cjjRg5t_sX-br zm?0!)D zoT<=+G8(|;%;^~$OW5-N?uO6y8ZY1AG%CJw-11U4yz;RA?zn#h^C%ct`0NWfV-}#0 zhMRg8AXt{7wg!=+U3FRR=}a>}+S1vV7gfiuL_yYY_&?T8|8wnH`g-C0)e*;>0;D4etk)p$x|u{xQ)p<0>iR8^ zNEYs&x*D3J<=daI7YZ!C~Mk-xPG^}p7IXQe#NnEU*fhqrt@0v^VDe55}%x}uDH7zf&#qbL|} z$NWH6r=@KQOXg$R{0Va^Te%uBpunT$z9LT+Fc+3=ag|ALLK`ZzeLc1#+s`t@ z5$WwaUR3aC7{Nah=o*6^T~=7yfNup7J^QueHi+ zXSZBcgJW&Lj%pv4m|xpNnfkata?1!TqF6r|Pp~PGq);U-En2Y~l@t~~-rjr`*>Vt! zG?8>G|N2z2DcD*ucL^Hq)^JW^h|8*`oMXd%5+{6U%BdN40*E{uf0sRnx?Iwy9q`3gKS)<42){3tZPiTKv6&|9#2LsRtyVxc{a|bE|{1 z51GjTfDh|GwuSbAdD#2k64u&z>*lk2fCblo?$y`5TDw<{0&XFA6&3#i`(H)HtEhMt z75`3#Uq!{MsCX3>ucG1=)c#i?@ufpm7$6E1+4c@l)*VTxdlP5O6;GaL(vh;g! 
z!s5nqzxtKmmgd!py>gfh5oF7jCDR=pap;a((RPMYuu#sj%6k8^EvJ6;aDO8rFfJhU z!#Ci`yVC9|ypR9sKmKwj{&I_?R2ts&a?IxW7U+xLTxv_&wac*zLb+nK;O{k`KAp{2 z@ijVbe7ppiPD`VQtcfUB8FT|Ui@Lmah)}17ze#&Dkt2Enj^W-oLUIe=wjAh;Gi%L( zi zh}GdkwSB&jWV;8W7k<5D3nyA$g7vKn@P`6cz>(Kp!7OFSRE352&asO}H%|p$as{t^ zFm}Cb^BPa8|KT5=i6%WqmQ5PuXICoW9)Jyv>(mk<3O{?*;mlbZrn`bke9 zPz|r$0rv9$#F;L&{PSuHt2ZzA;(zdJ`CcvGEAM+1yr5hDD)+rA9b2}%Djoj>HeOX; z06f64Yp;OIIskk23irK&&0Ds-g3Yg32Vlbgf0MSW5t;~-Cc?(xXi}P7s$bjk47x$} zXCqf5&|Rq|Z&5w&S#FTp zd+#lE#e8+7u+DiBu_rw!)C(4|%U|2lvVw)|TP-nJ+yB;qg#U61m^b+Dbd8<$S4nJ^i%hP+Tv%%R6?1kALx3{bccXtFxyc z^Y^Ct8^U;vq5HGxby=;HuT^Nt4N1wR5VF6e<7(ZJIs2(U-?RQ7{!fo4<1r=N>eQFl zJ~9UH*-D+i_ukq0glE3rj-#86_YX&vyKL}!_xH)_`sb?a{BF0S_En7`9`~1gqRRuw zK0(7?E*FclWbDT&(!%RQp9+g9fcK5L6fY}1c?`;vT* zqqezm2eC%vS6ng5E-&RDULJ{KwtX7v%lsze8vX(*AgcCK<0O%y*{_+0X_JM{Ml2hGxLm zUesF9-kQZd@&W7F%2oWN@1LXTYm$Qww@Blk@#mox{XUR4yif1pqwgFxLgi2DZ+0Oj z_{8OHTyO-FrGHxck%s?vWoKCz-YoJx(6beLI{X6j+Q5QJqci^D_bsB5#zWw-K2+U2 z@&OdWhbL}1!y*djPMeIkGa{;1V=?M7duyWuDZtU z3yBV|KC;?yWV_w?mkcWL(w-Ycuyk(m24*xk&C$GgU)AFl=LY`u(bXI^UY#v`Gwvf~ z-*-HDa38OuVksu>1iI_!e_g$miIcx9eCNAztntC3KV%imd_O6OeCf8h?+?;%e|HzI zDj}nmy`sf!8HBK$7qvK8Mv?B0Ph&zZ#uZ#EQDYT=)E6EKh-xbF@C#lQd?ImamQf+X z+ZKH1eRnZ?t`k4jE~A*b?&73Y3)X^z!OiT+v@|Vk^2ZYsJ|Wa)_QDA+`(j5D4{T`9 zmi#9XfoDx(l9?Xi)y04%-JcKEuOsKhZ3%@xLS9|LKjL_sDZk&(RE=&T_(X)s%IS zNIt>60KYI&nHJmYbVvS~h@y3b3N;J-U6oPw&-2E9LW=Iz$29mt(xdlzAH!J(#re^f zK)g^>`8yPxPspa4TO;^uG}Sfu{IhAy_$t^&I$xKMhuz1Ul{Uk~TcH(?i4k^#w`sbl zU#yq^=@ok-R`<4P_65fAgaZ5pW5W?}AaDekb!T4{bTnAsUd4cRI`Hjt<|+)G#CO4M zE=oo3(nao2@Sdyhuj_UOZ0Y2BQcIus6xDxy#xDE9lc;O${URcxpbNj-I#n!8u z)S;Q3G2E@R&x(jX1aQjrhOaes4Mj-Wr@p8bD)%EZSH9u?agNe;F50k+#o*Ae*X1)W zR@&11d(Z6XS%G#s_F28S?--<}FO)B=8S(5x{RPR&5AOQy4XeV{-`FRY$J!S?Necf7RlAmabElQmVT0Fs*${<$tl;5r>%|G9?Nh&eo7rA9 ze{@Q$sr;MXFM$7gTFtXhC8(0E#Jf?Z9(j-4UNc3rc%AlPwmg2) z4c4DpEHc%h+Si|VAW!$#<@AVo9|Mi!^nWUrue|2BA=GBz2G?`kh+71>JL!{Ts^#A9 zd&B_Ty|p#h`9vqZ_6*8jYyulOa;qQx0^Tcl=c%Rg>Uxt>HQd`v2wPXliB68_zO_yIXb 
zw5v83I=^*4SL7}&i2Q=*d_<8lL}66Pjc6kEiY)iRtt_!&|!x zCW9ta4Amg!Ee*u`I)2yRLe@2SJ?E$}WldWrk9<{M6V_#i@M!HHF#UzO(Ea*w7UrghBkXBBo37YeUtQJimw%00McMODBhs-M(cI>Y*P=ve zwX0+e8Dp|;z@JavE!aK4i{-#OEF?PrOkLw-Y_)FSzRcRja zzBQp3LT%Nv`Fw;m2eGFi>0FM?wIU}i7D@(p{fKtgua>Jh&5|1PljG{W+^$XqBsKrG z)O!Y3=DZh(^M(98{%@67EWlGF`en4F2j#UMo+lOE4)UL0T7e31dJ_^#k4&vKMYImP z?g+NZ_%Y?*UwBct>v2tP7s%!lZsF(THN^U*YkxAnfqQ)m2_rXD7_Yc92){%<_kG^M zINVg_jOJ@?TB1c0)u%1w>Sq{m? z6j?-7Q!)t0jTr;M-ARuQQ7d6_AdaDT+YJ55(zb=m^lgZ}$P8w-;_mQXuL(Dz@S3ff zeS4y#sosH|MoJ)8&+tX3u(jKfwZ}F}M(a)2!_?&h=ZxSiIPY`r?~H}bC?qf(MmZ(u zP!dxbbv{_WTX0q=X7;A&Y{E!*Z5Eq}kRD1#WMni+bHKm7{>-+%L6!SJR50K4ipRNF zh>&$#9gV*zPGMg@Qu)aXKxa2!iQHwjBR2-rDn3ea6XDnBnJu#@yRB zM@DYNjy0Z|W+eEsU-&QWt>3m~CdDMl;KT>dK3u7FQ?)CcgY1%!#)hCc9# zcn=Y6X(@+W_Q!Vpa@t?u9_5|oKC+d5!9~J3Fo71VJSI$(vZV=tNR0YT@{izVnQC$4`LMejyjBq6_pRP2vtr^r*5s6t)kd8gpa` zE6g}#t#F>PQ%6cL`D2Gj>i_U)@_ViRrlc-@Lia!{ch*?vEqH$4f%O}D>9V|!P{l>G zaZ@$T!?Kk&@eo89O4JgSUI*yK4ii|-V@C7m45+`AWd#=j6=$xVvDbigPz(F|m|k5= z!vyw+ggJ+maN5CeSW;IwKkK=!c9k^ynQ`{T(i7~UF0xtwIcqd_xbVBjwub6>D)DQs zCu=wE<*v=TqfXvHIC~GDvnCH5JUIKp1{e4ZvZugUzQTrD$in7-+48hsy5$~?n@x+H zlv}7U8LCBI!CZaM+vWtA&_-W#?Mhe23g2mxi)P>tiGSk zLSZ$#Eq5Kybj1pYIQ+$Z%%R>E@5F3d8v0diabE{))fVYO#w$)Z5XAVR13aNBEVI zod}vZGet9NrW+aapGw+YltGzAi<3@C)?07wXClP{U5c_D7XwZ{*sAiA1-YnA&jU#G z%i_hdtLdGn1!IiyC6@_Be`Y>MMCy-=d%*jIToP^8zo zH>u69%RjyAPG8#D`s8JG8aJxz#@>x|Qi>&&#>cW3<_ztVeQmur1!oOXd2($I7P4(9 zz|g3n;5#L0j)*oNh@UITaGqS5`u|6ps;IX`$U@3!ZMyhu4P}xQ1ORI5O^qC*eqKf3G^1hY1f|PDHc!X9s5u668G{(Y zs&w?PAj4qOtN$qtXsiM!bKT);c^=I%P+kST304t^4Nwl>tGMs;tt^2#wW#Rov zx5e}i+0e^Z<$p`q@;5lHDR#myIB(%8EDqY*emItHPT<7wwe*w9^a!cCOmtt!9`TwA z&N3$;y9{{x1gDY|U>mc_y*Y+r&)F*|2ez`ip85He4L;gZmV` zh7bjF!NO@xvSW|JO`7KAi2OAiq|DeI+LO~)!DR!Yo4o}cghR3yu-~)OXIQf=iRU_6s_u%jmd-L< zjWMeyzbLYw9GcO_F%Pa`&Ga2^3g!qb^-{8vr0 zd6B4tS8yNo{j3fv6ZM-52Ckrz5SQJ~n8~iF$(^zn5Uaou1J`}fy+1N7RYew@)UmdO zS%g8Z>1DRh2&X*{5AvBGi^9?MfvP^OQgKvnFQ?+L{i&Y`$|6J7EH*--F&lNpl~6=D z5?*RLxDKX_!K${TBXXI>v{&{Bep*`BjAy6>eDs7)@~vrK5&I;R9l9*E9#6!!a2FDK 
z&dchl`fo8F6;MqERb(a3^cl>X50yI6%clb(=$DaUNrQar(^Bt=P|6d?Nvdcu8IysA zklFDU+4_&^kcQors=kIvt&VLJ9$}ChK2qnkwz(ZeWqrdX3Ifvo z1r^5G+g$c#)?sFguk)dPSoHacmG#GGFt3hpGOSkb4P^BEdl9oC*~IuCrC(gX_Z&o)X}`0=1(sx@+k~OmpCDjfJV6BA4WK_$$vu zvf_!)+M6YLV%+i2Tm%mZQ$HFdc-T{0IdNB$ zGpF`QLu%J7NlHA3g1HUm@nF++w$V>AwskB6us+JO}lD@%~!1y^u*uA+(TF1!o^S*IXJ( zo^Xrv@ow8bCnk4dN(7kf=R;uMZ2IwKowN^Pq;a1djDZEuQb~m&UrTmd#a%B%+0c*N zyR}Itp#AF<$RMcxLYx{mO4mhu=-oo#tbDX`{9MR4Bk|W{M}5X;g3_b0**Vmz znv?QDZ4S3e+}9+XpYhC#Ouuw)ruTe%v0pc-wmT~&bisGfACB3Z+T)xA-NlebUd~F( zUkDK0z2%w_GxWzLDAP4byT{N_e+(k8J7j8CUP9R=X>dxyx}()PwjfHp*pq3hjzFin zVWqzk+u+8B&x?r9-(fE7tZckt!J@nIB`b9I;Olms%$Nr(t-{5nq6ssbel9$HB)I{W zWzi0c^JDJW;XiqDSy=C?7%je2lkC7rjRTk%s7sy0p}hI7yk!bj41@aNXhxJheLkZmM2n5|yVb$0rM9i6c>x zCsDI*fR5txag+G`zLBFId!kr<_FX@P z^Roe*>6cN{V(Ozv1dsRl$Qnwue`YIHRMuEme%17ArK#AgG;N^LAWk&WzU4sCX;nKb z9a(us%hUSBVC-4s*JHg;q$|m{qFLpsi_OzV`Nf3Q#}aN-`^1Tg>f{QmN3mB%`xJ4d z2rV+HkrH+8tZ!Vx3x}Qnasi_&q&5O1=gjgB4sn@3sPfZfk)P?BB{QM8#Y0S2=5M}$ zTlWQwKd%ze<|RCw1d9-c`(mPP9^u<*;Tmp<$SN-_6*oo5oEBl-9U&5i{|A`{;65S>Bn033g}@w$w3XY&VSI7~8L-_*2vS zGd6IVZ_qs8@v77z(ZF&t63QgJ939p$z)dOcoxEdqQPgx(pz7}Iv^+ts)egN=85OG( zr2BZi5xSC{Z)M4`9e?>EZMXF})5&KK$JG>EavB8gfF~EHISCsCvTV5s#Z_e%avLvS zK8#oT;Eed5ys9JqRcFMR>>HY#K4o3jP>q&=+XnF)x{C73?g|T?B>LdgGU-uw>-YhsBtOdK z-Anb&_m^I%!uP>u2S%!6v(heWw4HCq`lh8yn{&!{8eYm|4C$qFtGghQL;hWT)=|;@ z1I`hyTS-%L~WUjOn<^y)WY+}(h-C8byHdTE%I7+O#him-~w#QWWleA z;PP4`z%cRZnw%d)Buwxbg;&B&ZCdNf|UTqJc9<$|CTn=o$Za~+G%ESu)5izUzWv0=zHc)x^oS`4F>I3O& zr?H#wMhXx6spFW}RDJc1+?25W)Fv3;R7HHMUBoA-sk3gL>6u=7@yb-$<7#oS2hGY- zS!qZ)a0ND0Npa|OKvBKumejF~OEK2TLzrENl0y8ns%8-n&&bJ{fMX24o)ScTT{8)G zUp*^q_c=IPp!cCBDlde__A^i&k(V22Y)(#JI4hIu62I+=s!mhWi>BS9_^~M3HFCRZb^-xa5c9Mvb(9#)8ql2 z_Wp@j=mV0pdG$)PkMhac(L&o<>R1}c^-n#*^HIgBGh0(9pA|^Hmfa~U<{yW^Gutt6 z(*eljp$Q#h$Z10ZPkwnKUd7`Wm zpM99<3d8m%_8Csj;fS*5dnHu-rE^9OW&ShiOb+3DaS^r&08ruF`5Wh0WWVm^fuj|z zb2AejB=X*YJ?}w;ry=34((72J@kE>-U`bi2t9Q(Or?)~)O%@sO%=x(&7)&+~-5{8H 
z*q(^HEYnRUQ9~ql2BSHZ5AxZ=ta3i4oPrL%AqeIVM%oELUzk1nMh$gdMT&q&L2Y$-QFz2+vx@qK81{ZBbA}i*4`czTer< z*Zj`S&6rL~-H2)Q;d9JnkG{)`2P3kLd8yBLJY&*!ox#t9LaB|MlRyHFAhHJ~FP5g` z1{X%SJ04oXicwQecdk^qS6F>x`cJ1OHhNNQXqynh8Q$GC+xWJTw2pE>=KX zpS$x#Ghgp&$A_YM!aU>5>8_W(Jn3KwM?6EQr>bUF{g`K5a$8bhUh$6ohkA#0&YlJJ zJ1bFQx02QRbdL*Uo9Wm_`F>nt8CQpN_a%{*KVCnJtKwp!*X*nETSyj%4q+rxv7W9?tr9&Y~EE zGDBi)U!i+C%L)Jy@d5WDx`FCE0BkDZ{6(ctjSY(ds<)rq6~3m||A4|7y)}Ek)A&4b zQ3dPGL>^r)S4lK~2t^tFB-wC(zPJGIeq_WCScM0lrkvOYA+w0Y4ZHX8V%@C=uIn61{ zCpRrCNe)=c)}gayk!Y8NCs=%JE;WL(PU?MUnZAz((n#YuJbxAlU{WIP+?sUXfReMqp*+v#$qbl_U1vs- zs9cP(L6iXZ15!e~Uj}P#e0a7UW{0Jw%k#%V=)&MVyrAN!+Khg?M09qZB(0(x4{EKv zFA&yFIMQcFN=Tg?`K5rH=$0RWJ7K}-8V?Wadp-;*vkQpdnC9U+?Q77Wjq!*{%|=g) zb@pM~ift90bE;AkXZTS!_adm?ZSEt|#9qrol{x88X2~Bv-A@Jg+yeXhPkpj(;HR#9 zXdKmp-YUq`rir9KV+-@xrnvDMB6KIDZ!{;%^%;=-XYyS2w^1@KPFYSU-f!*^Xgtt6sz0^^M#%a&WJmfo9q?Dx23Nvo0_4mx!1HL;-TSS@I*1 zt3=~hD-9XPS3u4JJWaL`L9Z!KoqF<7?MI7&v#D$s92pm8S*&t^R76(oV=h;SvS9O^ z)S<;n44EQITVMj91+Ir6${K+9YTMg zGbP{FahW;bi^AGmQ04HRlX@bkmn#$qqi#V(GH{2sS4Qtie13oKL<(#*-{=#QPzmUO zMNv|d4|huyIVMlUzvVp3dvWo6istU`jMz%0xblOJ`-?~GIF6|$@2or)Rv`vOjkBB8)`S9IZPjM(Wl>vUJMJP-x3c$fftzDvFqu50mp`m^Zt<8hTJn+ifq-=CQa)Yd|g@C)6lSG=}lI9|O|bCFF*yz)``fL+9T zV!5E?ru(?$#&A`}O;?um-9F$tXC~Z?Sfj3MU+sMvM$lQ_WJ+{rL%{;kCU0yxpvNf>e3?#^agO%wU|c=13)Cl}U;D zTCyikl$I|-!&NE@561V6j2US-nk2J;oMOj+^aWqUAw^Bszw0$YdO%gCusLs7)dn#f zf=d{)Y-O=ge~xt$_(1jzs#0uD>!gV@5n>ZV`o@!@^}Sj~;jG8{cCv`OHwiI$e3;n- zp6AlO*Z9lZD;@7puJoT@zJaI~)xdGre7FW|R@R8DZKscBZ>|48a1q6LyD}=Wsywv| zNLr7=l!F0#7b&itj^i~o){J}=slL#nhR!)T2dqEh*m*uB(uTh1zu3pJi0Thh@4D-c z6YdN3)lriDB|~|-)2fwNaC2~MX3AGy5<^JNI#7!}cPjilHf^!~b0B_3I-FPzX;ORP zSmpkLlCco`E(`+*dFH^63DfGI36_WBd&c6%b)z$zD6M|}XFIimcOkom)>gGB&h!x3 zkznc}>QP*F>pS*Svzd)eRC8;f;V4Pi!4T$VaTiKm#^W9fGzU5vF)akf_R86kNFcba z_M5v;ps9w%Lj&jYW!`>M&&=umCea6^duv2iAU zqH;Ir1P-Iv-M6`VU0><@owYO+Q34@z-CF#z(xYaC-S5+j_fH_LYW-p8?3Vi%NV9F< zXOYI^4)7&+&$NcmiYHF+&^a23+k=NIl>v=)i=@BhY~rJ0SLw;nP|0>T-SA+F9bq?PfU<+q;Ju>GexBeM=E2ouXEjxgXW5Wqo@snMKEvK 
z+7D6r-!?}Kl(_k4_l??5CFK8;*|AAi7?i}zC$i5|9q1l*!1AnYV4?#oZliPi4F4mr zKX7pu7(DVgJCi+Zo7zRdhgagH>(!nitvfW=4yXqO$jin8S|WB)i+2_EL;7g8DT{@s z6DH>fMdh%d?970Q>59ATaAA0_nyb06ZuSUfSrIcG7MG7#sdkdf$3hs=sK+z>SkQ8v zNSHr?j7l}*bTO3K1* zKSd+j=ra4e=1u0MCap6sA(#Pd4*!SB&-51@xtZ&wcMggJS`j)_{6@v1Ho-~CM`pH8 zolggeJ&tctbFg!G5L-k`ty-)&%Y|xkOk*c3`*S9kAuVvfu2C`W*nrn|lGKGRPrCAv zRzg3-r|_aL&(P0Qq}|v3{$n_4%|kUka?OeFQBhC{Uz3^l4625TyuO<{4YQieK$66=tfsrC!qxIKp)4u)l}JPL=}cc1&s z-lrk{b`b+3{{AzX@ql#Fgd?IP6qWsmp*-Tnw=nBxlbdtQle&!o+qXi8&U3$&Wv>nd zH$$Z|Yw{x%`NIR16BVB$_oj(PI4)f{Jk4MEWG^Qm5rZi6u}x^XWMwvZD^w$CO%~~g zoB9?P(FBe3v&Ftz8XH)9@V%7+3nDx-ud12Umo;L62Xk7h)p@FKseSbX;?Ts5?zlOF z^KKwKS`d3Ao(sZTqEQFxI`m@2qy=AOOG&8o=&+e(Qq-D_p1&x&!CqL)?B#>%DOa+ z@>x4gs|eNk_e4}BS1dj+5M_wkp;|3IQGCWz)Xg35S{V;T*dUokM{>rw6HEo@vo%Tc zJetHn$^&eSmD)2}$@Z%{^2l*fCu{$KNCS=U7hdl}2v6;GZ`)3sOsu1h7pq*I3rSrF zSOzDgukNt}y*8%7Hg<-Z)T!(2CA6JqC&65L!oyO#wC!FB%0g{wmqI-L$0hjY{E}70 z`p{L)+C+B#kx`9}h)5o=! z)^oB`75SnJXQ?08uj>Ia+N&;hHO%RQm|n5W11Y#Q)`4Ze(Oh5B4w#O0LQeT0Fph!8qL<)4@+GeFm+%?; z$-giz+C~BWJBHB*!kW1ZZa(kH(nL?B=8qw{>WP1FjkAyN64LfvcUnZrshJ@%A5xNo zWf-GDP(uu(v1uJ%O+2#|n}b2KO~nbqn`p~N6+&e|#9~hA*a_=f_8K_h^5K1!uU9HX zG}(OGSCt7Uw&JFli7{lCbmy~kDIY9HHuT@WPt=k3-bOPDg7BS=F-xY%Q&21T#`JGuSNyd>6B7G&xN zgr?Zl>?|&0=w1&+{D<&^cO#!w+QSiK5f?!>6?@%6 z{U$NZhQ3yCi}aDoUe3cenI3Yl7BB;GWfUuaQMt?>Yxo!;v8X8-l^kb;t-1m_KKi-0#8pWV1=&xe>OIxNn2xRitL{Zi64;Bti@ zE$nC)G7wtSiVbostNFNC(Y{v5?Iupe_1!`iQCeX9sit+!(-6a=KYFo;0J<3L@vwe9 zVR7ZV-C?^(Jr$7v5@(23_RY@9T~yPujvVcA80oLy_k~9r&#~Yyp-0T!ae%a-pjbd@dJ}Mgk_Fd11rCfT~3*-m!zGH%Qrvp)=Ai=Lb9}> zki6&il`b6iNKy3s?Tsj{v|%FqgP?TPY5jDsf9GlYk!NA}a2Id{u(yYEb!zwY`4LT_ zPv`m2VK+dgTvk>KWZI7qW$l_QeqJMSLi(Aeuhv%Ge8$O?d&ZT8kOIl)G(-kQlb23% zaoRVVBuZ3R%tRB+PQ9~X=_08}&UJT zI<0Q8FIvSF?ev?Jj3yWx9)Bm^;jgLZW9^3uELeLS@IeBGr*jPLs2?wEoi{^mmFEZA zFZ%dw*3{bjFZK*#jTYl({3Kh@Y4PxOK9-8cKdpMO+qyj~hA#N2szsp_b+9oO{nw){qo`!}d@`Y)fN^2WRa*XZ%M;^9`% zCkWzkwv%8syL^^y*mt(cG?T7s+wVEPIggW$_f`>eYIZx=uY5o^or|de$v8Bt!O@?( 
z6n(+McET(2+)P;O(U60FnTAILyy=9YVlr(^73c(JcS&olb}-NWvJ}kX7sF&OLkeKk zN%p69No@UyB7Ut~p=3{fwydMf9cJS1%NDA4n+5C#LCYar5VJ_S&`%=cw(N^5wW!(i z&5$U*VjU=Z#A@|_>wN0%T#Z^pCLU0YLB?C{q63{6K*_G3zbQf=%%#M%aN8^(C-X%p z`>uFgS@n_LJSmA0yM0DDVn2B=Yu=QAn;cEx)jW=6rTJ$){(LPN`y|o3!}socl!Wi4 z%bOg{Cuw{tNwmeC7q_FbC(Xf2cp0YB0}ap_aou97K&JcaI!7|jCg_;d6#8N%Y;R#l ze=j^Ia5e(i*%tS(cz&gU=pC`2ngnNTU+Y6-Jccmi@6!X*zvDA37(ghIg4+Z6U)I^G zIlIFug4!%tyDc*yr(BcDf6;-FGDI5@GFpyNe~VOY#BhB|#S9Z^yBro~WOCb@ySizT zCxLM>PkM9#WUbWJuTf7I&}}4}%hMs^6z{_3kJa zC;T8DVYw)WOLcjv+Ab^W8t(Xqo#&^mxB8F)bZ~C!mup(0oTRZ12QR9YJjNd;r<=o= zSrQB?Y_Va}<`re-!H~(dI7aij>++{EiAjvZ_6ucTLK?H2K)Al9foXEm+^R1V11Eq4 ztOc+Fm|QI3F4K2oWBKzs;~RU;_FhZX>wH*op&dV~`LQ=G6Ghr-udjtICxEd`w0RnQ z!`^5zX+})1c^g93Bq-lp!6Y$(p*!0mx6fpo-lwM&i4ru;{y<8~Yb6j%ZIB9-pbag9 zrz#sS`2^0_HAO|i1+4h&Qc88QXldz#^%*Fs;0z%2K*+2fSOtZcy`85F<7;KnpdDQz zJF0LXWzIiR+^MkS*pJi_R~OxbW3_Z`L+Vsu;yEjDA+sd;R2*$m;90dK7I;WNKkVef zTZXV%?hdT_BFnmOF)EWIwnO)X1M&}uQf!YvUeGlF8a#*T44V4~I!S)y1nIl(a1Ro{ zHTMaY;kVGmbK`SO+X?u-Y}ks@^SC&1RR3#pv*c+9!{HL**Ur|PQdt(2tM6uSVQZkI7z<{q&$#79|oR1;3``gYdFa0pp)Te!f40Uh%IoyAKC`#gd%OX?C*)klX* zulXwmO6~)^tdn-ss2WfT=^jU{4{m-<`$!c$!LgWtbDr#uUAL}Qc&6wEw>3X&lC%-k zsXt?LD^-=UyT>z|fZAE5^GQXT{>)9$XcJYt6=Li@DZ&d~IXkL9dKl3b&YUk4d7s3c z0{k5G*LvGT&Ws9kJcDqQ?FTb%=^1~;_4VzxA*EzJfG66i-b@wVdY$A1Op8@j)KF4b zWlYVb2jRXsYlRpvpS1`c?jf;GRZd@R{dJBFx-rN0;N&k7o1{mByIydMf}HjKNc>24 z%ZTjy{kLPGX$|q$zUM^5<_yX@a+~Auy#t9-uk>}Bx=aFQ*OxT`PaR?Ho@MyhPTYz< zci5b>fsO%dKh(LKI;QeK)KKOMnv(1~pB6-J;e+V`R&r6p!Ylzu*Qhk?D8$^g_+?M6 zZ%a&?ecw$%iz}iJL?}1C_bdyrQ&cMm+C7hKJ8Mz(;)5{)25hZyRIH9>?TDEy7{fEu zZRp;Nnw+URR~8tG!O^Wj?^=ILTj=CxG0IvDeGzT;Q5;o=(W1}0g>x)Pj6^mH8907*QgvoAq(r3B- z&_FWUsFGc&^#kFR2H+L9F5`x*!%tdAw90lLd}FodF;5&|LPKPddX*5&HSse_BxRse z4OC@;hZyr7q6Bba(2RcMXXK6GdXQ>@rO=uwB7*WkhjteL~xPVK!NJ)^mYb zEMix=i)TFhF)Zy^?^!oM?yC~O28PZhJE!1q&_M@nD5?>Frx}BTr+tM)-p-I8mz%k4 z$Eh_$#dK!f%EdBUhk=Otn>kYxbyfi>DCRsLtv$plMpIYbB;gVhFK@$|S*&VOU7)Yns?gLTd{Vru1Us^d>BVu5rVtkQO0vn}2h?T{ zNxs=pY=b;a{qlfq#)oI=94MH(CV$%z15X_FNo|2nudCK 
zH3avrfn+e{@q*ZI&Ep;hJ6AGQrC#e>SEe3vo`$Jwd%p^yF$Yw`7K}aF%YlL@<@uF4 zc6UA0E)jU~feMQnj8hAS06O+k<+bARm}sc~BogK}0_+(-;ZAZ(@~+wqhG;)xHWsb5 zV4Ax6B~*hjBc4sRL_@+5lRFUrF$W3q2Z zcJF?42tb7TuvG_=`dfETzhG&+B(0|+ZY4R`js63JI=0aUgmWk3(;yQ`0-$8OBO&Q+nJ@Xc@EQ4nQ97Z*}G`upp zb7D1my{a}ey2XNE{b@Z|V^x^}L(v<#HET7vYNYo&yhQ{ZWf zk8bJO+^3Y3{@a8lxH9Tclkvmi1l7TUEJ>wX1ta{6gB9G|>_XZlZBDsM!pF|1g3I3F z&Ze9@p~nga3!UHxEnu%=(Azq-|!V)8uJm7UR~|ByKTQ zjaw9*PK;|J#WgNy>qdnTt40wJ?J{i=;!@W{qu9DDf^|XSf~JcaabaA52ntQyq6owV zP!WA^G->-Rb3O0-{`0%8-&Ou-QUYJT-*cb)+~<7G=bU2@xqOXO)^KgS96(V+fbUQi zq+RcySdTe0T(?%-g;=>0gbN3SXC+1EjJqcNei932u2LV?Z7iFbP+2U{Er-qahg zsv^h828q^*jgr$n`SSxY4X**{g5lIv&QL71=`P^srur(v_n`|^tH40LfuX~Q&NLnN z{r;c|^vb#39l)~;WB2qPB}|bg7M35l$J?KwA*{)}if;3Is~Md6&bZK^N>De&Zm@1} zUm+K5cA?l7Rg-JOs?ue4%}z|?o2-#>&6Z;8bMmFD8MAp${Q|o>ug+xIPX$-t`g~0} z4ffW)CJUVD=!GA~X#%8!Tsky_{lQoP)hy6%4@@&;^ zB}i`xE@&cFQHxso26NADJ6pyzhG`SQ0^3S5IyA|vS7M#*9r}JX%2U4?WoBHdL*-sQ zp}B0h)3k3M*yEPySPJm>8!M9RJHo13*%{W-N0{gCP9W5&n34vINn<+xeQ;vXP>DW% zQ%9u)Df=_M?o(DiTJ?q?!N;z)TKaHU@38R*XQ=r&rR`*qgE~mszNCf<%qKa!Oj@~h zHoku(C+R!)Se82$WEG`89mu|W;j|b@Z|SC>%adH{Q;bosvZB{tT)IjP?%C|KSULY< zFUPN|{4=|clZv661mfC&AlS~=n45ul9>4_f#h#reD>oW^=sEG0ojtp`6D0P~4s=CW zb;GT-PnOs3W*h>%#+GG(IW}hoCtU{2yTo)uysR$qQ>iUNbUy-Y{APA|ulkAo`I%B5 z;qKfne$FvX-&u+q%#HA42yX>`Lcl=28>$OsH*5-!HzO*|IBPX>Pk*l9* zSA+db@glbG;mRnqu1eDW6PuNEqzd~5xw#K`I zS-*b+N>pC2gadck)u9I^Lj!MFG@Un=ac`On)>Kzo8mG$&F^;jWI0R}7ki$8XGaol@ z_bywg&?2U-JK;fjDLoh+ww_fAifR_7hwT4#a#ww2t4M}NyfAZpGweR=+Co(CiBeb@ zNbR9AYR}f+=!r9d_AAEonpO(ST?+=hpO?s|?B9R*RIZBND z15i;K>KsBS8*8n!1HMCE)dkHY@5*$tAX3HyWWbCbO~v%14KEPf{!^OAxosu=XGK^&jrcin`4J!kz4CnrT)`7B+g-LGllwNp_=Qer6`uH) zhJu$Q4&@6Hhhf}bkM9;IJX$)AXk;9}V!5^I(IgL*z2_}=a>~^-u-|G5C`ErM=q|Oe z!Sf}bkODKu{Rus6heNon(*UY$?ZNYF-Q#0Cv1yi;Ha;a#*Lg8W^EM@TSFPh+7f@_| zTJNZ%FAwUv;P(>A2yS6fD+0IV9iS{8Aw00p*7-D>B<;`==7d1fwTJEr-MW=N9Tfo+ zHEd^)#&sKX7?bb0)7STw-c;To!#6tP7Q)DJh=O)5aK9)4PAz?kp_LD-u75OcrW5=p(_X;OsP(QMVvelifk~Mr3JOA{H7f1(>$;g(njZ%g$cXpP>cI5% 
zjH0J?GxOS&@`e~Xsz}?ojN&hvB`OAr+8?lycY`)7)n%-?oK)}F;O_RGJch;h+%zlZ z1ac6#9tMfsC+gfxlRbY*ta54y7+%Qdc8)4OBGPCP5c?x*k3!+2huvsQ%shVX*;OM~ z-bZnn@aby2IJtV57*N)5Vg`6C(Mg@o9`(BcQag8xHc3|h+RKFg%*!q|pvV{()-4S% ze+n`GUTxc(Yi0AWsrD3kq2d#~BCDunCMqgJo>6+LcPg>944f04oc<@BnibJ=cJ7PL zT3X=@v2Tg4uUloR^P(9vEtn;v_Iw`5$Y@}GZNw8^_6UtgpOqVbHY7IPx5 z7ue^F$sUXHNE?)HU0o}yR8fimNw|~WJal3PpL9|;jv_Z7$;vh$Sn!;T1FS2Cg)|=E z?Y-vK=aD4Ge`{osHS#29U1!E0L?wpb2P5C!HwT}Vo}JjaUSnBlLA`VT@0n| zLa`g!+_xCqJ13*BfYItclws%#Z&d!a8x_|M*=%@+S2ZN$&0;X$89tvy9Z_CSUZ za>sO50C#hpI_ytmliAK-B4Ex=ohy&Vjv;RmH?y(|Ry$jyRhH*)^C84|7gX5MibPZ6 zrjb6c=v3Q^A==@w9zsGT#BF{%=Rt^pOsE}a)&F;&r!}c}S`zE5vpod{gfTM0DG(vr zx#FXuvoG+n16yUCfXG>7qf-PDo6Yr5fsFI+3k+MGV&iiSR1Unu3?D(+I@Qd$k0jXW z)kXaT&>rp16S}5A`r~dOTg6CRg1CT>#cQ>P_4-Om<<7~`;{w(z`w z@S5{I-(tM|qyN_0M^Mo#W};CymUVR3LQXeAlQw2QA?)El0npC;3$iwRoP#DF?ZnGA z!^`StEUWnANV1>}YEhqG-Jg?Y)}w{KV*pNbADyk2XNXiu-gfGZZI6&tpD=S1?a?4# zDpCJv5$5M5i;yKJk!~S#ZD5>BH1rY&$>h6lxp7)XcN{wcNLuE`+3THpZiDcxitRQv z17iqze*P`W7T_`Hpy@R-g<(~R6e}*WcLt8N#X4eFV85UWs2xv%F#%-UkHBqcE zCxaE-GHudb|GKWO`RTG%KB;>#zEvm0c%;!&CS1}z2nf>CD=FPC>0*vA8`S*tRYT5e z+i>h?DdC6wujV0=(R&6-&y1xx*=4gs5vj>fru#0u*Si5|wu$nKrkR0@whGXAV>`WVsk9=p0@6hFcR9J zKEKBMkG8OP_h7jsIpq0%^^QV&^LltJyTmD&)jVHm3Cec2`C%nRB7f6>r0T9Yp?rr? 
z({+gmll5IF|M@c3-}p*)z_x+U6oms)y@IM%%wtojZPo$?-;`}PR%X@rlObfTQT0CW zc*q<^YJsZ+G*qW6o~8xDQ2&<8#JOly7>C72EpzOj>(MdW#|>s~fbtgvGiV1b z72*dOf%Cb5((7s*&NQtPuh)4~+IS{CS1fV^w17E+pr3mX`KG<9sNSm^ydYDQjqqWP+*n(#kk6J zw>62DLv%@@P&((|7q7ZPgsqQkRw7C-%a=X`O)!c!c|H*}j%0O2oQ73Y?s(&te1?`= zDzJBjRjH^>apufw9DY|2QU)9<1Z&79lEjFQziUbI4Z>GFF3Wdx9&W1NiJ1@OEs5`} zDxuNmc0*vGFxX5mzHHp&lYyaKHYSiAdj&mrA4}syV){i*+_Jh%oVeFH!c~i?smx=e zch1+n*@jcT^Ka}`+=FlY^F4MNgKCVLh+98GH`#|jFPFXTwV0RRHs`z9HqsU4rSci7 z+_vdGf&Dlu-@X^{xSh-8SmI>vQ`&m#^e4_WfOxhv0|@A>O8Plq=!`=EQU(nNWG0?6_ZnZk3UCbGR+RDLd^AJ^zKoEqXmY&A9LK>Ff$ZI)F#v zwDPzRLh;!8$<5Aq)7~lGo%D5(c#|KZtX7}5DsYSe%0^k?By zOzpjub2B|JtWqE7v@=mxDIzRXUNv)DwQD)23pc>6g5E26%q?~8XVBd{f~FSs{Xl4X z<@%*K?4Z=Q1OnBy6~045(3;`CX;Rnzt)e9K#AMq^Ek%giDs-{lO62K9z794{+_Dtv z69y=%h)Ty86KsC6*REXhEXDC;BS2N`Yv|^IOR6k>VlrC4fSE8+fI`NFG@c7t1UyPR zOPf8-2&)PO*J+u-1JxkOA_GXm_h>%^#Nh$hhUuT#e3`{r`CXqTBHLU6bKCv&gjssf zFM_pC2)A^SZmc-2zGp5o^}(VtMlJQt5Z3qxHGz;~rD`f4CK}8MmWE9WHg5@bJS>7e zCw&^AIG#xrXbUqXXbuC#k#u3ek}e-W_^r(N-V)FFETd@iIec~?^TU!@vH(|kqAWvB z_|408<2>u!M~z#gpV#amQ7KuLcYS)`PwLyhug~JI)}%M$xa0zY&tSs&Y=dYZG(m^{ zC?FeKJslFh4RR)7%j*h|TNixiFQ|w_yXc0yt)0-#Ml5~0rv`;qGwuD!a7Tk)s;e>{ zO3wBz4jE0xyoIIHB{{pKR3RY$}@GDSmK~mw&-Cca2;^OV5Or=L$SQXVRGW>MGnmW4SJGu1@ z8mir$n+7v2a+Gm;{l!iCX#9@eof#15xGB?Jv_)4%h3@1Ki0N)D4C zs+zk^-|+Xu*HqkDm=+6^Z`pNHveQmDqEo9aC5veKsPict-x&Bas^^Zp{!4(Pp7X$X zdfLantnF)EcuyNOOq+Vo7qRmw_~XiO5b)sUk-5Kuk8VA9$@@xz?M0RnFx5u750cL8 z<@B)BlEmmkp$xs@9YJbY|DrA1w>w)n2wVCljh>|5{nkE;!T>&?d}g)6awE{;-c-?gPqys+Me63;U zIy>~9wM}5YlyR+vGgN_g;@3M(ois~VH;t~kFNM>k zV=dm5>wwYhn-3Z;4f;6kogm#1%P^-!K!H=jSFQ`!-W2_uS7k$q87>1xv)@V;(ih`V z_H^V%j5jD}fx2y(09=go1in{81hzfKjr%M=`RmaY`T~QIwGM-7EAp|?LJMfo!N8$8 zO0IBeCH_WQ8Eh$MRS(9COmfaB#3Z-35qh{#gSk@O#{Mvef`qE`1~|?WN>Q|(Wlzk! 
z!pz6vnyk%(*p!5EB)#NcXhUD{FNDeOlJBV!%AP!W@BG%=p^G8RQYJh1X~u=Ecjc8% zg0xmW|GtYv*?_y=C8>_LMKDfTz7XlKx5x7(*Xv4pk8p+M3m4z7(gz&Fnvptx-27+& z`1~h&fakmqT@35NE2RHY?zI2dfRRlq9**x}RgS(v2Go1_)7cK33PDe!lSg7og7f(3C}EWGCyqn)G!T0G1= zsRiYFsY`iQpxxJG+U4LG06!+R^m|z1mzN7RA&p}T_{7G3B0w?n+z2N8oDH}6VCenE z?~x^i4~%;Zs9{jgAbfuU(dy|VJI}=vclV14FIaI}3`FuAES^TPR!COwHr<%l1??}? z@k{@|a#mKsE_dTKDBw2yqO|DrYvH49#zG9DFLl&Gk{!%;s_JG$JNXpmQ!nuQ@WG;V zaIVm0icU^(M~4n&=fcu>=X3XM!AeT&5F@@laIw*}+k&cdoV?^0L9Oqvx0YSQB508& zZd>q`SUA6ho3`gojxJ+7V*3S6mMwofy$iq*PlZUmzyKEcHw&Bs7Fd>0W?Zlam%j0- z3sleT7%N}mS)u=h6~ANQ;_{=LeaJxC#m z+=u6&V-}}|hFbG&IwKtQ{h7I|IM$kbk#nyRj<2W<-pOBpk(mLN+w}%$9(m9I91gdL zY`9ji(v1Wv&@y{}taU4mRdWwy_f(^@nxE6dzhv$bTCW?RyMN4x4`zl$RBk{dp-^P{ zF=J^ENP=wo1!jHT^WKXsKhl+5Zy-vGPi8ol8)gfV7Ky9$e2b)323>vV^`qlDGxB?u zhRN)HjG|7##@4~f0lTbEZ@dYyl8}AMEMt7O^#WVH{FfMk63_zCjb{kTD_f<86esW4 zn2o-jbDn8+I)K^9Ll(^BRm@n(pt`I@gGvZW$1()85%CbWDhJwR7jwrai$@+!-q(vFQ*`PG@V z0b=XFQxx30ZA(k~O z0RO~`I6B4$M643PlRb4E)DHTMAUR#0OZS$BD=?IM{x<`5!>nuS3kWSW>_cXtQ9$6l zf8~jXaY)J}g8R%=)}2oM*BTYHGW_+p-#8a7Hk#?VDx(BMgfm>(I08U8gm*~opa`qr zxcmf*6(5z!)%SDj^P=h624p|uJsK_uvdVV2;7Xg+!J`|{3;X># zk|>r4pUrF&Q8-aa-9A@URz>}3QJ9-UE1zwm?}S!iu2r2VTkn(TI=l6Ijol9G3CI>6=%m%~=?(AWNvX%6CETPe_EzNu zO&S$i+uznju?+VY1YoIW-%vc(zEdUjJ^4%UOJ4GY_g`cThL1NBSDSaZQ*=}FLci`P z|00wY^UhfnK4G>hklCqt8d_CzMKNTgbPtl8xQ}oS!}obx6oBwj z!kX9IRIx>)0Nw8^y{fbMSs83!@NEI>!z6>quG77$*=uPpu_bGK-$_D&K*2hS6R<($ z&kSSj)E@Dor>?=GcXBgiEPWjJn#NR|R5`=#?s;N7mTEfGZv7&HYRtE0Ke1P&%fi@wZjO^A)BX;_P($Ne_jGejM*96% zL@$$Ls2^T%!sy|T>Lt8uCgxc=Z`Mp|xABi(g)SRtW7!30NBoh-Rw#3!)=K}+h&89D zmp4IplhRs-Ot9<1Otw!<+YZA(#aFS{NiTrTn#fcS{i zUJUSfHQyc(6oOEKqtxx|&h-m$brtYSQ1!WOiHc;JD7nuu10C-1>CW3cg{|Wcf)ddt zzi4J(2GbS!w(N>3MchJ5dSeg2EfKmbkVPc7BkXu`W3eG)COPhkH?Ds$02~g03=(@| zTLGvM8D;ILJ~(+1|T+EJs1N!r_7eJVNG*vtIbB=hd5FB* zI!&z9CVdoVenCZAdm#nHtaKr#=dM?=COMF2?$zVv12AjeoJn05$OG+}M1IL+_~J0R zCi{*S-W`t{JfNYmZG#VeuKV8}A2bU0H?=8Yz7ZvN9WP3d6wG}xWhFC?RfN?;JR7XE 
z?=g^*A#;rcC-SwHzByvk)%8MjD;(EV17J{;sJ_Ql4Qf(Uz(0L_V_obfrtU@4$4LCkMv1Btu`&T|}$l;g$!$CXf zfa&vR%|9)a5smK=Qv8pYSBDDbgFdEG3e@hDe8jg>HlL?;x2axp z_lQUzOaW7PqzjMk^nLm3Xb8?f_o3Anvyg;!Nfv{<;g2*vgPe={!8)}?XsAApmrD(e z@UO)(Hc$1Q@bA|5(E|n%%*=7jgPhwq_tMH1zp%3Va~Xe8GIkFJ?ie67zU}?$>;u>0 zNWj%wQ!i=>W9?PERDo#`Dj?Y7Km{~3Sou3t&~@T-{L54T>c}s5Jmv&<98Uu(Fj7ubayWlgi%=nVjbw?Ad^k)lA{jUiPUpDk*OjJV)>brs5EsyDu!XNz!=m|vr;1`FNw;}pZw*ju*)Bn}kdyP9iS34EE z++Tjw>D(JaFzEpObm6Op{CC|*$3H~-f4T+xIp@nBYYVqDvp3b-dqjZ! zg7ijEZ^IoJ0~G=Cv*1Y{(w5_Yht(1mFdiEPZpN>3BZ?F`t9@t|+BBYL& zg7NYahK_Z!UbP(%Y$!|b_;&S6u#<4_*NG8(6OW!N9(?oKLdJH#$vlVl;x35IoJ#9Y zSkKbkZJUpg7#d)C9y$V<*uInr>gV4t%?p{>)hL_;EE=hMZamPtWyVYhGuVGDz2m(9 zjw0gxVbr_eIxUZ9z7T>D1L}F2JCq|Y;-QywGI#WjH~&&nvD@TX1T8yf^L>Ci-Up%u zhOza`^nl_uaD}!1bcN&p`U4IpC5OlzPmq0ynYPCpnm-^r|*wBeHy=7 zBefCwV@}^6^O=Df{4w78kMY(T8o&H89sG~s>KYn<%%(T~o7!6fUi2U1t^XKr{fFB; zIrjgvHEOpP={yq-j;e0yk7=|gV$WxRS%lnd-vskgzrrMHY@n$n|pu$@h=)0 zf5_nf3K?LO!kz5hH-4|)8wrb&e_dPr-GtL2 zf5<>X;}049Ap^Co_(Ko=kiq|FJ=of9Rv_HQmP5a|YI;<-;qtN3E3bX~SIWh~e?cox zRvz>FT=5I%^HEczL(Dr<23(7f7>(|$3n8bHP z?;LFY^$qZsU#4{)U3m04NSyrkO0hlId??|M%l*HW9Z6EgN4D%PrtgIRsLqU75uHV~!r1!zh-;bG!!}u>huc7fh@j%c)`)^*$-_9H9 zaIx4R9Ew^Y|9%yJF$3%MY~WxsZuid}fm;i1Htz)Eio9&DCW;&ey8*!>T2 zPOj1GD%@IlhZ8Z8DUrKcNH$JunKIDkYGHI;TI{?Nm#Wc_pQb32n2kafyfqJMqy^#J zyjGvgKHIrBTULWorGGYZYfSOk&&7YzxVP|k&IgC03g+(i*Aku|cTtv~B7V>#*uG26 zep=Gp$$(`zEdCU(dBC;nzx;sD)|acfM#nU^i`T& zrhToUF>n7((l-Z!&|UAZ23}Hrw|{N!>IacEdr!a8f3jh{jqzuV7No*S+N+__;DeK| zXU3VaHXnxPCxqm$1z)oG9O0#O@?beFVw-O~I<@QQ!8L?k-tezdHT{FAVFlum)HzS-%BHh-=|qs8ay{CRmc?9U7N^80bAr~&lD@Rq09!xazI zcj8c`l#WZ9>L9QX=S$ILr(_lK)RuoRPuG!Z-+XQpD{Oe+oaLE>mp)!_@xI2+wFQC+ z_*F17j@xU;T~Iy$ZY46vV#PRXhbDhyyS&=Uc$hfB>GMc<=leYTyCuH7xb79oY5FVa z9?V!6^-ksXJ?hm&S8Zc~bb>z2An%?VvWlZ5rQK=g2n$aQv`W``B{j;=<1VvO=er ziDk2l9^>FQ3?Uy+UlJ1?SAv%RBCK|-SPdA(?R?$Sb`yy;HL+Uyi@l{6`Zh0xwx(-M zyi03+Q{$IFyFKk(OZ|TC?j=6Y{FqhS}lW|r ziMK)W=_#&r=wJmQm~Sdijb-ihsuEY70?8tC$bm~L7U|Qt0~&U{z&NHY{yji+F%dW5 
zJ|<%%O|3k6?P_e`4xU`g!puf$xK2nIHR&)Iiq$AoZjx%muaPR+Y`k+1d|o>B#*>n< zv^B7q+uz(UMQ)xm`AJbiYOKwQvsX3bNn|hwWck&aj5d1@l&Ob>C(C{jvB^7c2x}nw zoJ}eJL^s`20n}IP;a0O*I!h<<1vR)ZQfX&E~ld>pFJ4zj66cCIYmqAy;p5FDm+9$o&Csf zuv=Jq(`UQ2Y-eewZuo<}Tgp?E#NF8{8<^9082)t6qN)%E`pMMtdm4=HoL$n{r8V93 zhF2cm;xQEK<|at-rVQ`=+z~pMr_uQc^Qm_%XcBPn;jL$++)a|)k%o}g4rSD4U7Opg zm5EGDdM^55j_EovyJeS;(dI^|jMArJ{bxE}CpEOvk4Qt6rClucQo=hT=j9FrNKfVL z*`1?g#y!Ex8`zJ&ikt5-ByOel-@LfDUWcMyNuT~-gb~7U8pB^46qb~v^tdOvBpKET zYZkt%mc=EFB^maaXJ#w@E->QR2Ew#B7mC7BNvbNxV=`y28O?%MG&U|Biqf6^JBPtJ z3@IzWYFc%-@|27|(YM}m3a+R=PMo4wavEz>$-EQY@yk0Mj%74^!cRB9o*1b;j7cMu zx)Qqc=Ix0&wAp`k1*8!|v1J^YQfytqzsu$oDUbGk;X4+b!9gbDGC908it5d6`0u1x z756$!OT*~4#JFhT!9pDb?-m{7k`&e1O?OR#i|ZBj=SQ&NF3yAAtrlM5+q~xkE$`*C z<++d>eFMr+pjqoWnlfhxEcAfJhRvRfB?=Sulc1ep5x9PX+}-E)L}$}_X?+0ZB>lz} zzqZPSiyzcSM(JR(_%%~~mFM*q5$$jX{xLS825~NTLSi)H-%;IKwpqgL8N-c<6ytTrla?$CtXl1XjR| z5P6HK@pq3K&;iH(-_)EhV^~uBtWUoFiU zg1!Q};Jly9qv6b4WiH8?fzk(4w8#k}w%s#wW!)x>5(fRyCuESU@V-KU$Pa$X`=dqJUnN zTCjC@N>%#zEk%^VZespj6RY|F?rp~%P4LvyiH9_vpW0L70JvY^70sH!#hJ&R%;YPy$@Qf`cE-L+rFAsZ?ng(MrhVKebpF2Fu~xU_++ENBA6}JMJbTUx`uU`#@?Fg zG#>8jqevxzy>s0kl}&i1MBeK7m+$YS9t3UypBQ&sV;eFGIlZQ%_|0G}C^&K5kuI|6 zGHzkoLW^?pY~}}k(wf?{&%@cPBO@h>H;r#jY=!0~Onq{WR7dw8Ol9*{S{J?z7GyI+ zFGUC3S$a()<;b4qldsRt#mXnnPMa;MYRh;lj&#D><_{RDJL7J zQ4V8#Im~LUB{6+E2#nyYY!e19OZ8~-sX4Qq$mQI8TTgrIUB~crTvp?%%AW3kjLYSx zqA{A%yR8|+Nteq{)NF4#Rwuiv2D;mC;;P!_H<3kT!x7)>zSn2q%C#iJg_>yd+2k5j z`HAOOV&SeL8@yL{=X`r7E5G0wz)7!(?na7Jf)ANZSLD_X`TNDtYQr=>vyI=|dP_+2S8|Hl@|{v7=T{@J zM|->SsKQCsRCQ^RqwLc5_yAT+AtM}DNyN0aF@$zc6^1f2TXGb~=PR2GiSdS@Fsy4D zbtCRllVsMCF>~Jq6>$o5(6cwX8)O}3&=lhwRlP!dDoKdYZM5tx{IW|$&)&E z21wSeqbS4PXS!=+531xY(YZC$JN~G5!J*^e+f_?FtNy7v8X+dt6ZNN!kaC@-U}hj} zI_HLXh+!Fwuf zS?M>Ir(lx1PZ-9UEmI8^u^+4Er$&}DfrblY<1PK;G{e465qgWGRyG54{xn}8$cBIr z%22^`AIW$+<-yIKZ)RDq98dVLBaNLB-N}`+>GQRiv~KoDt=acJ=pNRx^evivT{2y% znK(-d@MVI+D6h3uN>_fy&_$p7!s&9GXc|7&ulfuz&qegndYGP8X;}c2)US(FK}ron z#LOe))vI@O#o_JF0g|usX8wv1+><7Q#({F$L|-k*zumld(jL*-*X=)1ttag6&+jwu 
zFmz0US6+4Kvpo4FtS%F#8?=9UF_kT>KxDqsW6XS$Oq(r+vtH>W!MoW4xGan&&@OKd zLS?)~Q~pBjsOV^ZwpF=)xn*^7wtmk3V{Z3n;WCkO>1Re&QYtB-j7*z}U4w^Iz>3JV zLr54yLnChAOQLCQMh9!wL{(<#x;j^0YD$B%t`hKRZsF95s~8-2G_J*o{g_+zaZlIIc!TeI{9j26nPyhTa8o{ok$BoTP zyi51=w7@L4_;`oGgrAIr*J1R!3*4M9p$C!Z4CaJOe6uLYVg5!xFYPBs$@5mnpgdlS z1GI9(K?&DhI~v35Ni=svLssZr#=_D^M;I&zj3=7GX)>iZkm#lM-@8q;9W&&358k`W zizj4xH6>4R5!_yCGMs`FxLcHO?28iK$hUCi%3j%qz{0Ll*8g_nsZ(%$`p>q~KYvzamcIB=J9O093cK_V&L6{+uhwMjd|^pUs-b7)5R!g8;^g9Jyi0oq zZ1f2?3sN~|8d&O~DeZFWx8NA&ZJkzJS1vHmMX?GK%37XUpvP#h1%R+HM+K?ikUG2k zeTeqOHC~?fk!{^^a}`j?+Vg*=mR%ALBE|ax?V=}-EHY)PLhRI&SZs48hr=$n8M;ug z;Hv1qZIL!s{4@@GvD*u6x9K>sHk4R%o@#$w+|8mEcTS6!-=a0ndnoSNHr_d+_>o-u zCImTiqM+{f(%{H^y*7OjL1}UtOhald z1esyK=Va9kCnBK}BWFz#j}`=()gRaR<;ouV)?i~`{vpKN3bq+ghi)61M4zG-qN{Zklgn0?N zd^-=iM!T|?a6BFEyp_qg3GuGDOL#!TF>@bysh0Ih>SfW@^s*q-c21ii!@K~N5kTwC zkUy!fayF2yXZebwohUaavN|n1c1hH_-rMVK*0-NdEoM4*$XhxsI!+dipCTstNFP^< zD(q_3+vw2}YWWFBut<9{(1XRxW=cP$pFt8X&yA%_X9jI4ZVW%VDx1G`irDhgbP9kC z87q05_{-?UqMG0-O>>jLF$X;?0UqhX{oxQz^|+ISVDs|Q^a@Okdv!oxjw+q=!Qtr8 zTtQRpZF`4Z*bw==9wey}oBu-RE$sJk+{QCY_Bd$QD$+GsS- z&I60c3ze-C=!Jw*sxhTerzR=03RQ7}f#clhuu;5AEKn9wZ!?7jri-!Rv8nhzdCS@5 zWICcbhtYCzqK#Z)uo{*TQJLN-Q;0fel(FG#w~OkT_+p3L$~D_)-XDo#o%G-q zZjma>+@kU9rFN4{Z0-)@yqpsGVR7fg*FN{X&}V68xl~mKZ2WC!cpg8+_RQh}CDCeZ z^Fb48_9LMFzhV9h2X;eI1?#-ZAa3k1lnc`^*NyVtep}6ztQ= z!qA+IHinHiP|r=f+sur5wyYIP83re(zovNz_V59XlneWQ?iJDCv_A@8-fV4P?#VFc zfeq;G*L?{Uj^qC1zi9BhMi9SryVM0`EEC^OEOxPwtY`HOmB=%=?hu2uCZD+b<1$!@ zFL1RSx`BGZx^i;}1ujkR!6sp`Q{z|V8Hf(~Iqw^Uf$@Y7haaUt(CF+TU-eIb?mc$| z_#ge35cP3qUSF@LtmLR{frVXPs|{5jF8uiD19_X&gs3n!;g;JRf)y1Td0&w_#cJghF%E8%jG$2!o107L%ChFm z3zZX}EC(OD;+)N7yP)-S`Mj3D`^C8WxwVPXno)Y5t|^hU&>WXJ-0~wlE!k~vcQZp{Xf&;Y*rUCJQ-PrT=Y>~hwVKNtBjr4(F!$)( z`sUIc3a9kJy>jk$M1hM1fLc$t>zi_}N$>6b4+iJ_k9En)wWz}`@t&@VNfSn7ON805 z`V8D~wFu4_nQ^DamVQ}#EqkJEQnpF9H?njHbKoX-*Z|*IcADrrK7Mno_?hmVqoOyw z!9q<=@yQ%g4kQ4agpr<}id#8bo-4VN_VsgfIUBTlrhNpN9hax0KDVYk(0ZkVypvkG 
zQrnT4LjbF==3>1fACwau5)uPK5R=5TjGrG*caQ102drxcGmM3B#mMDD9UXG(w+mun z+FWRbDs~{yAo1#*&ZlW`=2uoLA$_NUn%eM@+hZN>iLDWTLq#^D{pb77%f&JKV4c0nC>21F^^Hw{}?e35HQpu6>vAhekJSl@`(zt|7Y}WaIjeW$?S`w7v?QH^S9Yq6 z+@eg^WJ^kf8C21x`nhHBuQhxvc21_#8&h>Mvmi!Rt`Bf$jfgoQ7&el#nS?jegTzkb zrgNY9Dq9Mnfo&D*0*hP)5}nP=xM1&XI>CwBvs~ImZW^D__wg9srKG~vRzs7(=BKY2 z6GPtaz2y?^yWZ_TeDH?%auS48)k!wyXn_`OYX_@_ zzfmFz{!Hu;vIyoHE}f-s#d@uuBuE z9MOPVI71wLk7ymi#YJX8lq%q7Hx+nJWv(a?^Cy4l;+8EZ40)xL8LRU^b<6oYx;caVoZ?-10Q6 zgdCSG9%v7Tw7vh2ZvL9%huYkzIn{NUqpL-80jhjL!e&_v(79`TZm&04dfP#sq`5H8 zw8(slFy;Q``E^*cbXkW@<$jt9 zda`mH-L=2=&_B^%Bsly<`X6{GW$Js$XC>DK- z8xkk-iuP***1{M6uJZiWyDf?@7qjke>p>EEoml`h#r5O4!5Qluwu3iWg~Q_&mo+P* zdP&Q}w&2+I^`p-7?W?O1J9sV<*pj&vaWJ0|UvUD0Te`zUjLOV>RM7DLe|4Jon9*#P z+&t>Ctyn=APZnt4S`d+$acQ8w1IcIr``Ej+QlMy8(4lz(onW8NM)}ap0Dhg> zA^0>KyVmiTa^A)uvN=-MO|1p4l+=-fX@U6$&4zQE4zNzJGeBofIOrX9PGUwo z8RQoNkHLiz3Y9Gpf3)D0`6_&b*+JGxY~1FJEvKCP)iB^$49KMv>GLsutguDhygds+g^0af z7p&}qxPR>t2n3KLy+*eqGHylOb+G`q3sem{X0^*eSrcJLdbMaMv|WOgK>|FEP<7QH64qQ#%@>uQ2=a@WV0I+8IuH|S zxI8>(LBc5$ zTadL84RC3pC%^`7rq&1}b3xcR?HLK&ODGvGbhBv@LNd^~U_`_M;c`6ME819y;i>eBl*!B2O^q2|3i;eD;kv@HFk>iw zvAe(}I{4B`GhFn##xJ*D1ctQZpSx2awD4PL#aIuOX0Q3}h*TmII3Y;f!pAqRq@u3- zluXQ5(%+=u`cDz#YZM0(kX|6hdzN~}P!#-x%jb6@fV%?Acnh|iTbOof&eMBD%uoEV z8Q100RZDW9pd3t;nlvYuXxFNw!oCN#h7S)hlq$aKD!eY6!HPHYROAY_faP2G#s?0W zA@Pp(x(8%q>Zqqo5)si2AQ}Z?Mk(!05*POMcy`I!5;fOxD}-VnRFUfBE8D`#EguSx%-y_}6vlgZZlhN00ac=~yT+067wB$DC9P{(4d)A6)^vM^V#><_PhFxtUBf>#fNkd*^?_YlI+~qHJKhYfK$WjIIapfR)-6BUI>~l81Hd=N z8jmk6ikDDOH^dFA>Z5NEXnR4=(Bqsp$`ejoiMo5u=e~`Ssf`6}vZhw{uFBDb{^Qc- zmwJYYPyWi-JZ~(EpHo?xPP^enroAV%?&SI>u~(yi)8nHBMJwwo4-%WHZwZI;Vy~6k z5WHfkxYZE!NZe`q=xcZny;BqRgm-mXU^$Ma`WCCWF^t;IulYF_Y^nr z@mg1o_-a2e*ucc*;onMWG;)qI&UT3=MX{}}njy|WPh`JvrThO98-~fo_{9P1P|2^UYqJ{06;7_?`-z5!|^h(Se)E-ZODa(^8>LGt`r?OvpmP zI#>;<*F#IKJjD_Q%f$l32C%BGDF+TsM}H6cAupH*P2V(fJqCA1&H+ooyV)s|Fs1{B z{n3}p{w1x2PiCp^=l9LUqCOX{f6c)D$V8M^+E4+E1`N~u_j_MlGM|pjr8nLw4WNA0 
zplJ@ASpfa%Mz&WReki@YdQqOQ3?E=3zUL8aJh-Weq2A7KxFjv-&#(Qwc4<+sy?m#1 zO|!R?$Xu_drT?tG`j>`64uJ=)!uwxg35@63PMNg)!x*tXKT*25Hl7vAMV>O0ln9N& z{)>3O%lMsm&CJ!}HRyT&@(cM&Qx)BzBdOos`&Dl{``yBxwiwRk9aG!m_Kb+BAp2tu z2_N`TPZ2Fdfr%f!-FvSIg?5WBE2kY^?5N%LWVHV&W?iC)({2|QY&}-$aLaG>EK(W^ z6U!~ZlB#{6*uS~XKlhHkju%Z@a9}0Z=Io-#%IAF`e4OZ>Cdn^<7#s=Nq_nnyB!mZz z(TWNqbsyg#&{7p@<6!KTsWuK3mMsB-pM-M9=j-^(a(PnIc>O|VLvncbf0gRj3lGK! z^>h`j3tO-%x9wY42Z3NeGWHPE**Cv0p55iz)KawLxhK(hZu1w zd3idBu}S6J1ZQk8Nb-gN#*0JCf7jaEOQx}#UtQgki3u@$Y&GIoL8sN~8$VUb{~?dP zaV6g=*$o#KijQpMytD_M{RpT3a5OG!-H^%UBhdh>V>azu16YPuPsx|SsATLbEetQM z+>wO&TD9xrCIBCWb``~Xq@rrJDD0`1jGZ$kj`t1(46~MO%1*3JD2&oL4mK}>4V&gd zRDj}1Ch*8&b&~m`yPBZ4%kamsQpHlx5gM+-(vTwXqIiPU8~F?h#QV>ZsioqDfccIb zi`q<-Uraz9ivfm7?D#?bssqiW@(et>yTVfVT%H8Wn?KuoIf*l0+5f1ROlhpWd_~hR z1X4JV<)qPL$UGdxV%+GnRx}Rn8i+xrOWe)WI06&{+A6O9XitYs1T z|FHL_aZO)a+b}(zQ_s13TC1Xhh@6TFf=B@YA%?U^1_6bJQ38R=C<#LWAwU@HsDw#m zP^M5BWC#gm_=60Ap3?#{ra%S>5G)V^gb*2$NJ8M*0kyr)dq2H&mtI>*+2o!jIF?0Ru ziHhP{qd~p(oPBH8?M#oGP4Y&8UE+js41^EdEvnt{A6)hOB{Fkb^!KPr`V+?bQAQS{ zN9Yimn9)PUEv*!O`?T;Gve0(G6QCM7rx`z#H_kz=f+&G%mw zo)C<}5a5K=?bV%tn#&p-KZbo*y%RhbQ`?e{S8A#vPmE!I_zjRcQQ&JGeMVDMVRp|q zz$o##YWu|x`UctTgY70qck64Du^kvp`Vv4<3f$i&^m;8lTfcP8!?6gBiQ0Z&Q-W9! 
zF%#U|OKL9~zFS#hl|A|h3?yNz&}!2e^<^sKl+}}~@9|IJm)5P_us*wh#nAFy@Rmvd zUPAc8BAo>%J4dg{ux3rJZSIs4y`znv9NN8Ug)H_rrExQ$7_FKeHS3Zo4F1;q=6Mpi z@t_t@h4X~VdERh$H=f{k<`OAxEa238IKKikCO@qM%*k(eL1Wir4A;Zmn{?TiG28*9 zGT_Orp3}hvZG7z|_;z`0q2RfJDYCVIzOCGTv@59Yt+AV6`Q!Vj(V~no%o}5?4(VrU zCjsAgc3BxVl|3krlXFkd6{TJ)Z6jkmyL@#){U=!X$^D$i&2D%hx)CE#9Y0+02o`zk z94npauJ*7TM?v{obK@-y>HzyhR3D?S>Y-`-76%57&K|i7@M%#w2;R-^F@xOyf(*RL zLF$&=MvkcIiha^$K{Y>ZdaW7QTqyY1@ZE}tsoZrt$(<2=h%&#XML+nwz2KTP`*?RB z@r%qpFgTq){{@s)U(F@##*~RHUEIbyuN2v{+x!!h1N&yPygFeKjHB-MFd#zUidI3c zfz#g6TkrBsz@XR>PV}Av_VHKDMr>$^Wwl*6 z55+wBmgS}(moz0QFUikL)ohEB9$^KJF5=;ZyN#UY%nT3VB;DH!mmbG#bigaJ=%AqX zZ1$yx8%*Q27`M$gLHt(ICcQm8d$(>Y4i|Cj=Dt7L&WyoeT`U^Pe5UUMw#8+HRm9C) zwse__8>)dSO-s%yEPnkQYlzM&xZ<2FoY#}rn)HklS{)5apO=6zGjGBh$=jx-g)8pm z0OZ;1n}853Tv>afFnSO$dKz;RMGoSWu7R7BE)(AL%ckg_@ObA=Y-`E#Tbs{j18l51 znA%m|CS1B|FiJzshu;njKlWuCNg(%N51;QV4+L6AzuS*#GGw&XrRjG#KYT21DNf>H89a|y>P}_$S#D0^#EE~U3=xBPU=#1aAVhXqn zTz?q(St=UaZW^XMJ2ZN!Vv`_68AfT`DPVVArfy>di$j$Pbo&H~&1)2;&)q+$%Y%0l zmTrW`gkWD^AgbfBHX02rgL`l(IqsB57ot|mE24if!YKkd_IqIF523?ufYJf(y+}EZ zk(AV#$#dM1ECcOyKBdTg=?VrMn^yeiv;x(K+TP9B4x5fdbdE2Y3xD2-v*U;OhhX?7 zyCFSZ%Es7e+}SN(tio!zrRTXzMLT7eaCVYBbSrcl5oQqS(~pVXW0FyUvtu8d2G&&# zhaX432iS^LgJIC-RK3Qp59#91>^rNv@AY3E+GUefa>>Q(YTW}d(}nLS5%*wgzzBa4 zSIN_K4&Mp12=o?-Nzq>vU=h(j$tP3vYuleon6oq`-ECy%rr_?7@hyUNd{8& z)nCHBKpmI%B4Z9Pwk3Ac+G^0c3t{&;j+ zdzi7@c^s)Wjb8t+uG4Io2R~6$r_hRpcx!DGF@5*ooPJuVq?C}lM}yQ&Tt2@TdIi8l z;=fdj|9Lo=Y@~w^+9(SNm)U?kWsB9L4GLxd46x!clo3vxU}{#=FI)8c$WrEivDeEg z!X7142KFx1uvL}~`}BXR7ef(LZ5690;=j7+jZ79Mz*wsj046<+7_NeX{1G|6E6xym4E8N@YyCJ+~XxGtk7!XA>A1*DVXykSg`T@u^Q1$n% zuT1CK+3;X1HT&qapAqu&1v%3DAe5Z5l~dB?|D`*xuYiR%ziXkSUbI1OhFDD~URMX1 z?fSM`?X2UQ)3dR)T72S%QgVB+?GJnGFUz}tUAdR_doi(BU5 z-`iW^*r-t$-+M7-#>zQbQzV@5}mPE{D2I@yH2zl*?* z_lt@IujBR5{1ffKW*uSfiZd({OG14x96xcRt@7+HE75}=^~q@h4wAM$iMHrGgNd>Y zRR=aN#`tXw$^RODP*7ZJte>}%Y7{5S`Om|W^Fx@DlG`2~Z2O9iXM7gcU~O%5qSt@p zOtskX@S^G%aK@3$3~HhZ`Y;vy*hCBb;?&R#-<;*NX;>YVpL`qWbpF0puQJOGYS4B& 
z4faFRfs)eafN|pl_DRr|>ufZ*1(5V`M!JyIdPZiOI!pXwEx-h-Y}RroZ@Rrs#-n<- z2}^zPe9}}&-RdY*UAH)0ip}0=^!Go3&tc#9V!4Jzk*5~hiuv^>%)P_2On+|;K7!1a z9K|`?mZ1Y~Q2oWD!AWI?S*B^hU>zWY&c1NT2DMdZKf`jhG0Wf*V}&#-e`T#@O>kmJ4cJ1kC|GwDzRGA;MbVkFgD4Ha#DA8g-R)I; z1uIG6NJqid^xVc|lTP}-mR7y1O>ldw;J582{#Ahnj;;Co_e#@i!9aC!#+;O4w<-CX z;A?dvQRpBT&`vTje3uj%lNKkOuV@}#*PkS2QMJ_MX%~O_!t%-n7@R3>T zekU|it)X(+Cbou63>#_!mxix)fuBpf-=m%HQ9{M*f-I8}iv}!?GQV+H z#yD>)J#8FPcc&&gf+h+(YuWpH7+F0oomQiW^>p7|D#}tHq9UPQQ&RVUeH1b?qY~rk zYp2vD z>8>D&IUDIa=uT4HTEYat^cS1KojHbg6%H96+{MOV;-1%KKR~Q?xLjAWx;%jKF%LpK zZq;D%SSz?P;>THZ9PXx~R%^eAA6&^1TY@W40Q8Yny4=FYGe66YoG@kb`%fSV(T4hI zm-6i~5+2B1*v|plO^q7@(R~kk9r9cqJ=7hV<*i(r`E~Vm+r=|wPg;CH{aB`dY1g|o zKypqtaWh)~0VRA6SY<$5dTN%6U8`xKj6jWc;dsZ^!mpV863O_rZD(0G;!*Rk@L;X9nl~&zqE`V!yjo+? zJeNv@qmQTlW?g7Abr0mjV*@lmZGT&?k4GsC`*g{pw7D~$U!BfBO#ISGt+e@!<1LL^ z@kYWi=$t2`d9>5fddV!S43R6%`pvkpKf&kI`7@S|Bj?6Sb1YLL6irT>BedI)<#w! z&k0!H)y{MU~mA* z58qPP(-*pz>DN)CMt}yZVOI&pPdT@B44k9*gh1L}dbk)WJco<=0T1dRoWlx~WvtrW zYE+=B8Trd2z@U_gx0>cUOq&h{0^o2k>e)TOQ;=9H>v}>o)S8v!@P+j58WZ7%kZCF< znb5>6GYNVfoWse>S6nPoDLVmB$8B>*Yr9Y{QLRdM=MuTH**hCt<*Ln}bc>f3WU_O} zw>-%^eOKXHksFnxQNfCMaVVEo$}c%JF@h1c;+DXE7+4&akcclJE+`U67WCM&z< zJq66G56(vOQYI6G3{;rUu7$Z~-@7XBfIHPd?(@3O;wg64Jxu}nr`w4~8pp7Q0VA|R z9s4xza6?tdbut1L`R^Kon)HF1_)Y_!krTc&CJ>qL20d>Vegv*=_Ao5b#OCK5=~rz7 z*QNo9+L~WYy_jf+)duVu*a1C#3N=*20HjoDrG~h8#fjLGwqCr4Ur_~cZrBGgGzCyw zU9UFehpdHxqy$8aKnc5Zi7Kf*3Z0Y1IIs;`URE~`2Z<%X)|kz8hjXmj=UB4jD`K0S zK}nH@onK-+TvRo5b1xtKeMBCwPpU%^Q>@pV>?gxB!|6{*Re$ebjIE;=ITxjAu}8Y) z0I~w`iRfAjrk1m%0KNy+SXK)5?$O-E5yK!&zs9mDP`dZFvEkw$!|$zK0a=F|+DzYKUFWAnv!QIDj_@z&w%zLw zgk$;6{Dkzh>v}PJpK`%4);Nec~KbW`c|) zFRu=Ch&S|TyyDzhx+Z1-&`zwt-d{2%UXJc~Y-PWlQhxbSxhS!vXPq`0O%)BUFkHfw zC1cm%NcE9uIBEi|CMy7)B8BNaQxzr`kohO|_(tgLUO3?$rI#^#?L&p*$A0qG(|=c& zyx^lO8NQXMI4OhQw~D~T_jd8PA-$8{eBqQSWI*VjTr6gBz&x@)*PVPg6+eORpE|Kj zRNPg11@?N|Td%tx?d~x0bhrFqsSzJ)(+TZhLz6M>pXaiVBdABQ0$T;!t~|+dm0~Sr zAClCV2C-%;V;2junuRXy!Y6SFs6wot`d-o??P@y^Uev2j%+nIm=dtC*ka0AlaJqs0 
zDp0p4?)DI~$HW8rVC_*IVLu!sgKOJQjSB9f#IYbDtcXw#jk;b9Cd7gA0$NoaB5To^ zx0_4l)Y+HJXr$)>O~3qpz<<>Ab57m6zSCunRsMENx(s|;CLy!oTyQN^!u1qJ0a{H4 z9T|V}s#7s(MHif~-LDb|OPh3d3;U8)fI}68z3qj?UwFXmuZgQ=bYZUl`vXlG@0%(z z6=TNJ;eF%eebq0wF2(osIR}?!8^N}R^U@W!r7LSq9j>00U`=$zpDO=GSNbsFjRcCPwg5%DmZ*=}$lJRSQYx>ekaPn_4h*rz^e}*8)OXy8e9YVV!Ct0y$a{Vv$h+4&ny&>4^BmgLkW;PCGB3s2 zp(OtvRI!W(Tw({REUB_xu8hVr8V`(tCa3R?mYHc2atG(qA2@Y^SZs(xMZKt^GfzXL zmQXzu?-JV7yi4&Wr^3<4Dq@1*v(FC+F#r<7UfP8XteME6H%Vnk7{S+DG{>Xojl$23 zj%~B$VgBba0`*qJ8EA{hjmd0+r_KbDY45;MQl@zray%sPn%g~gPCDohj}EnTq6RyF zIzSrtpe}PJQ7V3PPQ^zhp(q64RJJ`cFLZTQA>5)I>ovC$TA>)h?1*)R_VvW zJzn9C>-MF?(4tyQ`V~U^+PAtjvku2+0ewYizqD{!+?kV}F*`WPTnZ6fd9!LQJ4SqH zH{SYxmqChhAf)&)q#OuiaSJ4(l@&$5qP}8}0#}=QPN0LTd`r z%A0MXbLK>choYq|qsK9!H~{>WTuww=auHKQH&3#hcCx`xou$+n|J~p1D7nz%O>6{E zP+m(TAO^cjTQ(X;soIK<$+MuZab3gs=t&g5ZWKbM9VJIk29serg+onUr@Paa+G`l- z!ZF3I@Fv!y1sBg;w>3)qDjOB~i^DmDrJx|%+_d(o&0te&#Wvdu0r2J?O&ufOc~woq zex;XZK{VWOKj2@nf8{)*afTZV8iMnWeS5xpe6{olBU($ONkERtzA`tjqt0H25#!#U zHwJXcy2WaeG8i9b-$9E!b1o2Y>uL~$sGz_iJb$;7{dWmi3C6&t_hN3L2fn1#d}U*$ z8d;s$hwi+(W(@a367sjnXE;Hv50s4v2AYsT4S%>Fip=dbtZ8%?Ud}7OMD@F^RhLqA z^3vnxJe)uXgT#X?4_(c;4tj{exn*S-p>>R~)r22JR7kCvp`i)CL+_UL_UdR`GPyC* z;a-xs+H{)16k;^jW8yVV>#lv=#4f%PpA&iYw%o2dgIRy8?@YxFYHNe*IiI1q+_AWi z87(swvY+vTg)A%nwo2~AA^EcNzzxRyE~T{3t#n%F#DfVu2hcBHWvzYh+yY}_5>d7A> zyDD!W)s6N1JPKzOo9+gn7Nt|Nftbhcru!xPfPaKO_3D|$ipLu>_mlD{_WFj7_|`wY z`VO_-F+WswIl&WEs7F)ki-won7;6n}O+*$xm=q_3D!=Dok2OHmjW5dnNrvl+9rW&| z_4~OlU8#{Z!u2q-4S;R?%=~IVOnP)G2YSF(>+X9~r-7R+1q=$wTl_8rEf>^bw=vbT z@rk5terS23ZDD3`#x?=^rElOj)h=a1}>@2@XDQm5Il4EbPX1Bzv-a6rt z^`4{nae()9=Y^PbE+o{8Zc1|vMeM?K9`@b~&}&Gj`IIRmawKiClWA@zACJx?G)>+` z4W@RR1s<7}{_DsCa#SA*Q9-^6BAq9xzvAmS$` zNApmF3M0A(-TQMir-iYu?%hJa|#i#j}8(eNL<$+b^qBo{@fS2}6LCx7&Ch7!iF zs!9Z?Z>$|Hetgyb2XMFK7fxRJNsR}3IW7UJzXxf(CupUH43(Qsql<6gjn?z}iO+@{ zy*#&?E9d0pc~?+tpViUetOlB=I5^lVC^EpdT0oUBDr-|YG1F!vC0##Uc#sDloo$@f 
zlXV2tVj_0!T^q2Ce^Ua0`z5{BR#wmiYDFK~UcSYv%)@_{IbzbHaH{bdUM)`(OG(SwZJBNIHuy$;pe?Cq~B?XgeK=tFigU))Kut*GwV0WDb;?^nvy;;}RH4m8aevFO4^|y;UJ{P(rZk#~bspn) zTfntH1Ya9!&TYyF9YXJ5RzW*SB-x2dl73-4K_Ri(D!DV(qJM?8GdJ$#Q47{AU7A^F zwBD-c$~zSi==^Mo-Daa!Z_bSWIq&0l^Xgya^p~&ciB4-qTRIgn&G8$9yh2ttqb8!G za00cJS8rDjcHVC4Nk&z=zlpE9QO$WRo((JBwz@`!FY=JB$m-+@AG_QfcH}Wrx##V~a`8X|BPx5wDjLmLs_ni3 zPqq!+M)h_u#jp7y$$d2z9dUq}&8<+6YPG|7hK2~62BR`|JZz+0?&j%)(+c%@DC zRNvC7bRIrW=AU}dQ*VO9p_YbL5&Ww=xPg-tGI#fPzy>?N#ZS~*gEvmH3%Z=Qv?W?2 zqBGT+g-l zaiknrHaeOoaz$;PnZdnLK z$L>^aig!+ZdWcaxB|o@gy5mkgl>D^)ySP6qT|Th)+LzXE&F1JV-A8xc-(Z*g4IcLo z-8Pd{iVykr{UME&=F}d2@Ry;>fBI2kCHeJyTaqyi%*auErTzAB$M-k%y!vU%xO8=( zl<#hosq`{&B=M5s=hpXLtE9P0omD8B5ua@Aud4jO-zplgvuzhYbO%r7c!-FNFwr7@;Bzv{ZGsGx4OL& zf7I;bd-5HnH@|(yo@9z*y3OI-?Z29gM{ARAtLUaEG!Yum$sL}uvZ>LJ41Gr~1AXUPZayZkrn?1 z<}T-_8y++b;mV857}m#`V06>27Mao;0y$6>d-1-*&PO5U_n?_fA4iR|_B;0PAz_dF zerLa3cpiwN0s9q}Bd*DL5WaBZZZm1;m9hFVTfV=edT5(z z+v5NAt{`v6{TwVt$Z0$FDzAY>#oOJWy)N2O^7VUTgTJ4JaH;qI{7*@uPnZbjU%1Kg z>21l!!xySMtJHB0EkXx+bGR&gN1psm=wCjkY9E-6`2946VL-F~9>_{F@Xyb|;(h?1 z5e@DsE@}1ug!{eJ|D7lRWG);7x0(p0ouL0}E55t|n3Co|V7cfp_g5JB{?-^jnt_yK zBXxhnw(9mxWQY2RXQnXy?Ebsqw(4BmCwNXy|hMBy1)#G->g(L z3-);zfEG)BrL=7$V>i}eta!J`f28_6`~dMJ8sEYkBVG$+#ZY)Fp9fO>nu#9O#d@KM zcKG1J#<{v7&x`NkMxR<55SPrSaZ$wNymG8_KDc>(`&FbkUBeN;x+>D$kH|{E=KHfW zP?DyPTAOzYOPhhTgaZ&PWY%A2P06#8q%Bp~QWKQ^(YupY%xznTkhMoj+v*~LJg4oF zW3>j(DPPFx!cNyx5rG2gO*u8letjDpJ@l(>^}! zBtz8c0fAN(QNH!ye{|QL8N+Cy9mh21=LyNFQ-tbMr?_;+jSu^O#SXbs_cl;tLqiK0S4`! 
zk`mC`Zw!c$!1wMjUU@2Zh205e<3jLYcHOPg!sK)XZkLY%HnGH#mggaCK`L!GZs;L)yiKVs z50gsnCle{T$?x1kO1+C-6&r#%Ct$Pgr*4Ai`P1ZSokDMa@TQgqfpqUC1e|*%jNS}- z7UZXMgxAllRhedC=h8^qK06_N}df&fG5d zSE=jWJC#i_K_!2Cr)vH0?=;0aQErXPkgeBNlolR#PS>>^n-k)Ya2V9IvmrcJYT|DM z$cXLca%zQ%7@6;=*QFt2;>xS^+^25{J?O+Eqc?ceqWK=-s~gpMMz!E(_46|T0gUH= zS<6nvxJfS^K_>@^Qp2&b#_*)UHo~4q2NkWWR2mLy0y$7YNRy6A}(Iu7)5S+??OO<|Ie9qbyff%K0{uU#7gQSe#%1@0J=~ zymCvuF>uY`<%Qrbo4C&(=J@T{WthR6bz2W?g;qeY^~-<#M2+_0C#hOoLLOBkcVO;= zJ2CCaLH9W_e9~QmGOl-w6lO{PE@VFGNVn`ZW-^=o+$P|Z9)H!p>p@dsX^j8$cJmP^ zxbh;al6Pm?c|es3X5Z{BtmqRr!M@Mjq~E*a$EiBqsw2+01`y6(Z;|1Mf!<+QNi<+l zp{)^2%lf3L!TqxwY6mKmSOx^8(tv=FZTN*$-^t+O-f-bi$n7bhEDfGdhBtK|fp9Uy zw&58vW{2#H50ZOLOaA7RA>$^j3wUHzD$QamZPUccIo zux5z!AaIK9_9%qI!O8HUX-a<}Rl}=Hr3_`^3wX6ay3;CDH^sMzZZy2e&PP1YT*{=t z0yg#~EA#d5^_0$U=!c$y;#P)?;ToIqenEg;3_3#e;@18a6~h>vcSIJ!jO0|T=WRi8 z-sOuAaQI$<=emVq*N^xLxCZyD>?C91<(nZkt;cp(P1^bHC$RrmwT7%|o7x5=8_DEA zeA0nSnl-RO)l_ZoIXdp4V#z>Qa@UeF)?kn`zv?uGW$UQ2k5dUe6ZHS@dFJewwaiR{ zo<@0TB+jjKOPru`J<#GBMfpwGkPP=#j!SVW9*xMNlhrgNmDGExMoT`ipzrK7j;%e4 zEIL|Ya6C9Um^emL@T+@n-mbM5Q0;)G-SnlgN)*#$vQy&cG${zqS%-Z@}M?Zl8=MZe+(z(t~+B4O&;=K*DY;xVxQKJh?~E+R}sFI?XKfvJO;2!{KOz=n-V zrP=KG>!H^)+*T|o<;jk$CW?%dm3aqQLnhpuEf<^Gu^Q2)pGbcI#mt$ zQe#)?njVV3pgNmZub$sn-b`s65lyv=OI2W#L(X8Sou;jObqTIxjU^8_X11fTXnru7 z_oGmSN?ZQ;K`(LH)^B1!{vuqLJmoV{3Dk*H|8#4$+-sykQWlv%`OZt9^z9424>#c7 zizE)@cwvCn$}uE${F)r-gKGEKLG?f?#VeV0J4kdK^cEu)@ox0a@R#XAQ_HAA@?btx zRF4e@8E`U4t@cw_6;XJioZJ0Wq8Xy>XDP6fjrBj=QUw;{tEO@yF4$^}IdyO{|1ol0 zEhW1t;SP|YI+xZ8>OrY{I^wXH;c8&>jY}C&mZm zIs|t^XZKXa9srVfl$Zo}%(%WTOHf;tI6VWKmRYSAkOwuPqSbOZU{G|FD2xGeW17uS zRf3{xk@4Oa3R|pXeoHQGE@m1XrOux4Fc8O0QPLPX1mvOas>P{e)Wm0{H}+yyIrr-g z#)8drqR12FNd@z_5(=g~k%I~y9^@v8V{p}aU49h}Z~aJTT9e#6(s>3s2*Y8MPwy|n zL>=l@s;Zt+3tGFOQ#d1;rx9fXK8Mm7t}gFd=dAsE0rn>)&_ccr+6G=Gz^1t|_t=l( z_W$|SPWhHPw;$Hn1zVKIw5+v%BJe&sE=QlLF)D2Jv0BB%a0aj-wgrzgB)LyZ%FKZ_ zg{dya&xFmqxMHj0QwWAtuvBGMQ3dc&0q}<=zyR5XW$l{Pi53=G^$M=En+SaFafKKEshIF9NDa;P5 
zuM<@nKyGg4JCHMoCM|PZAlno|5d!TKAm8aZB-Txt;iN3t?;J&Y8cwWe2ihv{`~FFu z%-!NW@nlfxS~B{g0~i6Lf@G6)i33WPbo+VWuP-(oItR!fLI(DUDg8=Y#?&r9DHGT3 zI{j|^QU^OOGj(YoaK#;Dmc`SbE8==E!?2Dt|8Y7}8G)a{)m8z=I4+|r_GibHizT49 z7R+`1VmBKBqljp|8N`P(#LR|1!U0csqAhXlfd(XKBGxac^GL-SkAoN%1aA?gSlhbq z8T&;3!>r8(RUkfkFr}ZPs#-!~TF@=Nu?bjdYBJMdmATo_j*pLUj>r(N)iXV7r2&fO zZfN-86Dr=&V`POBUsAO5C9h_xJ!UR1Z;KgOvV|>;Xe0Fhc+biE#%kKWA3cy3pj%ss zCP~M&sv^~lgvm-fk43mY!Pe*dOz0~CxR|8Ed*G3!C%Uoq{3LHR8ED*Ko1+Zw>hZO7 zB%wgl=!k5oMuaO$>U!lpk`PE@9R}g=oFe=^+2=vlxR70y0Axgl3CC9Q4tTmd#`R^g zV^Fjk8yQjJoYl^;u{sW*DBD?EoL4BOJpiJ-j>A_`QZQd*{6HN%N~lwe4F(kyhk4eZ{y-_;vA%0=c*j; z7P?$!2uCs32V`D zG061whay6O-f0Z;cv(Q`#;}Kw*gEDJZtwG;DrBhNC(m*f=fPOvv_2axszm|iFA}#P zxAj#G_hc7orG?P5prf--(=P6@X#Bt(-o3R=u(@M9!>I%51oorNjKBOMhNWesg53lM4NL$cO<@1J*v1VAvLpqHDSanu$`$` zS;ghUM&3RTJ&l5ftYb5InOg3R8W_AZ`!e?o;?2R<=iJj)Ug<-Xr}*Y5qy!}s3q?&^ z4`youd?Y`6t!A|aQLuu{{>gLQ1WFesbp4vT#{m0z{RgP>OUNx48K@q50*%Ha-P1R_ zw_6EI3t4QVg48=*k7IT`qmxtUE75DLeNg`175L9ZnYMDY0DChopqmz8rEfe!@xDlFPi+@Iow|-MzJ!;7!hP zk{lPQcWn$}20}ZqK9QlbrV}1m{$cj`Qk#h#i1>lHfJPS^mle-alfX(Izb)@U)pZuM z(NzEEBzwIQt?9ulunG6nVv22jSxEo{=$%y==OI&_u~USH+^0B6c?j1W7}XUM^{7cq zHnBE}x@|{a2?>&6qqe`R#}h8P*`hLWSVK9!Es)!pZW$faWnLM4ZT*#veAo{$vE$_! z>F!9;)k#K$VZ&IUVZpeabw){PP;qdYo_N-VH;^^wKO}K1eqR^Bh8Mfd2#xEra5 zVT$7eG?lN~`5o+TH)`9)_qGQ(+Hi@!(Xz5N$bd>oS>TnFnzRgi@};T$d6ov&R;%`< zDkVTnQjcG11N804QLxhJ#PqB6?wO`9PeQ!msm;fE({7595~dTzfk@CglpkJ5O|B5d zMP9a-{JbF~3xmPPr!pJIM4S`MnqzAx%9ENHO}Edh-jTqsl*@h@ga^!V+z&Cb*zRq( zp~;`@C2ZHu&7u)q$hfA9u@b{!#I;z&_jEQN#YEU)H`IXjwU|yIb*t%e#qgG&GUo;K z8?{b+N0)bB(MqXNqHj7r8z_PB%wpyVCsHz3#cH)NKfW~9BV%H1a$ObY!hc@Z;QPJ+ zbr%#*G&c&@5fU++al!KCM|VdjGwC?i_NYqvDvA8B2l+wIv^ij^i#sF&8b>KJ#At&ZfzntxFRV>$4$sJcojz0r_ zhEQxN`8ijzs!nSn^SN!L>Rq5jyt*t2PiZec_ z$CQvSc6Y5XP1@#_##=GQ{ScgAb(B-7(2IQSNa2CzmwM-~@F0Bzx>E6z1IG9I3_0jgS2sI=g(#OE>!u_<`<35dytN! 
z3NKgxrRMG;7)cM7OEd=;IyN#A2a+=S;Xs$q5$?BRc2ZBa7@ug59+|XeSy>EBrM%e% zF}42!(uq|W=u)YBum{b^A>s79#ELw3ROD6~JBo&uS46f)dAJ0bNViz00O+U_#a@xo z`$a0T}$qEJf{`!U1J$sNoV$(>(4?&n_UK5V2UjQ#F;^nnnnB-b*c5sd4R{!l9S zXzQ|$u5Fs}h?N+P^c!3UahY#4bhQK%Di^nV)dAa6BvqB2C3NN$m-!S-9O#ArkXUub z(Tdyo5oUsM^+KJfi^q1AywZ*_8SOn!c$nCePH9DD`QUq!a@mQRt$a}LGYVJCkg~>n zV>jo*bz+-+y&EE2t0Jfp$o&htGOi31sE@RJm#L7fMBa?Ofed|X#w6a0uw|^QjFpji zKUe%yT5;&RN_V^!hyKPsk9O~`+!i;#k7USBnWJc!J-j6mUsUo#5?vlCNnB zlP3?@v{y|!TME)G12ju!z13(m(J$sS&475Ow?YhG;q<$zivZdHa$wH`F7|F*<#(96 zH<^zly)bjKNQwNExmILQmN&4nR0qL;;|qvgQ5~gHpth+cXwy$bMVAnKgW^uw$mkP2 z>^4+&Kzg3oePv?BL^x@$lzcrWsIJtovOJ+nrs^vm|A>&!l`)1b2AdJ%iPN10t#`XB zirscbn zSab_|0(*0$3XE$~;Fi?ct<90ot;464ACO$ z(t|EKjzpJGW-Zu25f$X&elBlC0egFaG&PJRwLyf52SsT>=X|v1fqInEWgt`7yk=(P zGysOLo^P|5$i0i51@nZ?>^gk^lI*h~g3LwP)SlmI!a4&N*hgC##N3)XR?*4pv_g0a z8qrE=DUr({Mtzel`7uA}uC}*{bm)85V56Wf!lBfp>C)8jX@)9PVFTV{6=&hh^sQT% zwnM4`M?Gm)uU}^!c@0Qe8k~eg2HD3NozzJ@$_MnH;iyB5D6N_8l@w)l=&+ zyoRb_(8T4|kAtn8u?0bTuAW^j>ddCt^YxBbZiL$o7~L>|^&$srE7V){jn;|%*7ZXA z(lsy>C}qApsKZX~2}=T7MQ3!3N~5>OqT{v@esgBQ`9GcSmZdp6VS>VIuPRG`AcKvv5uPpm zTFXv$nvHNrwuTefu}5;xRddI)3>(cZ67qLpwsG|20-KcY1v3W)W6 znuQ*I#$XVhLe@&uIAb~=aHjoT_SyNb{q z^2wuAi_Wzc@|p1uXbn^pJD-dyS!|=Ny)Mz=n_yaSDyt>E!xHNJKEN>!nYss_cTn1Y?Rrc4pT!rQP7h+L8K{NPofdX0lY!m z25Q2cSI>qUf^HE-_8ZueC%e}61g8#JWDX8SDA%23v|t>Y#0IM5b#nyV0E5>Zh0)0J z%>F<9qSapqtTPpJ`AXAdmo;c5Eh^vy<>alZ#H|FkPG_Xqu8q%cB-^kUO%{jKHo5|U zlvD^97;$}RVBx6`gSka8cLN1u`e1l@!0?zcm^I^9%$(QP8LNS)M%Z#U8$}9+iTm6& z%rM_p! 
zsWsh>@jYjQRZEDm;2Hqj1MdgR1(3y8$B{Doc!Wc3LSMea(j{=$y3)1k-sz?Tyvy27vt^bVl)Qr1^@$*2vHzF#DrwGsos8J?u91B5> zDFEnhXXaV%E|uy2(=e%FDb72)vPu0pDNn-<9%PM)gU%eV;x;hiYA?ExB|oWx91@vd zj9N);jJn@%6H#Z)x}{9=7kS@WoTxFLoDg$(-;Ic-8@_YPg#GBF%w+-bMF*JhO9(%3 zby9n{SUGuvCP&}8H>skE z<7O1K6^cx5$TyCuNV)N`4%jMj64ud}2W{?6D!$Fhpd5y&8N<7>cf2ExIuMxzfo zaSo&L$MdsCk6$k})Wbw%XTk5&=Wk!U04^_HfV|ide&2Y_NphX4pG}o}ZazS0Vmtxv zQ_hJX%0J7t0&VN`ae(zG7Ne10_H!k$#M(`6O~8ftiB8OQGPCWwwdbHB))_+Dql1f3 z6Uq$a=y6j|5waF1&+U1s1D2zAzv9HIkm7r_;`)=8hW6Pi?|-ES$%ckcxRKhqC!JFd z<^s@w<}fF%;wX#@;b&@OaRXFS9e3~$jqypID}7o_dNI4Rtspcg^gIe|`~YNm^Tn%NlSxs~!^OK;U*Of-P;sZU=)3qQ^ zBZyx0NV1)3^9_Q7{S1xHR+tl;#V@Kccu@+zI!3rSgjOhO*P0}i#a$%i4$aMLd%F=P z)=nJGCFO%c5frIf1j*6e)i_!`6lJ(}`7;7+Jt3m9Y1+MIXDs9bg}gp)mgyU)%W@{~ zHR}66(SY#Xl_Q%Ja`lJ>K%N76Pfh9?`5H<}){0u)`GCKu^A{^cIgzu``8~t?G-9Yz ziE`+-&2caTh(}Z#5uz#;NX1H8(qC&6@4vd(wv9_vApopVy=)4_`v=q&pkWp5ug4uT z)?=k-1OaC@IkrJrozvs%*YF(3+f~>4vc=Ad$?q`a-(R%QSNs|!c`!Xm(GkfbdKW;G z6cQL&EFM8j%Ky2fkbfWnVefYPmFTqany3lLsf zgSW3Ha%3UOnhi7gk7EvakBil5m#C#+GG&M#;RqN-M4@WC`S-@Mm;;CBU+G}{nC;C7 zMvt#+SRbafHhS4UB9MKeyZvx(_`&Y}QvR1aI^e8v9GLA}O7KiQmfK2DG{-4)_7pJw zL&glVC%7El=+|XOR?pH}Nk54!X~=3#E58Ksazp$t+pn9f|2-;5WaP|m^JF~S9r2?W z#0C)`G&z9WeNVn=`9#xjv|*tqn00m)VaK=dfGq4qwO&`0lz#aMKuE;ce~@?q7d%8? 
z-DXUDcN!Sqblchcm$&(ZtH#MPAY#a`O)LjOzySNkSxY!87XDDCZV8I#>pnSF+H8%H zRW*Nr;QI zXW}M|TJ%nQEjJ(O*=6gp;vD(`IEwW`+mEZtoYdQyu&vTt2hec6Of9H`A*(ek z-0$9^1;%6=nOz_}gnmx%M~f&A$LoEnV>G(J7{kgGX>K~D{^Cw`2<^t%vRMxl0ybYo zuTumpgvWR$D$!`ug77e%Qh9Z8b8k9=1^MYQ>j(t-k#N);L@ZFDgL(9m1}#cT9}m1< z4DnZ@VD5WgxZynr8%<^0*`FC178&>6ocQhu_pT~LC3z-b_>s}}!hQlrZVcd*C|mxP zghI;Ih;&;L^cLrBru3()px@I>R3F;kF4O>buP{*I(2Db zdEY4BAL)>~)V9>xOP79_yO;%h6m-zSETe({$5f?DGGwt0@nmboz?wiSkr(_|j;9*G zJSdY%7e7DT^XC^srIYbsk)-x%2_=%$nnsG}rZdy&_w$|K@1N(7=bYzrpEGCl+}z8xysp=Ez2EnBRaZ3^ zRQuH#;nFLg=;k?MFZ2=&wa@C1A3e_j68rRL_J@Gl7P>8Ru-pFL;)0ak7fqB@F#v8& zT=Dg5NC4)`4Vj>TX?J!Difge0MoG8ks$^&041#+S8(~H@RzL?S?;weKWjewY7*BFH zbe?xrVr?!S;32@qB<}Z(5XDmb(Yw1#>OP@k=r=9?N_(D9DxbLP(d8Jz8)?TfrAu53 z6hdOey0m3Cv#lQU?Fr8HzRcUCo+cO3#Ryw;V1T{A6QJn9dmHf+(A~fuB0qMY8wHqw z8N;vYF{kY)fatSlD#agU@MrgI00K$uV2dfoj)B%!2Qt$Zzi|JO!ODu*GxKY@F~tMl zHy01a#*Y7RHFkBzRj6}_x%DiA6VfyP$9rU{-v1t3KCZT6_B5i?Zn ztV%kR5F3_D0Vg>NuP3I9WAY||l6`S|5fgdW+gDEr5Vq>~xsT>X|Jdu(;7BxWZU1C$ z)l<_QBWx_{itA`fN@Femgx2J0uFVmEl;qcH_s^{UQ70c_cHo?7p`<+F1UD_UcB$BDogzy8JVd&yT?CkXA6SCow+eoZk-r z72VB5I`5s-XN?#ormZco_CIUD%Qon{?*|WhtkliE)>|nE*X_$-pvu*S9{mRDcf}=9 zol2Exd2A&MrPJ3uq`wQ`1c6xhv-3d5mWHUZqjl(7pr;nF;ve!Qz@KM?H@T>1JpEKB zBtHTHErLhe@v43XATy;Wi$IPKxQ~COkH>B%Fa-&~rv{*rTJ1uAASIIj>-d)`KpReG ztJVOtI`YnBXXNN&gx7;f>(!xJUc~wYHvi>nE+LSp&Fi-gL=TA0JL7AkG|%af>U%S= z=YUFHYfSTMkf?jN^z?Xg9j~D+Hae~#Pg#SFjxgPMZ{anN>h|+z&a<*>R#kNV>0Ep2 z+$a_3B#^BQ>=%Fn<*-jG7pN_z6<*4LFN;FV8c2KQc~AF_Kvq$#q^fz%cjVA_CR+hd zg!Rk-y}ys%1xENDyZ|$ArH9^q?{6w_0J+wl+Qh6GS@-$- z)nWm#m$J`hpa^T&o}L_<2kT!QeLNif58oM>o$>6dO^TXadcAj*5APyIg*+gw3`Kvy zu4G2r!JQiTt0`j|>qU3kDl66h1PFS_1rgcF?w-Bveypz&F^o;eZl zp(#%VqE7~JA1`2jeRR_M!UWktZ4H*z$G8F`?~KgYlY6webs=zy`youPa_Ne?dL;~L z5;<%f$ffw_7R&%#85)4LM8SdTWbuR9q-d=N9emWLkK8qggAyY{exbLko7T*kgrDO8 zTGj)bC9rZ${c`L4m`0p_tPq=~sib~B$ zuwb4b?)p_AAM#26jTx};8RrHJBN?(pL?2fVS5Z&c3$)3ES7w^*Z)}SJH~>K(D2C>_ z#2P%$%Dh>0XAyNuW}M}pn$@*9(Xl}j=+p4D-bGw&St6}B55OU2shG3!r5yz|DR8zf 
z(Cf&B&jd;?EEbqBV?A5zW4|HidkzIZ6brQeo9v&Ki2r3ZYvBP|%W1aq-3mu%f1vB5 z6OSY4c9P%zHxJmYz^H%hzXZ%xz8`_Z|a^&S|)X8161jh2s2SoBZCOb4=7@JOrLJU?iX2vd0b$z;T#s$^_ zuK>jyVzuY0CWM2zM!(Pjd;@}wZzVSY*JlDWipMMnCh9hSM0`;MPgayHns zFZcvS@E4mF&p)B1$i>Qh@tC0yV5ib>lH^C5)vcCRlH@?`GkJE;%jc@qpwprJZMg;Z(#Rx~3^thF@I-|ZKRTj{Q zFC74}31Wh8a3E@KEL-kLzc<`q)q>a0pNCKM~fSwde7kZ~;8QX#VM}4zy=_KP& z2KslPhU||mj;pTKro;m=#T0Zm^+A#g5^cH4zB^!Ipa&c4P;^ZV38~6!U{$9#nb72) z?dKH$Y`X?CL!8tTdKAFnaSsU%%2k+Gi;Pn79jUi3sAz6!X|$9HMt26&RStfblJWM0H5L6zIGm zdG)4hk;nXhXu(?h%%k0gZI7fOnmlV@#+Jb3AK@h!9UY=GJ6*|4nm>r4>McHgQZrb< zZ7?jl-&z)4m z)@)&HeavLeq*3@nz^zQrB?fAV#Gf#8DcTVg!mh)Vnc>fyflVzGd6o1{9Ct2Z@rL7M zM-6XA&Ybynp^+Y?z`(urA*xicin<6Kup?=tNJe~=UExTdp^bcEP}6k=jX(9 z&t-XQDwWemeVDtQ$vn!yFnG!0v2Z?6PD2D~+eQ$r%a@0+tDbt7;lXlNgB}VJ?snh# zG9B!<_;SSW11csCVX6 zfHQ-vMYoMyrbM2T(_T~6YW^#(;a=lTlkwl(HMPItxNy(KUXehfGd;wFfh9OI)0Wys zhP1U7rg1W2Okm?773?f)jrqP!`DU1yrP7Takw$qw$Ed8Lx7=D6ONWWb{l5oVrnnRW zZ@iiLEWs_ph&8hTf4@;()_KJ9SAjb>$wxigI@dChPVvH5m`nWe<`u2_C={;V0Sk_* z>mp|CCXd6W=D`7xAx@ar4EJMjvwzOi(z4^3S z8y+QgA)!>qA=Q)H=p}}l?D3OuOlxzgO7Wx<-a9J(9ukB#<=ni^tac}W0!!ZV+*=bK0_v#V_9)07m#mCGZ-C3a-4~Cq z53)90zBjnQ;jefL&*IYh`QAcH2YS33T-Iea#_PeXUWPT{o=b=U^KB$6jI4$d;ifWz z+PX$+(-!G250KLN{@f(SI$C>}x#Xy~re672&8Rr%5~joeFJ}eLw608-?U|HM5iDQE zR6yW29nG}IQU&h#F{29vcO;P&-}!&ind@x$<$A`2#MdA?)$ohLLUp+Q|BJ#XW;KNz z<1&J7y1Q6!c%e1&yc;J>Q^hm{W&~g7u0A}kG)x_qQy(02XYYk$)XvmLj?pY(vyz;t z-GICF#LS`#TT$8sR<-t-$GlF%JUT@?Rvlq_<5Y4Yf5?EEl;_^;Q4)sj#vLnT@m#5_ z$32K^P)8b9hVVj%aIL7G& z-QKiCX^qCLO`3@DkvtqfT_H1CQ&ahEzYRoTLqBVr`X^ zAH9It8Bl2FHd2!?-U3NZaZmkb2d`2z_6?#`>oju`l*-k!B>}TTNj%5Pau`2~a;v&i z04wZf;6Q^Wb)k$*hY-jX`bkXp=CnyG;kz}JkzPe_lPHI>Sx4~aanLrNUE=_QFk_+5 zABGiC2rtx~w%3)7iA`L-BNn8^M%aHkpB5s!ztR&d%J1WL?XHIF5DYfs4%yI4HHul| ztg5*2RF|W&_~-N*t=^=>pyq4(W%L>af^KQglB7u=r#6W_2?B4&W33v8VrP&W@aGk0 z2D#x#H^g#&4j1d<6$u<=<^G(IC1)}}<|6U5Lwm(T!3o#K+|)!m z>a+7mABKBE6&_6qZlF_kaNIWl_!>58+ZOd43ZJQ%+@c;m#R^MsYx{1ihYCI@s+484 
z`Zdbvsl)hNF{ch|u=m#lmCh&2_d<{@Dz38kJjSi#TeWY0^)cQ^X_}8|3%lBViycz|}dABQP-KKADHmG#Nvw@c*z$fD?fQEsi1j!+LD=kP#W!wS&U zD5s~L5|oL|T=$;hs`?qvt>I#*PM#^CQwkgR7cMS6_h;^P=7u9+^|LMNsSjMt%EzP$ zA`GA{)>vGByLv%PU&ZF0dvWY(m`C$gj9|Hh5JnyC-we(?#i=#(uN6XOH{*uN($+MF z<7Wg0i`{2<`?=3*#%v}7#;wg*ppA|4+i*b)>lQ#)KABWjWg3S`TT*9((!!<6|MWIW zlX_~hi@ifDBLfI(EZrNY*sbdQ*iw|;>c>sgoh_x0b9c7Tf$?#&_}oDHJV!U%-K+f` zu`C4$S(hl9`|(LI^+;pa8HFQZZz9!97b@jeF6DanrQz++#D|MroUk__B53A~0id%2 zUiDN(=VFM!Gldwd9oFn#{aJ}(xTT+*~qnVt$nwwlTI zqStU%8+A*5{0GVl!1v~3u;nb}@-ch*yy#R-hD+f-!ora+;b~O|%ERc_>OQMM>I9!V z;oin%1@gw-#S~+PvBa}OkbkZpkJFus+Bb7kA5hIR&62`0r#Z_DLp~%Sf{r@_0kw9Nwqqlh>OM2r zM$m06pFFQ#eZEAf0cY+{a{_!&j<|@g<7tZ0GkHTEEfpC&v)+$E6Czc(k{Epr|WYaKorvv2+XmxJ}+=Pdk*wRlv0G7S! zShfM+25X@Pcj{@jQpc-qQAebusMjIGc*%ir zoAIOLnl9W0ygL_&$kKm-RnA{v)&38#a%e?w1tdG?yOA7t=gXIbX)5gG3#DD=ELIA| z?7Du!#SR6?Ec-4qcNMoei|-(R+F-kMF@Vi7&FK1YqRP^*ur`H~n}H~6;9{Q{J585B zArzzHCjZUEZW#C?rsU|LxyWq8Q7GyN$E^W&NSbQKE%si(uS$ISk#p;O@h1EOixRQv za7Dlut*GT;i80RAG&1sLoY>rr8q`)X=0Z8)f4vP_9j?<7X%{JpM_b_rqQ00Rz2-qT zuX=0?ojL}c--LOVJ{bc+Nly5b?CUpE8;)1?mF?*krQBmK=_|gi6 zFrwGrTng2ou_o>A84n+VE)X3z;s%_^C7k8z^j&*sc1#c$kkvH+)A6^yTl;+WWOUfl z0i3$%r7ICL^U{UHdoySr2}N21<^ro9Y5Wotz2i?(=#qe&&MsXZ9Bg)VBrUm5D3ol7 zzr6X{WCe-1%6HmOArlkqXnco7RGeQqy~mm)Mme^ zmq+Yk2O@G$_GjnR8~{kC(fm=4n2`S5kL}$rDN1+wu%tU&?+mn>WcpH!i&;8_)*|;5 zTM`=9%Gt;YyWyDb%3aVPvSGQUtoX7~(L=%K$}g$HjGmvSu!eF!|22xv9J1y+0fNd` zQF`cddLtS5Q6k@j8b5FW^Z6Z~Wi5K#k!ARSkGw=tTa7G(=;xQ0fS2%&e^>>G2_}tz zS^#OT_F1)YJ8CIYZJmUJLY^W)*EEqilrNKaa0We8LGN*_Ur@ulr??y~ZuEcRo@bo4 zN(SN_h>SFgs00J;bY@e24ozo~V>O(2OQ6@4LUYQt!&#ApPCw=E#G(+5Lwrq%N=Qj7rJ z4Q$+NU1bMZa7~pz*#SyU-M_(YeiNrBJ6-UJ{HJV8NPk%nwGU4KK3|l0!MSx3EVZj; z_2bKHGo#|rq7?Hulq3RsW9~UM-kC6(oPi_vVE}-tiZg!>jx~0fxw>jYAb$ALM-oAI zP!afsx5jRZ#I?mw+b>}z)CdcknV;Gnr29(;7`}r`nJq9shbezKd{RCvPwl34ya8#= zzw(4LSZ(I=At(`jk%{Z#rml*_u%8O~5Y{!jPSv(L{8))`*#tAltDZ*G#gQlN)qU&3 zNU?yaD3=w9sskWkVl}#+fyNPH>p>V7Qc-!IY+CHGJ!GVMs3{qDP0IKq7s^qP{${qqR1q@W(?SU@z_Dh~63d<@NxIVez?IiX8%a 
z-KEeNg4{MUSR0OJIWV6K$NTFWwfakyiWfq$A;t?;v-Ch8hfcGaNgLn}*2)ceN8otF zCFnqwrSf>Ia=(uAhp?DFoy|RZ+(S0jc^Q-xLH$7peA^>|ryoYmr5?4V|80zRF>9$b zCq}cEuXH{59xO;4$5S1DRy}DmNeVCA368yKeP|oF&IVWt1>s4RPG?$htJ3#E;Iz|JVK9I@9V3ss^58_!jlg&0am`K2 z^ZcBV6KqgoG}%Mr?qE7tPW^cv(nuXD4Hz@6Z1doBbjia=OgOe9N~(sQrZl`bN*gN) zaJNR~Bh&T@E0H&VW>zIJ6LEs=rDfKsFu%PTf1#>I#k`>NfHk{G`WB0kU z^THM^DXVxpC&X;F;TH{1k38 z{zrR~!X6FLOSloCS{Vv2RUA$(WA=Gc7YfuPsbDz^{p46gw3X?*96Tqv$AYxf@6>4_ zQSADK`bvq+PS5=uo8K6SaCl%a-fsua8VIy6I<>2WIq-)9o&Y9t>@-+MH9%0 z^hnKZ1YHg}eYf*$Uodk&fGk-d{bO#f_)c#K>Zpr1c9vK$8IX57joF#+*nCQHJjAH~ zX#e=le8+y%;V@@4_G9;EJ1UzJ6vj&pTZknANr9GVA<^Zi0zMb`gNLPGV5rhgR2`*U zv-(~7>)pG)5t+N!4)du|So238e^PV|-JrZL9Wax92$2)1dQq?vpWA_Y!rzhTPo9hdi8&4 zO94QQX;u$MuaUynefEYTx6@a%g-quO`aO(XH{6}1 z{YThC0ZqQ={pYqQ#&$Z=rXE#IPs6|F1-&7Iv58;!{(b~Bkvys4TtA{R;G_Qdp$}7? zjRk@x#$+F1v6~PEoZO~Pkx!f&c1$XAXJ5CL&$FVka5+2-e#n|+Q_pAs>NA9x8`eYI zKxzXKzOj*%3e=Q{BYOa$eNY1`UOT_^#ZUz-tH4O)tdH{&)HGE8b-?g=Ryq`o@&sa@fh)9J|6i)BUdKL zmBf&eHp--r%Mq3z&Pe)rRXe$Dgg{zA+$xsh|1Mw1l22u*6y~e`I23i2eqq0ZYV%RUd2(Jne{! 
zZ*QOXp{S#bRtYjt<3u@PmUW&TSVgNAitVS+gA#Nf54Ti6mcA6l5*gvUu*s^v1I~Ej z8vE9IC-2r;4W2YUqnP(b;|)Kq-x)(YgeT>v^ba_eEIDWZjMEK%_Yz=IC5#FYtH80X z6*b_8I9(z(aYOfdj?Wr7c(9`V--K7CuovCg*eYue?i!uMIJDD_V#1dyC%2VGdvru< zGMY;@@=jGFl#_e&Yt!A-@n{QNa^k6lgp&TFUV|Kh4KT?_l@pW3?Q<&Oy1__1mky*$ z$$V?gp+7O5(fg$&r^fYVraHIs_Ba=;W}f+wf5JLIyeGpJryF4nfbr4pG3$X~r$13~ ziK0|8pm02V$w4kH<~|IyiE@HZ2;m=>jrIdG z%x{O6%$w2$2`ONIc}mEHx}i8&PLdieSwFB%i8*rC>lCzl^94$5*Ve zqqMLoU#GQHub3rrXhprIm7;b4f{|!E{0(`$@3=}HYdMQ=x*dN}P}gV;^Z`=QKA^&F zQPKQ}<<_#SKHh8ru=jnz@1@$RMMI~cZ5=>#&OVjIv{o=qKh<~P9uk|( zB;~_+8lw5$2=b%_tE%b{sGd-uDAAtk>dXq)PWVLYHFu1STUHUb3yVMr|m-bF+7r&pTN z3OX=v6To#A%99M7d_x-t5e_TiIzmJ z)410L92z(+EY5{ie`>VP4)a#P;Y;E>pT@Cw{snQ}Um(tin-(H=y~kYb8m zmg<}7U0$og+*kH8#ejc7UT8CL4IGA|Br&ja2J@dhNN>^ZouHn{K1U*>*+P+f;TJ|* zV!X=4FU@;DUIyf36Ttjzmn-P=0kSM_zAYzc8vq|!{U=I(9cfVyX?69d2vYaq<)a2qEhd*pN}FKv+xx;OI}{5I;s(V_VMN zSbA@BV;9^d%p8!%f5KYEh_n_frVz@ZWivYg7bJ4Tm#W<{i=i;+yyRf$09O~kbTja= z`Hx;CfHM5?hQ0=1ftthqUDm4e@_s;dbrtLxK=SHdx+FT~5@YOK*#V1E?nh`Utrb_w zhB-5GBUT*JX`MFpvzlf_`@+zKp8D!ZyGr`HVkuKLBGgM-9nGj3t!)AV^W)cyCmeSn zfJ(3d`}GJBP#A_O3VQ<;P6OV9Yp!vr1`3fInP&U88nJ8!4KVd)%E?%Fo))fd)p4lR z_R7N867g@P8fwGQe=SH+3ev^>uyYP2DD_bjEmunCYD$yu@Xy#coNTy?@I@`c0$=3> z3E~m_YGH(*-+UgIu!qnjeGRBT@J;ym`I~_{+%&utb5X~D9o_2$v@$B*zsA(N<>ovw zf^e8xRc9EV2gZK2kn?7ysQ$4h!WHWpstl3>5Nt&MMv<8z05Y7{)lj4z{uKyV$5_5k zY%Zh_0xS9+`eM}1)`urCI42nn4`O{&SYG(ppX#}lE`_kIy@eci$yM=qdTpzdbb|wY z(uz{=b(CXV?MzIXJ&xD3&`jEdpa$TAv#mX{2^_cC633)U_hw5)@g@Q#LYmX%J`W`OGZM7yDp)!~x zsz#Z?hDvL{CvY~IzB}+5r7g5{0hUlut(0N)B_-5DgauY7qh$8E_TMn4X zpM05efZ^QyQp(>`U~KM__GDlIJb9v}38)?LkZA&w5~i8@?P{HVGZvemi`&{}tzC3r z^;Ugh*bSJVZqAkS>yiz6Nsn=_X@Th6@mf9vUJ5j|0*IXv+kLH&W^8Dr|l!{el(AW^Y?s z_vy`1Dvyjb-&?Iem$Nn)igXEgxU@ zi$Xqd=IfEa@D&A5Rt&I*2*5)6*VDjH%(H*j6eaFHR=P+nt2-)Z)CQEF|9KQ^eGM)S z+l3fI4JiZ^{?Fg|_DWkZh=^lw%(u64RsdELfB4c{PqpWlegXB!&wGCa{iDkpaKQ+L zf80q`h%(eb#D8<2lgO{h_O(-^GIBPp*{H1}}pSQloxBM!|um{&L+X{TX zdHs`&lV5+qTFMuS`sTDmVDi_8PW!$k=AEpWFKg2`FIH^&^+_Cp4&VFnThYxO`wDCM 
zuTQc73G1|d`^vQaUtjt4NpQ78`mORk1>WC0$w|gPOyZmSOUys``i$S6nUwu{;cw6M z7=C@|Z_gBme0|<;&o~T!HQR5`S`ftyiM1L*%etYKH;<>Mq|Mt{>UkH%Le>DUM z;lCya2;sjzvR<0Q6{d>Z2iFMNN( zMU?77W!~Wz%8EyZ|D1yP&tG>a`!C)8FZWrqi2rYKpVt#U!(!8G>!IU2C7wdr(h>&T zLtnG*Yp1^Eg0EXA4{UqkhdnLv(9gqM;w>TkyRR0$^=m?V^*9;yVtw&;Zq9})qDqpa zg!)U+_}AY&{58S-c-a>fvB}{e%SiH%8GjFH3Flj=e>8hPDf4&2`^TFXlW$?WhL>FP ze2L?ysbE+(Tb$03Sb`{yUL`;U5V$vfqYX(U z8?dOC$eol!gz(iZHeZexoUiqbD@Ns@C;MJaehe%Jz23*otDkkt?}s$|J|=|!pu%-; z`wdjIF2bA}r+EkE@gszt?G0rc=Yt78WXS*J!VV0=j$$LIocnG<64?suKX3{?*Wa*? z_5gK$%m=$+AJ={IchG(*meG#<1;VVJ3ir8vIZ4)vHDBbwn%_~pm;39nG?dm6=(yv4 z7WHy@XXx6qUy1f!JJh4dd~IOk(^n-F`-7Xia|k<#Tfn#99>JeR8i1QU@@t{Zfd(w2 zixr(XEkgL4i`Wr~KnBP>oZKni$fZ5<^++IBkPs*BFUeK*LdH*}ptMkzu=h4|4}F2= z43(p+M`4%>cX_=;vH9Ocl4~)(s1y27Imm9pj&l0siELlg?{%+g;-%(HSQUZgN#YkE z|1?w*aIu2T+#E>YhT6#Jc?Su94fQ%Ra z2KqAG_5~;Lm(`}QhxTlM+!P&r#$z*K*ZC}O{uJS+>rB5KP)AJ&^bKuyUI z!cRiSAN8AmV5?1Hd&Gib|4AZ(rdB689@KqD`#^0Zmhy^!%I{@B{hT-X-_ zos`|ZdN@&2Q3>lLdcKw+PnuSM(y|CUO6_QR2-*mq_Vgx!`G{2k`r|12lPqTXaoNiE z$){Kq4qI3iS-x^y&&B6gy2kAmKW_z;U)kw#6QwEPU<9o8lRJG>(49qPQj;l&J1A@@ zf$Nxp^3X^`4K@~Jpzf#r4)ROY0{RY(C#IT3b&b1T#NNwA@nr~zFXg%7kbjlZi^-n? 
ztx?t#PIx+jC&1mE)Lk(jl z=ow;)ce`@Z>aqb)60g@;J21vj;eLseONxIlr}u2YE0w4* z59$vF=>HTI+z}CPi@I+cnMDDy)bXc-0YJzelx$cCKdjAttCoZ!_zBMBnn*e?9%^NV zntXE~;H$)t!XBe+vznW8`uSubm>ZLxhmzZL7cS>kJr^o#r7+wYe)vOJ?v;=)@Tq+9 zUtIV@D`CgeLT8np2!CdTKNvGLHF;33(dB~)0#*J1A{TZ~uyOf%nc4bLxSVlgc@pyu zDrj7ACKx3Th*1-5S2`Ds$nD?4(qDganlRgz2D0oPzp`lJI)Sjj@rkT?!cG=yk7_z9 z2-?+*EFv{W&D586jY|qw_wxez<<@_5_1yN!6M#z1Y@_UKqb~Bge$$VLu|*w_#`<<+ zFLz^uy0M|er00`M!WF`CFOi!31A@=v?JS*=uI7-%Pdl2?ADM&E8Bi znW0tpoQt|6ih3f7m|9ragQWM9U7hbejNjbqq*%~qiOfsvO3?DYKhR#wTjUdghdaj7 znRuE!JU2ZUJvGiZ$(=YMf25N#7yYZ5efM}pGzl=ltrCfqop>$BaozK6QXcmg}9SgO;JPDIS@X=FTsnh>sXmtmCsyDa)u2H{E zh02|){Rw4Y8+Fb$3f#fGWa8#O{W{tpqEt$2;%fHfQv_-bEyzf4PZ7pVSYF*zk#s!_ z((Ju>)mDM=?u32hVb`-L`J(O_+cGhtiynwHA~|83Z$2?jR_Ux8jV}3Jlx3EuM+i6njTR}<(g{kY{e->Q zDlWCoZD!p#LyCsed)QM@)+z8U2T$5ytSG2wC- zb~R^bn8;(Vgv`o3yy#&p;p>mJ^qd^9 z8FFLMi|P9T0oVJk_uPvRiWIT>aK*g4bmSdBF|DBkq1VDf76K;PL?rvO^z z)rV8ZNcA%>*c>YcI&=Gh+6!5?NTFb>k#ha66W-Y4QeGMzFTpl#hO8JGEe1|w~n18NI&>$}y=aTm`mfrasix9QW*AAJ&N&7bA& ze1m+)dKprS>e3CXs{+xs4=!{l4I&CyVz%C#;7Ncf_bJ7vpekwTh;4xZ70FMs5x&)p zsZ1c^Ue*m&KxK@pt;3u53H$tnL;C{c>zH-&0SD0!s*>u?!J0k{XXC7f6$%nU!pkgS zb8qTclVfk9-I_hPlvkjY$k_)Cc707ADr`CQ^SBSQdFdfbqy3wrPpO*yhtk zq4<%jdZ$uq~-vnt^s`mr0*Z3 z`7N3;nmgWeOQMOqx38HW9_ZAu3h#%qAtCqJAOowiyv`GKx74HR=Gg5=qBLE0S2vT| zE@wYe(w$g*D^($jLz4B*^6n4r$)vS$4Utj5=eEq%Jvu1WO2< z&-wfRZrg?E4E9f+%NlQY73XYtv`G@j{{3_p;?1QA%BP>EehmlSvo5_jwF}X!HbMDZ z72agT%}FLaJ6s1IoRS|zgb$FLsK_5ZsXU!dWQI?zhR}v zwu;|F`x9=!_J&0HKhbN=3J+ghS{;$PLm_C_U1=o?Mzi`?adC?+I>D5DYrx#h!`~{&wi}oG@Tw!!QQS{G?f2* zZfv?~gI9*^LV_)6II3altx&nQUbtgXQ@~Kz=vSh00+oN7>=)}hzaUMgBg_xuNzr+P!Ox`od$WhM zW(^ylV<9oA?fsw5W!AoXpzFVs?aQJWwh$;~8Ab?Cm2hywIlr@ z9zvfdHqpxqzo;L~kG&SD#5z5HerI*JwvAoq0SG^r|t38G^`}}RV??-~avPpOJ zt?gN&h=V6a%mVV3BLeakwojNZWiD3?SBZ0C>#9qH)@^Ba9ZtDpCb;g=BCEEiV%@Bs zSK0sSLL_p4!vXQZaE5`!lh3zdrU(UVX%1A3z<1dvY$i^nQohwQ&VMGnv zW3_4TV_Vgvh|Ue!S5AaBtV~2>3vBob91w zFwW%zffQ*i{Ra?P$!Eu(jSe|2qtX1asCmCr7MG$!pB3pr_HKHpB^8ml_#_P0lp*_$ 
zE|pBz-|@?k@Ub%1a^myj^FRFjly`qUlr(U$=DaY=OWUL2s?i&AgpthhUC)p=s>d$~pU+I{VzhCCnzYkM$E_s82zDx-=p{wM~*d7b3OmvIHVF<417DN!BMh^2poWy4ML>O0lC`{cz`IKTMf; zzP&8px?@Bgotw>G-%PtD@hlCs9PtP={yX*8=VhgHb<7BMR{cJF zGx`0Gtf~{MKRo#n+>jZ10Ks~!dSBn|B}DPz-rE(ljpS1YGj0v~;dQMyk#u><(a z^jj4<@X*5=yPtMlaXCbz|6H;4J747+5NmF&`+Df5(KY;y9CkBPu1g;}0$}iY(gB0P z9eF1_Hb5)Ap{JmV_fb?y@(r!)gaYjKV+wR@NpcUh^CXL&Jvh6wrR#-r{-x3r?t5>5 zTaoG8FKz;rrB8hSN?qq?_@)Z-t#aq@F2c8X2S{iHl=AVEnO_y1IsoKTdbr6KHCeHu z=r1o;F^Xd6jehn>NWT~L*&>+A6LvPx8{Ohn#)Cza94f~(Y;nwsUUIMDVd{SQM#@LC9G{3f8=O4e%{3lnUD2ZWl`O9D2$ znO{an)$%iVhUl}TK@q$+awNl-JMZ;S)7VVgtxWtpsCQYLEb6mw8$+0>W!J?0E_xR9 z_Om@Xvir90e4#YG^nt=d7ug{d4OW9?dRc7EF)y(elstwdU+(CKzbmbYo^^fhGNu`e zZ}(XA%d8x=)s3B&^<|~7TiP2}RQ2{P-E4<^jM7CfYry-SXS7v`v@s>Ko>0sx_t-Qi|7#HiO5B?ZscU0t{=VosZXDi@93OL>zx4m zhr68k=@(M@>5p~`(%z+T{(6h3oRsRMkxrceIaCk2!DOud_PMO*N}ZR4 z!d(G!@3xyCL%z&;d^_i6=ehvd_w^akd~x4H(=LOnQNFM3CJaAro-qG#?%*F)(mf5v z6sar2(muou!4K2d8SBEtAsN*R$Sv#`;?h8rV^MD%t4AQC5;K$-OT_WA?jL1NdWKNJ zFj%#XQyZ%5ZCKs$SvGM7qbPYs2(5URK ztF~A)2zto6Zt2@culcpv>{_4CzoqbqWI<~d$WsAXAu`IPn+9SZoXDk5dGo2ykxA*h>((# zWW6_$$~tphLDTSfT6mf9g~OL|$G5*B5W2jMF;v0&XL`RQI#lPahxC9H(FY zQSImCQ$}75tW>q4>x8`*j6LMc4j7!;;Cw;s($mzbyXfvEv>=rS^YsUBY03nJwF%$p z3u#(5Hv`|qSe6-nf}bsb1W-GX2X7Az#Kp&Z*=67^Rd&9}yhZ$6J;&w9kAywoQ_dMS zNZSY0oi1RDw?mcC*nJ*_-S63W#HY>>r9pX8&;jWAen8-}kDp&Pm9Y%`PUA^|pZ@mn zWV2uQZj#WwL|A`0^Qc$k&Ev^^2R4!894@9xdj%A$1F!yjHf;p!T!S~%QJ5tmTeQ4XGP!tYK9Hi z0?{_H+}ZH{eP(*NFX7N7!ug$T-}!ia|B80}KGR4S47!7wXn`y-pN}Nz>w?!^BK)We zK6>A!?oq|&W9(~eVutNud@Y}7F_u$9+LDBlS||U2&pgSxiw4-8R&@JQOjzT`gJFrg zQh=%z@g%G9gCIKCDD$K9cW|{DWY=SY?{_w$T@^XxbFZu=E55%(c&_Bl`ObTj+0VEE zv#jbj$XI{6%zEex_=op$D~kL1pHkhQEhUG|@)AaqYoaMrlIBBX+Y}-o}Bf>_s=3S`q_rSIwYfIf zq!VLqAM+4JLdoxuUN`t}T{SJAl|#~^?|Es2?{8>|rnE!nl64Wk4Y#$6 z%F+(bdMWCS)wve}3Fi51v12A^yipepAR7Ic|H3w3a%|4=*=XI+rtq8w%oe@eck+3P zsfFeqM{Vou3(v=AozI+e&NP{j&pa{xi*Dz^k%;qgH|}SBx;h@J8+b!+IaJsCcOUqx zPtEj}il}pOKXM}vnvF#sG*|>5)Lq@Fpzlx}gjizI^_)^(=f`D>W&qC>4i60+zi9BRM8!iSzt@{t&PCvdA*ER!e 
z7mke73Zdp{PmanHy{ZVnrMYi#?gI-uTV}X8_=xZc{H#DMkw&_?8T&pT+;gtxP=o3F zu=wa{gHVAO`GnWau6jTVp5RPGqRRBQhn7*{)jPURW>sGp?uOIG`AbK!j5@w$)!iBl z3xDaDrYzECFagw{<@C~Z+ALzW=2gFA8$#8lKmWR+6nvs!bJ_8q_aeikLe3@_K8Pr5 zw7B+uJ(N2L0)-y7vHUF%6<4hOGd@lF^4T(#4N$WxT)|~(gWls~3O~GleeG7olauKQ zXhN*VhE3D4GA(y+xJIvs^6TW9<@2`q){Rt}r$v=M8@XD2!BkhSdACwbnqHRWVw_>w z8jM^4KDU~79ESnP`OKi3@k`f$H~P$4U^nqIL=-H2l`Mi;Kc+jKJAg+VwkgBff$qZ} z+{bM7=~eOp2-=}n^j%GXZ5u1X5-U#a)UZ5&i2TmmL#C7y(7e&qj%wcopjmkG{mh8u`#`Vv`&l249DHYb;apO}4MXp#5%}A;&8@F1k};2c z_$lu?@90F)n)`h}edq^( z7{0HPzEr}x^#2g{=HXDc@&D*kic*6X#biyPtj{#cE-8{d88g;olu5|GlN7QGBiXW- z88l-ZYo!=7){&9DvYQy$#_rsn-?^@HUB7d_ztewp)tG$D>wdjo%lj_McX%I8fG1C8 zajVEW8`eSR8l50Ymv=aXUmbTJqq5gBod{K$3tK~_vfnAJ6~q%wAa z)JyjzD+%m1bh#}nkeK+6Yq|l;v9!U!OkrzW!3KNvXf@@%nblrs*7N_#O_U%ghx@+* zBa-K3NTxxrU<;7E=JSgM5m!R-N7FCxlWG6Nhhr^t@M}53kbgIN(zNj@+7NwA7n1*i8!rn~uq+%f;7X3@5d8Ez0NxpL2W%g8gcbOU%^>rS=fj zub=Ku0`26_5n%$XYW9R&5%Y=|$#?l(=6cOPQ)u$HQ>W?9`IcGOK31jL;H;W#*{?$V z^i2I9Z5>67GnHmk?d6w0-(=e|Xid$LmJ14y$G7tCe`Bfyk9Y>loK20tVR4z$cZ0Q4 zJL~=Gh6O~ce_ybyOz&@C>bzMxfOiboau#xx8sV~0B_iPG&5wl65_30_Wy|{d`ilj3 zc7n|o)JD##Kdl@sX}Y31NDoeiLk{Ju>`lM(dXJ4;4gM+xu~Ue>cP`f$5fOe+x%r7r` zFZKt*FHUwZ@TH;n>OCrVJ$m}Dp7!c(AAzQ(^&ZvN(}V$x>c>(@oR`dj?lxuD`tM4J zKMkui5o}c1mh~5N%z}jzJ0ZDlLEH-Z74`)=D}tSiOC-QFrNv^rba|*yeq4q;mYwj0 zj2vi2n($E5Y_Eg)^MO}5ftL3GWryb}t<8I&i8l@jsm6y=T|R7GM-W)_iJi*t0l43z zoTnu?(eqdvs!NoR`9hLS`8A5Y@|5RM2axf2C_+`U{b=iN;K@)CB`s)TpJL4o`K0uC z^mM+$w2;M-CkWf`_ur8F)-8jJ35TuXI!AZUDwT<$M>Zr#z5x@%B@3&T!OTn&AYPk* zcuj2BQeTxfC65<{h_)K4o!7|}lL!%yh~K-(CA38lP}~$72w4|ZdOD*y5VCk+z~^TK zF<_51Sr{~ylo`+{c5Pv|%BqQx?ml8*n>w%3B;G%|Zbn+$+;SL-DREw=sBNHdi%`(HSR%`YLzm}9$| zg&lhVY069{no@60o*>C3>3Z1A8D#97Op{-J3A5?&&xuLjILNO8XqR`32fBUbQQORH zS8^Q2C^hcG96#;f$ywNAN)YS;r5F8mqKF)+%OWoxWs!}UZJnna$m^}t`;UGqKEnHF z92Z(v1pLa7UwMtob#Z10-R;O4xfSa+Cf8EKyebIr>HY1k_s2T@%ACKVFtL>}iefU& zT{X-yiIMT67{Op#&k3U8ZHRe)%oO*G(VwtALWWtf|p{xK_hghZr^egTqcG3zbzP*6Zk9mZMdLPDzG5?mSX2jem8Eu0_e*uddX~<+_*sk|Js|0v)H$u?X!y{& 
zB17(+bC`V7n9YK#U>{{~evWn>%N>BR+^LEloGD;HhPP{7l$XpIk?&osv$Ff6=P>;I zhl#mC!p>|XsV3J&I77hn6PTu$(3i-0015A&gwIAI#T58Hnph~DekW!ddly<8br1zD zeGF~5^>$|l=_W#Lp#R7zbM`8`rHMPks&8wc%Ri3J);Lvh=MZuGHcCCAzz)CeB`n>2 zJv6-RfqKj&MO@2bmpM-aB%2;Z*o2pu|4&4}$)uX!PB>cr6CUtuG~QC9wI)Y(%MvZt$|h#RIk?T$!+J5*UQzr zCY>Wo97-O`O>MK$O{u-pduE22TO}E)Tj!L5H_hEAzROK6w%L+%PHCiuwF)P)wFaNx zirc-tCg{ecA-%<=CK(3Pzyjq(m{Sh}-SVa1 z^QgT+I?D&|`FW7e0HK z*fKgf{A)NXC(j;2m!~_jGFxVuY^byK!Fip!jQ3YMF)X!}duKbuhQ}9Ab-I;#-8|j? z*zIjZX2KMJB+TMb4D{r=!1f+vx+q34UlbokgDoah99KN8F~+=QC(Zh?cfV+jjYeA- z^?9ohISH>)Y5Qq*Cf4l;j^NxBpekuFDmwiUUMb)eKX-fO$M3;En8msLimRE}si|@s zJ*v`lJ-mAOPfaWR$^x^OMh|T7n8x-5Casvs)S*9htDwIOa;N{F#Vgrap0{&wsB)6H zN(>HN^tW5Ivb=X)F84a!!{%~WtMltZQ4?cwMACDfsaJdSPx0z=M|7dzUmY}q(rJF? zdP5&mf)5o+8W-f`@&?1j=aE9OrM$8EyeYjWvf(_ZUlt{Yl+N?j-+>B1oL)L?PPS$a z=6y`e>l$I$O-MmrV1B-L$j7-(C}a4l5Sz6do)e|?9BN)dqLWValIG>05NFlj$?W*o zR_DS?6V(@jAVQT6+V5$8UL(V*E-3Z7s|;y$)fxL)l5rlXRnMTzEx=y2c&s*g}s}?WppDv`%Iy)OmUCc1ESJ$zPMe%=~ z(qMPkP~&AD%eIW^R|ir3)VOgY$|2(DQX7+Osj)huryPIwG^KV7dP!;y4jg^pDE$wRuEg|> zUX1Zl{zTYy%5;M3sp+D6Z1M&P2;H=)@||E3j-dM*E0>lv;81y>ecmCEjgM+@8K z8LMLUJ0_js^7NNu)DO!|NvEcRrCPgowc|23D!J_il~<1``7TYn|6CU%?yYyB#|XmI zEHMptA?yek|I)eCL{zwUja?KMgGj++K9idbV_3m}#*0HfAzGykvkc;VOPLbbaN$y8 zTV=!I9jM=-0zF=?t#hVHzB$B!)WqZ&3$iI10y9KN*}k(VHQ($uSg_pqgV zkl2IkS0j&AvVO|dL> z7RUt6NYn8|0z5{+p;HRI^e0tCE=9N8_I%RAhTG!p{-H6w^7QtzWWGl$G1L1b3L7^Y z!e~b3?U0d)CZ<`kVqTv3HWp>^6>w`WH;a#oX^E>U!FFH$B_mDvsOPqj{PI(>{O~D5 z-EabXtXsUjOUmawg@nmQdrzz6;L*dFb6z$UI+-403SG6#{73niuly|EL8ZwoT+%|o z9{^6l!@bkW53rA+!8f20f~o3S@p@x967}Jttp#q2|7zStP=9Kvzv)w5On@u(jDP+S z>iZoZmigNl+s}LkUmId7p#Lz6I~vZcv$$)fBQlz4)$@@n3q^6d%&PHD-Ra~N7q#&W zuTI@;`Y~F!dk+cqHr+M4VtApZ^{Q7iyfQOY+`O=IvZkVAr zUZE4X->tp*?-wN#zjf@Ncf0>jRonlyp7M)A&{A|pyH3eCzc03y_DS3zv^nOEpXqxLKdP4%(`l=)I26>I@q%)XTi1lI3(hh;8?%5 zSMA0*h3(1F_=66B>e%v>MPPlp6nJ=F7Uym>S1pM-DTx~M&Mk$?WVUt)S4zFLrEU;i z)W_o1O{m3ZWN==yYkiv_XQHz1FjIT$fE>hY!x{Jj-H#nl-(F^@wwL7X&xxzF4J)lQ%^4vbRor)OJZO 
zG)cdy)3O9yz6TpU#2bs3Iqej_!6W;M4}|iRQo8%$o&{nyfJjNIzKb_hH9B2ii^xqZ zyRmqOdNUNSUvr%esUA}~Lh97e4q+x#9fgwT1VkA!+0s{%%^X|Zlw;YkeACI)~N zu8I4B!!i3?2QEPjZi!wNi!mdUkruM9d<^&FXkKsEj2(r41{C+^AGov~ELTCjQP(6aMDLdk004oLX}Tm*+(1nWFbJC*z5#{ zbL!%H{d-9OZB78R(E@10f&TTDMS0%GOx(bY$}A%qw5Fs(0NSh}T^;*k>X)f+xo(lx__Ggu947qAl65_7MU$rLZ-{sLk@Pcm?xx9aeS-zlk2wstPwRx@ zQLnDkW2SGnRy3u=UFu5cF$D&%5Wf>KE{69D(kLYdN`AY3fgk>vfA0{#B0}ezJ8-63 zm3-Ph_4ttA^+3PYcBs`d1@cCG-t;U4yo`%c*vwt=7*v+Mje~lP2KB2-bQt!M%W3c% zw?5-rP85ygMSVfVHXA8RoQ%_J{&@E^ z9K=yqp)Ig4;eV8uJ}N)=|6rImYdD@$jwv-9A4K(W5VKhB{P0;QhSuHDEm@zJr~jcl zP4&=wxN5%L`(UyD+v(l{@nNd!KgWfdBaNUx|B5ML9334`qn9*Af?~on8izqD0_N*? zT5)L=pNJ^wm+L(mT448=2+x53NO5B_0@d%Gt~LlW4*o6WB}Xc{4qF6# z{{cJcaSKar9^A!KU+?-0deJncrDAxWBTuE&xhR8l!|(q{ka(oSKGkvZ7x)SWwUi^beW&Nm}oUdIah z&dqW*REOV3w6W*{Is37OU{>SCDU<&Y$>q;+2bpLs6fTo@VPk@q=9C{!ID$5VClnyY zpbZ)WB~C|f8qlWW;@(V#-GfptUwu=s?1W0;eBgD|_98FO*Sn&T1$p2%c85iMQzq8F zd5)a@SD5_vy!r8xX5%xV(jqwfY7oZiHl~+^B==TtYZ}^aH)J#}swnx{=(KEl3AAjc z2q>@WDLwr?4cL(wk@=$w-38#PG*81#2%8?Kdf}Q-u(8Y2QjPUze-=aW!sNAvu@om( zpk>wkZsC)Cf^_i(p|r{N>il}tlbQu8(Iwz?i-kwWGY=xvto{bW^yBvSX%!Pb zrUuMWDb zI8C61RBQ#q#G(9pVJJ4`VV`&|*3N^K4%$M6*RZr`3@=a=S|00T_}p50?<4ZqiO+vY z1o22|fs+ydQ^G0Q4E(rvSC9zfBb-Z7+F;>dP6txcY9&uQwEGe#`c@9UWi5>MQ)sJ3 zr|(+upWLxT+0D&Xd$8>ox0U(SkkaQ|lzC-gLy5Jy@#EIDu?|3q5E#cg1E_^-YGaAd ztwAA%P4vpVPlN|9VE#G0s&>^y?Lvbki;~i5=Dd|3v@Qs9Pz#lQSM)!2436Tt-q{qF zJJJou-C9j?%^-XQOy{I*7L5NYCc2_UOm0^4Hgp0bc;aR3_ItzT8;*}Exp{6eo!;m) z>j1&o8iM+zb*o3!M9m_bn`7hSYn+^?a{shNi(YuhlVOHAL0Z;Z6~X55+5Hg(8NPy7sByw6aZ!C&rG@-1f1hO2 zS3da0rOa_do$pLMbSm4aMwbmy>k_PHg|by2l(o=Fs9R(Ez;2dH#6GY#=xxkUz9H{R<6Q@15zJzZcD>H*5S> zH{Q#CNeS=-Nvu~Q*Uu;qVzZmHv<{g3pm3Bb=O%ts)}ajk88hNwf2&e;+q4zI@;UwS zPB=kXr#jlQzr0aEi|g>}2-dh9CRgzXT?-b%N_@x|{0Q zZPej#;uk-`Zj69ryjPa*2an?K0ZyJlUa((M(KeQ+qB!mOL-C$`R9lboLqwXLbWy%M z;vtx%B2!fMwIZOHD9*}G z8vLt*rI#RP&~R(BuIjjFd^2KXATo51QCs2|?$2C%ST7?pWbV9@f;&$)3C~uw;ZW)}l z3Ryoeki)fmy;Nn9ZJ=>)%YA;7NvTpB!#a5fnN|(2C=3LSn>#E+Y*S+ULvCIDvrfd1 
z?OsIr%%2V7bLs;?M3Ko(No;;(jcpCTLHY?C?8-w zUenRU7q}+R>e)=!BMN1L0XJHCoL03coF`9z4svY{qf!n_!>QDKCApNN9yZeB)AhzJ z9l?otq4=|HKhor4z96q;N##9KwNwUANfZAjoS<#c*njTdSsVVVe=#uhZ&L^Iiz3R; zqeh9J$Frr`S~X1AsJwngvH$%|CAS8f!8~T5`B?! z_^95dgUT1PHc&^5(kD2c=sKj7HnOA40j{XZlug?15 zRxouPq-GM8F=P~mOdOr}x#J4*Gm^0-MFk)FDvP|btpFM}FC<{j9Yr5dJN!R(P40XB zv-;aGH)q+gyP(%Mx7oM!I$f|0}RFKd?Ew_()Iz_Zig|8Eg*e^OgGk0-;_urSDdik*;PEx)g z=c2~jB%Ebx&!ZWOS(PW5uF>IzaAW~jd4R?~28#*%y60}Lu1bD7ZPS0^ zBh-q7VA*VaoNO!^yqeisP38@J<*p~KFbM?6EfDWBic}#-VJDFJ?iw48$ER zgPRlH*OzHAdL6!sxxVDFK25fmuC5LT(R+jJc$!J+RZs6-xdi&W&JSp1568#-QS4gm z-~|~$yL))ago3iIci3IEhtY8FCyC*Smcve>fqncfbH~vydk7f>s7TG#=J+^}wmc7& zKJpnSxC`#$#)co~Jce%gmE9=rx;pu1l;bj0*{FWq>JErG z5m<>=5eKrHkp;iy5q$hWm@u?FVs8MPw}(7Z zSIwG{*K|7*39?7`tu@621&J*(a()?H5YfJmSRhSmcTZa41r9N4lJ=iTrxMf0Pp%~` zu%Q$hs>K&TrzQ0F@K&MG?Xp|fZkN3Tk@(+=2`T)SoKnkmaubPbw;sAg9z--NB!ljB zZYUlzxa%n?^_ipm_fu@#o0JnAW!qIz*#R!2JYU}?zb}eD_#88H)rc zPo5s&@cFF|wcd>Rn$(%WYlp^PqqaK$64Ubf8oApiBCv76`O z<$_&{i>6(jdvHF+t20PgZMrc#E2;3rqbtyeY~4tmRvHg|F=_W-sHhb5qLjG^&W`*q z2AXl)UT_84_leD>_nBd=Ve>yM8+{$}QYe0xqh5W%@9x-IvrYDz)-FMm3%%;1A#PqR zoQfMXs%dsJ#y9LuT-sg@j!~vs-8?$z4RLKYKQhY+6_uaP^ zBQQ%A+03~PAXz|u&<|&r^LRjqz3}z<|FI%<2*F`2w{YPtXoyA4%9jcvc8;LB{L+pK zR^PG+zj;6ma#jIi%NxB%e-~dp*p(+LtN}9zPEvl-ruUt(fFkW51YZs%{`%y! z$9y+SP>_WC7Qb-1VGQrJ!LqJ*15qGb1$88z? 
z9o5Zp5*ESVOS`Kahd6c>%p=CQ8}WGzX!M%Vc5TrwSdanLiI3`hZbaRX&kLt8`f$@q zN7xBUQ0Kf$nznl(4MAh%8LzKD`y5f7QJ8ox-|fY?kV5jLP8&2Y&&C-Wo(OFOw!F&j z4G}7a)~YDo{b#gS9-FPx%m)1g#eKfaddtmiHU7kd`SJu2u$aHB^~9wt7lsoFHYhc(7>o0UYp z=phEq+|jbQ13FfR4s#GZRi^k(I2GsF5nhKODbCCHu~#fvpT++RGA+t9CFNb)GmNv{ z&djJ^v{dq2xRxeQPc+p?)sg^~nnbWT0Hk{WkX}_!ljA1}#ki@4(=E;1n>~b5k1FBK z94n8-E#m4Y7Mi-tq)Y+zx}&b?V!C&b_0`c8vw^4MJH*}aGIA}WN!rOLu#JOAf3j82 zjV_5|LxG{}2G)7~1sKY@h`u`!=)Z&9l-~->=r2gzn5c(&gY^_Xu&&+a@71(Qt7fEY zGQZxKNN;MrNugYDgTD1Pbh-}~lz3&238LfX%4G?dWm{R(>4x?QLSd)hEB&;c^J%Ie zL+)mHx&?ZTu>td@FQeP_1^KCIqx@uOCh!f?x8bfc`9&?EcnK;G#NHtC3-a?PHy^z{ z0`ya@RGzgkP_v<5-N)>;$9}I0d-ae+<^`t#ZB?L<3Gh{s_Rd|g}6K%`vrym>-1pE;%{U6r?6eWvE8i# zqrD(nQBd~ELWpsxH}h~%5D(7u2znKxA>J?)Pwm?=BKLhTMlg31)#uM{OQITvzRS?S z^_rAgHCnN!CthXZG0P^qYMJ(9j$YnZS>RP|)mH`T4kPDHuQj%+t{wO;^#8|=+CYKy z-^><;gw?N)nylDkN!oAO%VM9aLw%vpc$>*)Wv6W?5O)sbBmXVqw(aA8#>rHHgdxZ& zCD9XQ>1Qb^J6LkxPO5J90lg6hb$H7~sdO~ljsbqE;p^X3FZ?%oN=AN{*Po8nrp;Zm z3cKwFa^DFnBwSZSi&Y)eN)QreuAAqiwR~S^jOF~j8}YqSayZ{X1=FBzRY>*natwfz zPj?7&G*;9*SvG%~1fxcolOnF#vKqH3eJH#i9!iKdQ<4BVB+k5*>b%oYe8 zGG!fQ0V{94mt4Jp`-So98WOM;)`-G#n>c&MGxAQ9Yh%ciBW&m%W&gZ>RZ3sZ>iTs$ zyD)pV;htzhK|JV(Smx_$Aw{JVpa-N5eY_zldK` zb~(>}K5~)^-LTedOUfUwVJ|1fPUVRCdCn?k3ABw%X>^QXR2LMp7T`h?3^=&QKu^8F z2kJY5#{EtlxsuB2ap5v{V@ofMq62CEkHy@0u*i<9l zUsZ8fcnq;`Fqp#o;@WL?&rBP@oCX*UQ?!0c%*C=K{%BhXpn_Wz$_A$BTh0gp+bvMU zX_V?P2e=i;2YA`+rvtu$NMO0G$as*56jaoyLii$s1oPf3clcBBsO`;%uEiX@i77KB z(R5)I82;HS_xR5MWzZ-v_juONn1K4@_r_QMy70>{xY0uJ;U7J3;#EM?Wgd2a#Tn)p zb7e3nd*XRQ`|BBGJoly*B@aKUIZNbUq!`FZxyg^GA8?k?{KyQ?Xx+P2NayA~2GU zUIN-t*bjmC?+lGoHme^0a0GSd_j&hLA6rYBucXPCZ+GaEep68D#|y>0Fh|-r54e9t+LPJKs6N0Zk(V7}Yp zvl{+d=ie31$J29eRmU!^K9;}C>Ad0mfpN+;#5sb!szSdxLBS<`?yB2z&Dz%WG~4)^ zk-xz`^95eF4&W<;&pqBTC>6YXsB(K#(m6Xsj`Lw9X@@p7-P^BloX2a^HO~4Zd*sUm zEEcmu*UO80(U$|V^b0-wm2Dj5_nZXCO9!Nb%9?jdy2cZ(!-4QpQDt?qCF;f@0E0|Y z|L~aI3QiKqGYWW5r!nH;J!y!ggLdnkhO7sNiJ)vFm!5-vlIORcsCrVcbvb^b2Rx+_ 
zw+zGa>l^sxW?XlJf-4_RPlRgiaSCQ94Mxi^UR9s1o6Cgi@0LOfT4~Uy309wFB;9K{GO-K#j{@!Dv7xNMiItkoccu^#y_* z%?0ze*4&~<=QF<44u6kF%A612i~9@0=9UPe3P@T}ux4cOK{0zUL28Rg0n6_YaoE0r zg9u~MRdet+7E08UdB-fvM{JY8(v@9G1Mf$NgQ(?dukiga*DGWCNr=Pz>(U%)ow4+f z+q8W#yoh|Aeup(EP_3rLL*Xa@S0F{D9Djfr(fkqo*F7v(=PT!d{Wqd?9&vU7=`jiJ zH%!eAI~yVvA-ii4XT4sTp|~Xlf=_4N;av8DsvpGcL0qCWF;4@lrXz&4R)*0NBhanC zIM72q!sLgK8p_vZz?O<04u{o54u{DcVanx4lc7v%RM1!oFUW3EeS)m9%_shz9fG-@^E|+*sk1mkbT*=>A)I_A*vGE@|2Pf(&Qa z5HO%Fl&3bBJd_Q}Y)39VP9v-<*c?F{ST9~qBmNXd&()h#zLUzH+Q^zt)$5CQ(XZ2^ z9cUS6+g=RFCI2n21HIEO|13{%e0rdVSDI1?y;+y+3)M}vjYwS_M4%9zNXim#mQcGD zZV>qTc-l(7WP-x(QJC-W$fyX@4fgbpy~8Iy)(E;?5OyCQP8{}$>#3%QFdy>&aO;U6 z464Z9PKO444l4V8o$kllxkAI|;H5+H*b^oqIJb#Qn5XfO{R&LJ+gG=qr-&^jC1=5eryFG9nu7B>R1&md5J@ee!QlL4i*Ys$ zyH^9Os?e9vqLTjMZu+oY5WqPX!i(feM1LN^rSS?2Wf5>)0g^uwKI z{SI*3e5K`ub=JbUfrDjowmF35{P!)raj>pdKRrh*IVN}G)mG9#E+hjVe%NL|aI_r-3D z?qlM@WcSrsvKcfoO1m&>?z>le%+mtK9Cp4s8dt|ms03Ul#iv57Nou;;+=5;bG4{yK zl1+Gp=C{2TgDtqhLv`iO^qKI-czfoTTrKKz?v=Mdq_ZFR{nP9!^mMl${70W+U?zNq zYyoYY`!T_W0><7iDG0|C@2Z-9VE;`KfZbAaw}+9b`CRcS@2E$}R$`fIkQ_}RW^(k{ zXDN-72!R?-VA8Xp%631U_3#z|TD8XO)}kf}<<}9+|0rBEw=E4*4>rs{1%;$fwcd*K zN_Jm3q4SuW7gtJvtIWm7zKby_P+pvwPLq3l(6Ab#Y*Q{tYItY)-%eZZr>wIL598hX zO#}~vGPdpq5rikCu+G8E8_I`gkOx$ccq03Y{0~#oF%)5 zba_yF<-gy9+$x!$(7N}My_~lE!aXPJ-VwcvVMyik2$Ow|CoN2-@gROy5#pj!xa|B~ zhPoO%Q}?s&!CY4BDFU_YdP?B(A@}WexrrYivZ*H^_L(!s?dR2?hl%i*%B`mYEy`;N zPgFLaJ@KY1NyV#)=ozU>MjNV1N5^SgI2KQu))7wrrX!Saiz7tj*jkYJ6mtXHKDO$G zs#(SnXTs&EzXfeyUj)@bYSm7(^vKdBRQ>EY@#(k*^$SEeqZ1^)n^=3D6zc zw5GXvm`~pjx|8DQiAPZZKw=wvK)%6t`*GTt^xE5D$URBZsl5jMti3xRsy+#GSfHYT zoKFFA{;T%jMEy6*H~2NC55ru%^>td^l^?1ZX?Oa^#k~FdgkNn31sjYYWQhuzE~F%a zkONTE)J>X^M&4PHS`zA-;rdTo)NI(}12UF>{IG`Ij&0~YH*=gXPYg|B!T=%EjHQ3jFcn-6>%my9Z>JP@Z>)D z#0xl=de+nSo*n``0c0#VMm$*7@-6NZ|Xm@E!1f|V|SSe}1tsI=fk9L{oB^=v_PXa5|% z4#cqLt6Mul?8OuM)F*QFhqqmpe{dKF>~BLN7469dqaRSz|M!h*o)Wm`qSQHow`IM) zAXPPpGGjv-1&>m9BoQ*ukF>?t;emmCK+3nDy=|<1dJUFl@+{xYUBH(Ch;=gzPgF-wgB|vdr0shDZz$fGnL%B 
zT7{VJ`P9|iPf6@mD_+8pw8_~#Zc);tvdJ5#0xPxqX--s&wDRp|Plo)W|F}~}3DaC@ zb=wKYGoQxF;p}8U65)Q5R34+?z(tMXBufd%%?;q<4$ zKchp^U^S3#Qos|s2sYaYNN+8JmksjD618=*oqk?EMW)0APsY3wzM?6`%W-s=0UE+( zFBem|tzK><@0Nt(1beoWkfC_hf|^Z{VM1b zT^@ywnI9unKZNY>h`3YMnkfsh|1hJBV;u|1ApCjuN71)hKsg{=K-Ht{$w*K{OnN}l zieNG8w7Jfa2On;HLHY}JoN0Ocvoz&Ym0o#W)*62$subF2QVOjyDHQ^{jD{6zy?%4e zrgA-)+^-qpt?ckui!e*Fvl?5^q-)l=2D}!O+}GpIQg13nd!|yR82+CUS?jTHI+^7D5_+{a?V|bdzSaTI-1c+-CJAx4*o;g8$?=CY?MLP1 zxynr~snyNHM#O&HnEqp+E48Qd{eX#x(?kr&n1PPazkiRQ z0PH3unr2-V`wqH6i@HXy?M`KjXi1F9Xh{sqXtk(yl5o+V?U`ggcuUaPS2|><{9sTS z|9m4erdDJ}2Mq9WNOw@1sadt}Pl8Yqr{zE>zG2~Cpdq@d zh8%IY6^b9-iEA_4iEA}P=d+=LEQjrHFzLtK&4!dnwH+lM47b@K@7tRzLxLPcmna=F zxQJ-wg1)|5&?a+lhuiO`?Vhgs=Fd|Ty&A!U<-5}t-*b*s$zhFlXIv+Dvzv5-SEq%) z286!4#QJ}28O+3UW!n^cya$6^k6ZoE>-78*^@LM5G{HQWQ{XsJEGaP2Y~msWVL1Bg z%z6l#Fy=y~CjKpnE?|9{ggu_SO4=x$rZnyD#{bnYiP~=3W9>~y+3sWwdoZOOzWy*+ zh@Lt<+Z&Q5{Hl#3GmuX=wS;PUNd^iBER;PLE|4E8$_mD1%R%k#Ok<$+`n$zoi4nA} zC!)b-lTuAiAKN|Q_YF6CDIi`=SG;p%cF5OZ>p_n(Rz3cU0~8*u{Eb0WKu-3naT#oU zGClHgwyK-8qEPg?W9ZT19a64c7QD8#Rej^zQu^4otc__REBI$G(VvQ$ou-$hIt`6g z$a*^4PBuSMwlE9B<(1ivUdHtHkNDp@fQytZ(mNtsL1jjCI-fjzP=la1mR$Iq1<&PseP_Q`=922TyB#Xc;*0pWod?cv%=#T+V7RqE z6b7ZDN)%Mb<+V2CVeIH{%1tqci)^Uki$XweB0>l?*0nOX3*SJUl2VnaF4ld+m9^@wt8LQo>wdt z^8;B~jn@%L{B2?ja6^$}?;(u(#orzh8K#=40Zt~N(mo>wRAm^#(_jJGGFRU+x1|h4 zMAu!-ChaIK%`?@c!6V!)+s@wGblq5)Zj>EzskIjBtv4=pMExoI>|bIKK>*3y%Ut#a zIv`RTRgfzW^4uP=R!W`?E6|VHLcJlC+kKzBXpi!7I&7$-#L8?9iV z@q>Rq&^y0;uPTucb#(u0gJka!`wv6W#~yrm@*nQ&^XP$HK$l$}?R`5d8@Z(w z=B)=wxk0bnEvijNmpZx0TN>N{`i(_s$O>;48BwBl7gk}9deun}$Qi|li2g=CtTv^7 zUFt9+Q;-I8jj&Q0Dd~k5S%l&bb4+sj><#<>8dwtE+pt}l-dopLnr_-I+AiBF4g57w zmWr*Mp5zp~Bn2Ar52tfiEGFjCOFv6OZYzmOPd4AvgrNbVBi1i(SYgyIUF0Buq))Y4 z2M_j6%zmvNXnTl#N64{jBb&PG5l_y!qLBp3pmW)w<6)!sA(ZA=4|kJQ`v3pLYwUpeUFDW>*i@e3;i(0_jrdX z6V~o8VRFUJ7&fi0>5_+cu)&n|ogLy8~tl(vLQbdJEtNnSm8l`G@yJlBSz(h%KPc4@Q+m&UkjgH2|~vjNsVBLBd2CHM4Z zU9>M8m9Q^VHPbfoi}KX3)zLQaeI4^=(U)KSmkYl-Si0f)$9i<}1*obgi`u)e*yBkZ 
z=M*i#?7zyaLG3FJO(xR?23km96OqO<;IYcnI<1d|{Iop}N%z6FD=J#vt};)6uKJts za@pIxpu_&?s~^dK!f#e>F4rd|Ab2&>gBGxk!tl*aoeR8U5s~pUPM7gh%!c|7xJWY} zD~^L=6&ollEVy+azzEbBK{FcJSa*bVgw2}&nY8iQbo@k#zz;9K7*$$7K^)|MxPsBR z(6A-!%@lOoKNI4qDLX1U6xus8SBRxjkY(5HY`TZ_ig)Q_mGP>x%rMm*dnM$F0c!lXM3>yE;taj^yy-k{;D zjdx{(7dib#AEmaX3j0~spMCSj-ROT`XFbw3w>Vp%9C4->XnuB9D!4-Mq>iAVku;v} z%NW{;0{An92PJ0@>O$!~)6QbK*G;0+tGhl;HfjNnM7hvf>e|?(ti`h0%E zNV#3ovv2CnY!-j_Q{#npKWw*#Y!|7nFN(Q$UD>snDpj1Jsuub+S}EL5bNu+hY-nL_ zHP3WbQhedxQ#U+TD$hWSNE@CnE<-kpXIgE^_?XhTpnpV~>;A^iH?mh;lDHj6I>IeW zx!x~X(1^0-t%ANv>swURx!8ZE=3G`7@5R7&MO9GLjU5&Nf^aI1!RyvF4k)Y+SU=5w z{PyF%pV(g7uU-1I8_%Z*oN~nN%$+BxK;X^C$B#0#P(b?t33BbFb#M58T1Z=>YGBb{IrZTI zOnrj!naq|@B69lW*=&_x8Jze4r|BStt%k})(6opz2kShN0+>~yM3 z{v3EjAw+j!)g+in-z?=QpD@>S$B{ns-<|RS_Oqct1|8&y93lXJVnmq?IsEvNxAeO| zP~L{>xXB9H(~x;2yah`O>ByvA-TDYOjAuguU;Hmp)D8qA9l{B(bBgr7Al+&% z11o?3sNik6I~f<#cQK)I1$G48P|avi;HMOWsH0!VBoYCO9~o)PBQjm9 zj5irBA-m}X4)ZU+fD%ev%~ileF;HxjmMM_1UpzB|Jp%_r+s5&nQt)UIjzE3ULcRhD z(ACI-DaBGCX?~h~#3JbcRQ|~?$j>P1jGz4ZSGcXwp8Jt@%i!d&@46tV2+MHYIq(Y0;G+BQk)THviIm%Zoh4O8W?5NeS_=Ttm(7^p2IhB_epkNFsCFxS;O zw9EOJ6fSfJ5=bjm>t;Q`t8cMbkB7N^9@~@H*PLusj1SoTG*$COgyD7j$G6k%L~OzO?1ZFi2A&H?0L?1y_5IiT|D;~=K_0IPuo`+{iwBVPC?|ZLx zilp(x)Vzx0%Wbovli+*ye=+u!K~cVM*sz7t&C;=?z#@VmT}!thu?qrH(kb1@F5SqY zNQcs}OQ&?JGziil-QCaa@Bh9t@0WMx`NGUH12f!Q_j#U2T*oPCQ%om$Lw<3n|9wmG z;Cwmr)`-FD-0H9`@G|Ys0fMq`-{s3{r19(3q8j9gutmK9K72Jq)8Kn3+Ks$GVKVmd zBm;ruNvt3~oTPbodi=S)J9#us)tj<9+pp0X7D%d`H#yUDwoj79zPqDqEj`sL5gUGXe|wTr2ljL_WZ# z$*A7#;uF>#ejW;XjH#)g1maSxn79Le-0VDrUs64QyQ~*k+G88rR7iK|@zGOrW8a75 z@7X+8eP|&0hB)Rt5ywisu4Mol9(cCBX;Kx|w3YO13b7GfGS>t|5EGQlh zX)KE-Dz*hffXj52{|IehRm0g>*2=%inxAt|@04jG-xSi^V*B(oa-!?@VtMaMOv+5> zQ^4)Oe18UurrTN0g{%B%`=H=4Fn-sS0hd@bPzmTVI)IU(sxSaKigN6E-C3>7-yLQK zeExnu{=NT&@imBAa4+bXW^Q6iN4{(EIOIYBSu;q8`0N zScxMZbSP()i#aIf_9Rk4c` z0btfLl4r3nVgYE?zxFPHPRXiJc$9}yIa}W=)W|D6+q@FNtJTPeVvE_^qr)DlTalJc z#ZjS_BiY{-dwzhA*)HGoub%g`stNC1QeFA&s?=>6sDrs)xe5KW(m&+m%J@|ar9pZ 
zpY_>qMT_}*#g*2L%*zBcA^latw$x?(RmM@)w&YXby1^$dLqO>9POxFH}nd zh~u2+6Hx$m1YD=)Yb~(uAN~>1nP=M6dw=QI7<%fyi~YIIwQ82`{^?aihQxHMHBJKY z37P?z=+t95YCgaT!MBL!H@h|m0QI@>SwZ(x0`plmS;hIq5Lg?N6hG=JcG#uzc(3m1 zPA;!7O|K}g1IsT31$lw2Re(MR&H<;CnOKTYX@Kl-E#~b1AOlI_J7vU-mAynOJRT^lO1u6qzWOSCMdAyyXAVoT1 zydm*c)4ulDCC>k<_-QmnOe_UfX_@2uXE-MEWh%A+r(Vn0tMmj@W~<)=)Ic&#5c=lQtBpCR6~5fd#~fpqgq1r z`zf050#AQFY>Lcr=m@(9mRQS+kAJ`_t&~0aN*fytMjor|HGAHt;09HMyp?@LToI-8 zM5|$>nCSy^a~|d9(f7S=M^{pfd&5KnQb$H-f{m4x6hC9PglaY4dksl6@q1GLG_GU5 z&9`=Rl+5@n$Vf14(cX{danq$~8X*@Yp#%imu7SZ2RJm@9q>}%oY&DZu#zvmY-W;9Vqk=5S&n%NwIwlsL*hKW7mTtft zIz_xc1Z=!8uaC+;<*z7__g_Ev0~YQu+e=*3S6+Xus(k)#AW(AbXHLqeDU%m;gkxbd zu+&N|HXey6gX(s9QIygWvzHj2;c1f**~3Rg_FXv?uZ@YbB%$j?)Y+`o-tHhwa<+yQ z*qqucy`r36eun&h6c!h%T<%M1co!$Urh2>zqTX)?54yw^UMzAHul6Ll;)a51IX^15 zGt>DkjaP9KpvtJ!e=e=E7LjFtvy|YOnbZt!3Bbgoy-c2%fG|4Bs_H$i$EwI&C$7ZD zl6uS*Kd$^N zYRz`6iFcZxE{(9q<~1((#=YL$u$XV9L-o9=x90j_4|IErHlgPCfz1n_L)jS1yK8o? zD!d0An@;Aeuw$rI%qe3`e~yJEp>@E;X4Fx0pC6F%qY{8XK#2#UJ+Bb#v0X&If!c~f z`GY2y$L&af7sJU( zN}2K?%XyFwuQ3mjOebE-Cty@MOWZzTSrHqVf}ndhgtOUp2|>MDn%P;>7esc}s|yL4 zB;)2QcRIZdE*(m}To9;Sd0q7p)Lt7#KMLO5B@ceyREu9k%@z_g~PeM`-op(f8*PS!d@YV%=b-E+G zOVP~zo13yIcM#7xU!V~PSqs0PQ0(wc6};X2{WryWf9`M5MiHcsc8 zo%(PzY~o6Lmho@s{erI?I-JEiz5MTpAqAOJJ!qm5I-k~Qu&jDdB=A7$&2xw`*SS)H z7oH}}03^3aSj{x9LO?hzBp$5p zxv(Co;kGxLmbDrt*Tr{un~Mwg`vJ*>kG^yXaA{$Jkxrk$bk^KO87JKWv>IJ=QW8Ju z_LU_o_0x!Ku+i~>Fp`F0L+fDYB6=-mi-Q-s8A*rua15#T7wgFt2v$N+ml@p&aIoum z@8J8l5OWu(9u>YIu)a&>Pm1ds$xP-q^4Q=gOCHpa9H~AIME?MBrzDS0>c?UZjw^i}^V2&77R?nWXJ?N{pDX#9ZD-uk9 zOesYKEHb)l55B~}jNzXT@aK#_r3wD!lT&u+(r% zXg3!LT8CN{8!nOUG)4bzu5TZEJC5?NzbUlWK%dJ(aPJ#7eAN=7-pR#cjt!n^($b(-yRn(+lPQLB>l-HJi=F3{C-{W<7ym{-fzR1xkkikQ;> zs+729U>ztjd>me?3mdVugjl+0>oE^be@wzmC#&}3Zag-6i%WBs8h)1I)Y((q(lx@X zw^X@mcd(R(Wj*q_rcp>J5e>W)h>}?ILI(NCPX3SV>V-D&Ze{q_PVVKJ!;0|nAy{{V zCP}d$7wDVmXL9r|3yG={Oo17tFzUr=q0D+GGZJ?FVGztQWIn{ZE*P{K&@)`U$7gY> zHG&@TVN|p&H&@y`Av3!qy@_QqTW~=TUJyv;2812esahZHdZ#t{Q?Dh+uPi)pA@P>u 
z(+Wxwa@{|#Ioc1n)gNPUCv?AJ4AYG3u#QKQ#U$0_XkD`lgwF=CCu|K1m=UCg28OWH zr;uglDu-$6P}}jPKJu6I3-6X;%#vwDVq1$b-a5=9O}=<8jGj82KWafL_Hs?bkoyid{7tk!q|-xr z+Zz{O>^trYZ=Op%cAD>5SUq}bd!ZKc?H767m0GB8b6!d@`8fb^SR}P zT?x>xvH9X(2^vRt@;A-vzBw=b8OL+>@>7pz<%EIbZZODXV6|>IkknEw9)>M5@XC(r zSVzB9l6*EaASg5TI(F;A`)Tihyy^XMd90@puJ+QI+6v1x&2Y8U`^{HO>muNRQX)FL z;}5y2n!8fX>Bm>j=+$F_Ue}a?WKZc}_m@ZnYy`#QgEN+Q}uqUl6 zxP^&m{Xjn%rX8od%!xE-)NxriNsP9%k)qN&?JzZ!dh|Cm;Q3@_mf$NTLr~~##Ryrb zq+3RY8@=0XsLs^-a$#()!$F0clC=Be{r$bB!IgP`pn~ov$fE*%5}*?u^0;t}xDuaW z%fHgPG9N~cVPO7uX8#r1fAuzy<14!#F4rdkV}6%wpyYb!evjyibBEfB>p+}@y7L1ep zbZ1^L;3X|{XVGdV9FRz;3gnm?<)gQChsIp;?H|X5gqc9->^wjGY@u+Q_+h$8ga3qg zjzNCLY~k-DmZ0Bfn6R%RHub<@3mcbMjjZJw(>ml!wudyGae5`y_e>Gq+x({-7R?G< zro#`l2Bh?Q)ghuTD&+Hc-;B`LUCOY>4UC$iDI6z9`+EsDNp`2D6fpq zDAN7*eq*LJ%@#C#?juraH$o|^`@~hTm$Gxjt9>{|D39F#|YVDCsf7yuRIOPsY-jcM-M+ zLo9i8*`F&Y!4HbOx<+)F=@8M$j~ac{n73nUHo8P*W=>lAhAREKlntQ0Dxo1Jkq>zA zIDxWQsPs~$*XJmSfQ_tY{&vZi&n7J&viuRU%t~ZA;$@Q<#%^Jg)IGrJ{l{*kZn)EB z8D}{fi_!7{ zz+XQrvOMe9zJJ^B@86SsANO=UtsN0xZFb(m;FJ)jf`4Auf|DijsFLO{+g z&=WY#KNDjZq=D{&DY&i0gLeZDVt?@8mp8*xwrSGdJyS(0a98_erCV`ecBH|o>?V1Y z2g%KYpfD#1L%lL+fng)JgJaB&BfyfLZS$Aoyc ztX0xJl`dG58SD?~z?gnGEERF`M6TpYqq89@9QZHU=10J6EEAw8xx-9Wpi}fxUB74` z6nNBnm|wI4n2{uE_^B5k4JLX#08CS5n1u`9Ypwk;lMmIi zQ}6X5YvEDDgB$-akqOBXp?ZG!e$d;Ad(NnVc8_;93dy+F^+EH@yltez%VC$u?QURU zQ3|h`Mj~ulJp-w~%z^m0x_U-sJDwSuiUPfv z1IoPw=Ryg(aXTWG&>Yby_yhTFSJJ0Yl=+Wf=fgb9UtnfIRTDd^ZJT*6$Bto#^DtqL zbI(7$1Bxyl3}vg@=_Wx-OLleY<}+tvaXGKsN1xZ7g|(G3(#>AB)+_J|CzE09(;HLC zF|c-wtA@TQm|Io}78wq_vFt@6uV}|{Bg1vo^PA#zk*CGb9biRJ+n;PP2>dJmaVWBd zw`k*vdn7p^T&WdP#rQN44cUI8b#-s^#XVGLuv}mcZwa>QbS({iSb5LK_|@$Z#()G> z_1oDLB~~IvxW&acN?_`K^M5yWOR~q%YSMjo0v88VFj z5fzSD+_y`>^)GeS;jaq**~Dji(Y2KPJIdwKQ@(fU?&xl2FQAp%3DUm&>u1N13onzn z>7&h;>L1co#dOd%xZGIY{9OszrDTTc*(D^u{QNQCguPf~cc(ZU zv|AWyZYXM%rSx;;cU4e~A-2OBqC^|4%4*StADpYt%C$9<#dRX&g^)IR?d^DNzcaqp z4@EsLPl+^un4op?-|`|#oNe`OK?V2^w&sJCGfp@0A9eJ{&yPIHpO4snh(S^r;buZa3(WX6b~NT4b=;k-pee*g>zOE 
zBq55bmreiv#!8kjGljn(tf?k@yPA%$J<)z^%$sbh-n)qt)-9n`ufZigjo*oP)FQxR zq5c+Z$4_U7ALO6Z0=!q*(Ot|F&jm>agee9-M&~POv-QECW&Eg$aMJt54@ja4mHvT% z!UuX0;kp_v{9mIH`C44&01}7Z%|cNJQ??NCDD=3JR|`Otum>OBM?wfhYE)RidS8ZA zKer<#09y#M?Q&W26@46M$9cDtsH?-N%4%tudYmr9$z1#oYRth^}>v}#K1kI zOa|9Y9mc$=a2;7Pt0Gsq!0_Qr5WznSER}bB$LqfMv&d+7avO;_W~9>Y1zDmmojfy7 z&#}k{?9Iu5eaZ_qB;vsMoSLW99yU438V>)djQDV>Zv@(m|lHckoNwD^B1^pY9RxLjrXibvPW=whT1B(h z6jjlwao%_yU@>mmE~NLhUibXJY)v9Ol1Y2a4^H9LESfpze@t9S-^C@*=G4##YkNf+9<}R!O-x=gz-& znOM}#n-19B;Ow?Xz)=LqVB?K#KkL6Rn~QiN1RoWNc-j^3UM+&NIkdtyVkobv*X&BJhS>`)IKOA34OH|T)|hBMKmi~U{uS(@2lBvWl|X&j zTz|N3AmC*pxmSObQsKSDfKPiS{%2D$yGWL)Xmg_pq`vXnt+k-9e4!U@(&b$Txw7XG z?Zgi?FV%QniHE3{B-UU6Lo+aDrjZ%+-&_wWEl37JGjglL5|zHP8uImffZK;Xe8EPA zztV|u;M0%pQq!YrhWXaT39Cih(KU&ox?K0=S~Fgw`(LXnKdU^74l>%YmM_ZjkpH87 zIW^Aewe($|9Dh#Dio*EoEkN>)W#4Rdw${`zTLXuP%a#HbV}M8U+rYPnAxuL5;B*9dAl#4mmMRf ze_YU6H_%%4DdS25^P{_9!;R_4lE3sN2&-ik0DAh(21RyTC~enKy=_HJ>r|xgS}SCp{uX$sJX&ef)|Uk zUwIAWW1#lGHrB;xRb0wm8 zWqbKa(E_`i=6tJc5Q3I&et?t4EATH&QEy4!(HZ0B#O^yslHUIg^$Ek9CmupiGW zkrJ(;i7WpHDf)8&#&cUvIswD%683Q%@bhB~dbqVwO8v|NoT?O+Zly`X6<1ymWB(uze4qkc{Z`~-~RMf z-QU_}mev<&#G`7V6cewCuvY{+fje9 zL46&EfP`M5v2~esk`>9yVeQOS$0bs%rhMHIFa=UQWCRRiYQv1;GN#6Z0pB_Uo?0rq z5?Gb_f++F#o$$zGI@x#M&DST^INWyp$*BRXQr%<9dM3_{IYnOk;vw(nrsv`GqlbOd1 zje?4Py+Mu^m}oWLfzOZ6NeD)3a%HY-t*IJGZilx>fQ}T9-V$ok!#m8hJxU7nqFytB zAw-#*xwaIPh_Rg#sGF$Bz(>@@!{_DHVQoDCYycA7CA# zuP1HCMq-aVsxlqY6AIqEXB`Ri4`O+ux$L8NKk^x6mT)n#e7&6MeaB_?mKhgfQzHgs zh$4}2{n>{~c8hvjsGjGPs5=F}9yM51y&L1k#Q~&I)bfF1Z65K6VWN|t zLSuY0!qx}W$R92M+ek7kjSEZmr`)7foLOF2|3ew@-w49j%RC^}ToHeQ1o*3eQ^~4K zWZ{AQKLYDc5@=M!{~+!EBOfqdi17(7zTW5Hh^gEMvjbL5FncjBcx$!Z`7ADMj^&M# zu7@KSic4Am1r5MF()L%VRd&*rIycn24__(58bh-%{!EbcncR2MX~bWDn$+doa;sqf zwpsR+dV4*lzwnm~*&6F#VYjE{&ix7oLpaQMm@n7PxcrSybk)_mxY?HXyBt^Ow2Gy~ zB>pS`ziDDQSSn^*7;{>CrQ@f*1FsoOzY~GxJZbBCLB${#GE}^00l9J$d}iEKa#~Qg zvPSW?Yx4({gf}R)dJFfxKleHpR%_(0?@C^Z%gVWvt)@n=LDbNCn{&~4M2526C)x*# ztsivNH7K^8=5cBakh;=Jn1g13v4B6B%gpBROJZJEL7#yeKB}@xibaxr0DcOwj7a6U 
z3+s6GS{J;%Diwp}G~Wg=4V&1B>&WH9#B_@)zRXsnNrgPzxJ5d~L~72JfYL#bptpygbnV)goE> za7Y`YcL!tdGw&*D6IWaFtv#2 zuELOXtv*gANSxma1>N1Mr+eQ0nmy6zI)>M8jdNMj3Ie^hUo~A%Ghe%Imc&PRYhPsN zavaR;Q928XX|e`ZJo@1dDt#eRk0l&9EFokbc zI@K^iEHUwKel(LVOjn7uC$gK3m>=zT9qzeYZnKeVNui29GmF;7R~+HGuyGT0bxk6r zZ%!^5ho4z;xiR1j)q4ah&a`z&ke^Mj&PAI9{pzE0**7$4~6uP9WGaG!R^tg2Q)NUBwe{#!k}$A-l_2;Rxlh%Cq!?_Y zDB-GIF&wE~3Ff&A$ek;^#YHUj8zJ&%Bfa($b<%nB>6)e{KQ&*|m+VqUTruuaQ9!h1 zq3caI!M2nu9ScKxgl*bmcS>*+9^?0w>#dshMp?~un$D5t5h`6N(2pdArP+LLm%o45 zL~AyHm3i)}(bY4jbn<(g<+<8Njs?dlcyM!-{1WzKd*s{v2wnX0(Hqwv%&2(u*h%7! zfr)wpKUNfs-2#GYg>^Q_b}z}5_~w}=hr>E=!eOQS;cDpbz*w4i$FOk|2gro;nel!6 zdV4HNd+t=65`^GFvG-IFvxF=en+0=WB*SM^AD*N`M;9p<_`$S#qjH=AEa zN{7_7s=)R+H4TyzsuvR9g^m4AQm!Kl%X;coLcsd!*@NjO7Hy2*bd#<^@1e~NyPv^N z1qrZeGp>|+zwHH0lVMBj1R9Ues!j5Zh)GZmVt>bF$v2dQE}(vlXpC(Cth92iAK? zgQE|^G_4!9+re=1`B5LTC!8-)^pCGnI|}}ufREF~KDNNaI?8sHbP5F@Y_NcqhT7(} z(sYBai?N>OZSm$EG!zsBDb#$KYFZ|4Zxny5h(e2Bw)Qm>@th$-3_YpdG|K+kfE7jS zs<4-&^Q+j}eXAlZp&Na>5wGD~%2OGg99o2^56&-%CaqJi5_^YRG<2&=UyzRE@z;k< zok0b~+H0f+cQQkrW=oZv+zDq5*0QSSl^bfPD{69m$IpO9kjejv)@R72A!1qgMP9o~ zrW~M83I`)){xKC&1pm0L6n-3dv6`w};&(z$i@rmquCeofjUX|QQM=bC{NKm`eELu9 zxK96+y3cw3BB?v@Tvv@|t*=mAIhmuF3kO~l-vy0(^38-q5PoZlqoi!jrEEv~f!M)Q z-!?2YEjFK32wsQCO7ypbRm88(W65?)hM8!jYu!<>Id7=E)GPV@frQMeE#HwG04Uw4 zovX1@mBl_M@y}%i6;v1A$ld`SX-5qPOy&Eoyv8p=svONX0jH$wQhbyxJTI~;?eBjx zSoAtr#X8?PrhfCiOl|2ejt9hNIN{)GkMg;Hw;}YA-So?ceV@$_E>gB!O6%O(&#IT~ zmfom2Ix?icAi~1Js&iKraXP4`HFLe{J>YF$0I4w!1U&b&jWZ>C2Ow3HUjwLZSaw0D zk!+Bl)O2@Bfg~RmZPpi)$usa8qBATirHVq^0}AvOq=ge`8PWN9RtEuuf@0xrr6U1V zaqMp*l<)dk15qW2yskB~bw7`)vJT;6ReE13&&D;ogx(;(+%w@amqeMsUS^YAR>2BrF>dJqZwth3sW+MO!s7rhD zx?1tK>%kI*{Tzm}Cz_N#X29utddHa6e8~F{o%NELVQ(>}rvZ?csl=E%`>Vz0IgJvy zUqmAcmZXKUC8uhhB^zqEcK0 z#S|UteHT|s7+AjM8st7*_gkMSdH^1%^OIdWBizDC55#vPSc>Og;RN>isYX<|ORX~d zKm28MO;CgmA1|4?lkTO_{Pj5_KmHpatc2uqoA)~jfy(>MA*R$3FbZ}vK#Z3GT3uS? 
z_+|hpdPJ9hj-NS~=iC+ECB#2Kpv5TyP`M-aMbV=-vM*guP6gS(c9sl9lKwcM zS{#c%p?be&t>bHSvEjGq`F4(Xf}5VVk0sOM^Ksz>?UD-8VR1uDI;y>w@vxxBJM$rd zjlhh5N`FoYm{SY^&M9S6WZy=R_QbG-a2{3OR*p3(S3J3>zQfZ9>D=1e>q5Twe%fRZ z-p-9x=OIWm3Ge5ENC(K=H#V-L){FVp+B{5FFlqmI^QAv%jvzCr_RhRuRZKS-K-j6_ zG$$PGKa{$)sh-4%M#iriq`VqMztIH`3er1?Op&V`r9kl^FggT5IMpN)IFFRG1tB5o*UN@52BA*rq{^u(;{O>6+BGQ69 zZeIO(_uSro9;%az_leY>mbdHvO-SV8rYMf8HNf^7DAWq+s3=ek`#ExM%ve*2!h{k$OE=E|Lk69lA4knl(p#IYkxKh79<4D zkoSqk^zH6OsI5s{SkMK&}5y_L@u*i|Dz{wJ~(Uyz+0B!VT4wHzJCaU4CE7 z&Y0sPIYfSYQGjFS5DMBEfa+PwGC_)K!@dPl0*e*tG}0gxm}hukJ8lIvADm|=Im66V zLM`(RinB%J8P)+yDf8&Ji~3$m8{S@<$g};y!HZ9~gI~OkcG!fy)~Or|lZL%>PW8Ek zM!k)L$T&p2JvyV|d&{+>Yn>h$dA0660Zm7@YhJ*1Ccf3-$stb1rv@JDNUfxIOGm4; zE{lnWft@a?y;cK4i}fj>w`1p&e6HBlH3S)jImy=BnMJ->IJ;Wm75zkyo3No64 zmv}?pc9UWN1~I1g6Q<{GiH`3YXqq)*kPHFGw3Y093u;XoA&tXx=O2y`gIY>Php9L=S|ZeEfuEdk*O5B8L+9rwjl|-u z04r}1;ReY?pJnF5f|fkjtv=M(0sxz%2o%KclSf7R zmV0IdJECOAdd`a9ZD%Cma(&?G)0MtiPgGiyxL+f3j$lA$i)nN zj9?2lU6ZOr9UGoz-?fq0NJQ5$oEg|vd?bbReZ|ft6s!293M~wE=pH$$wwrWb=*96n zKJoliYcD8y?#co{F~s!;%p_cCl7AP8<-GkIy8$F|YX-JUW9C5%=qg}F^0?JI`5i|u zKS3#g)6qqW0&G}?ut2#^K&+8tMzNdCs&|{=sH%6`miEJf|g8x}nzbo3#cPQ=p zKC)>hWGQ_z0DBTY;vhxZxT^^Ld%v@|(?cStH{%J(+|~1GBH>U4SYzGw3dq1?5R$RM^>_mzrxc?Z05@|?LyIvw97NSeYee(miOt#50NkMbOXHAmRN z7UNOy7@zd-!J3_}=b?cSL9v-&rfu`{6cS+PameW&OV*9=zTMq3znSKTCK|oCet0L? zChg^b1Jvn+9k7T7n+ynrPs&44CP0grxg;1P^n3VY3AB`qUTWp6Rd~?jIVd2+<#zVV;$1T8+a=`?y}@83CgUyF$oMk38oO3Km}w zIwHa}3*%RflASn%U=~~c!GhzCHt}iVtoA|r{gV%n@z|{%inEtyWNEcfjR#=gh zuVfHgRURB`W$a7d5MvPC5%x3ZX)&?)Z#Tx{bXdHJ(z0`5JO#rU6`iu@CpojCTH zqqFlCZipTG@%Rf-#)^2H?8aE zY7M#cE9+pfE8%k}rTbTLn4CwOm+*GFZp)ZMZ#6V$#%qlZp`@Jj^STl5A1Dz&lw}d)R+RK z&f+Fn2#Egj?I>z?Q!lIToF?|}nszQ=??u%Lj=tUx)8&c#gvKNlZiI3J3u=%XuY9G! 
zrS?ORsGXMT5=XEnGSHc3Ftt^5&Eq*HYYs zN?;K+Xugo8J*GW?5Yn>6R>Y5kJ+RZ7Svp^sp8v+Yu_W%(&psX$E%=4bwboC>K*yyX zqG~_N@qphBzkjzFnN(4AQ`N~Wmf%d0ZO>tEr`Q_HaW%R5D1^%tha_s6Dr`DIr_w8* zspfsDioVC(@CQ<`GzAtNAk#+GOy+#&u7;PGc^Bbri-<&FJp+}&YI6MXi?VbR-PnNT z+@OjoRtO8gNVaR|K@^hFW8{#lhJ%V?GG1F$rJOBFdV^)8>`}z?F?~5m2$#N%4Gg-l zWI$0ycl~;CK@rH%hIOhSGb+yVrza8dmI5}{j-2o_QcS-ZjXn|rxS;}y^OL%NeN>}M zN$O@xlnN`HM=lV>lq%%Bh$+4PLIDNE6ki*@TgwzaX%&*KH{=XK@}x4nOz%%P zPRpCWw=u~uyKU>)B2E=HX2sup!!q>J5r4wzS@Jh;d~xR$^0`ZAHlNs3q|eUdRl;hi+32TAXSRT;yBE08 zE2S^i%9-Df6&K85q`v+%V18s>`jG^)qzf*-XvOgBeUV}^Pgc?adW;mH3p1PZ-5F9x z?dg*Y-`+qc?4tBPtMxU^qpQ^(umKOkaAltDf8xZgC?dX#RPP}OBTBlOSMpWvg~mW6 za}JI<=KyR_JHMorbv?B^6@+Y7`@ogvmLwhRYPa?Gqw}+fDGKd5m;-|W&;~E|9 z4h`4#ns8W<1UHkzHwh0mjSrI_tHM^v(Cc2Qh^{{gk$@JS42U?9uxQTv15`XD15bgV z{AOvxWYmAGbQJ5EWO(OXSp3K`lXgR8wEI0}qcax1_fktQPIIH)GMe$9WxqeW!QOMf z*XADpT@L=XAi4yhOC|hR|)p$%Gi%-e&)Y`c4#Y+s4Ju z{ah$OaTy9H**%MMdrX_hQ&{uIrTfa4ge@*<@zswdwdPxST}2*<`Ufk}OUZ{+SXu+V zsr%VcLrg2O0{8|&L0M;u^8qdKn9W+>Cz1~Hp_*T6DZo)(C3b^jgD^n;^!qZ@{{ zk2h<)w3Oi0v{Fa|0)`O9R-L*hYQc?ntaVp00Hinc7o;dgpq-0_Xv!dFB;#9Lb&TD^2PB!u+hVNkHeu*3=o7p$t>+koT| zBg`@XBS0nIh{HLsAhTRdjNwPdTka*|7;1F7z5{c|Y<|>EQ$f3`=vhEy5({|f62s+S zREY@UF2I$pOWpASRWD03NC2_0%);f-=gY$b7Z>?I9^7eYZ!e=P`h6Uhv^rqDQ-u{A z@MNfAF|J^*9^WU7uVQ%ALzWjtQ(r9Z+$N5yvcCYf~ z6SGw`b=@212b4#F4Iy|JPh6fk@do^wMVeH4E-WA^rd)i$FVnmTRL|nt{?K!JxtdGh zUm1kmnsBv)c!3;a9D}qvU9HWDJH`JyI~S2UO}TKF>=%J&h!XgF1e4{~oR*S0b9x~@ z1PclHK=;a6g>k^)^59>u!1(BR%SBSI71pTDoyA4k7#Wt#wg?lA;=4#~5=;vmcwdLD ze`f>8NKaI2=NdV1zP>!Z{=!Q%&CVl_X!tUc&T2tu zen~7k^OR=9M_tCET}h5ui_@HB!jh^XFOIAaZdkKNnMj#J#u3W3)u%^OlVMtsZPoXyQs6>`1S3~OA@$y8 z4mwpduB-%m*+c|RL>I)2R&3kK524#B>N4X?u)*dwWPPJB^$ybot zz}(?Yks+7Ke0Wmd*X^1!$pdwQg0pEf2RiEGSlC$OR?d66xi^%T-%sfHt573SLlS3 z)ibk|F`wjajw5*LDFWqm3`>=aZ(;R|V6rNz%X1)G&^|Yl`zQ>Td1lCM?ZS zG~4maKRhlqtUWkg8n>pB#D6E)nC1FIxGE+yh*!_yweM5Q+a5qK<$I9aob-0}3nA^D z8tm#_Yd0@h@>X7Jy|+`e?_dA4#!vaX_phPLp3~teTFvA4)O`B@!>^mfV>KOQ$wUYR zcmUST#cN?>mc%Yz$d6KWzOX21jQAfe(quDFPN)#*kAUp}B2cvJca7jX0`S(jJh%E+ 
zi<@Dzu4>lD{~m`7Ho)VUf`}hihI%GpggLnlvPH-qR)W6fmB_89c4oPk<4jfeAmYdE znB5#k;zf5fz;?fGJCx|fsT&M}hsGV^R8BsG6R&R~vJ7sXH(l{+(C&j3f3^duAL?=b<*@D<4$p-s=f+w%B*_ckrVFZkgUr#8f@jvn7X+$$2Q}(0`EA0S``YItFP2?hHe`(xlu`MHz zRG%_8dNq{2W-721_@FxM_VHvw%J2fR_><=f`|01g_TVO3VVE;**r%KsVd-ti)9%V| zhp(K5v8L2S@1F%LE)HAqZhGF4NNyNvNgeo}cL(ClPgPqJSK`KqmP;>(^9~7IzZcjz z$Du*;0KEv%KnxL&=|S11Pf41H2|mm#O7Hvb^yTXEG6tHgtJhXs25 zAMlzn)fPm@S9|x!AiC6pLlpIeeLTK3(9+E|wkxamV&3;5VqW-jphz%@SP)}0!g95@ z55{TB52(u#k>RWHS&6K95InF#vV1r^RDk*6-^Uu2fGMCoV`7J2E9N!E{;GB2-J|Ut zhLE|xoWj!@$n+o1Q&T6w`7PW#n*PVLS}~RoUybg5o!D$mWPQfZe9c!QKv+OUO%y3? zJ$GT@&%^shsMPC5-UKBENlfoyg{sD|H{JIpiZ7bYQJRFY-C~WT2h*pqpX&1^GJcN{ zYucAYaoQD|5B76W-BuGW>rb2iIQYCdclLi6JIko3`|e-gBGLjw3J5p@g1`XM4KmUx zNX*bNNJ&Yz!q6=#9nw8Wcb9Y{jR=UObeK81|dRQNi1 zQn`1W|FfQnKAG1*tB+N}GF6%w2$y(nD^DKmV2F&URG6*~0NiUOl#H-hR`$ul>jFX$ zcLFW5*cf-d@SF!an%qy}^i#pFNkY1loR@AtdCmb+y80cPjIswVpI7!MYl%S3-fX_@ zqewcD0Ne=!$yk>!6@?&-G-pRJ&li!JPs4eSt{?IowvWfdzO8k{cDRpUXtVKWIp{7i zWUh zerqDuqEk&}NMh*qS4{lkx31UiiR>oDTBIx-6V&OPtm=p}2s?|*c%WbbU(Z^!}!p8JZ4g-~vA=)C}qMJ;OM?PrkCltqNKx4vz zy<}((e-R&57z+s9fCE+p6_ZF)CkPVd8%;$~9{p3h;|;G+AQ>k=Gou>(G>zy&Z+OG! 
zNni;Oqj)(uY}8~`LHYamS76SBiqq8U<37)q+|M8k`bZ!+{^2OIR}odxYBgS`bQmp1 z$8o>6Z;U8q>X~Z9MO>%cvVE>RzbR@m%>n*ai0VR6^vNl6U2<;lpdR6k6JFuNbks1H zhLP%ir-ChLblV@eg2LEh(5Sy#$QblT;u!zeuwf+1V8U3N6PH~$sx4U=AkPw)ZZT9M z8pZ{cO{=ZRVCwlse~HYP$I5>+5#^+NVK2(GqQDcsjK9DA%eD9A(UYs=t1X_@$H!@c zH2C5-6z-35V|P?D8p8=x`mFefCk%9~V14r+AH@F3mhR9Htf9lWqVON+hbI?k81#(a zlP?gI9$%IoYXS>+#}{hKWd;PSo5H2z7E%HMjB~=94K{snhl+NVU_t!bH5e$Mpm#5& z2dt1#GwHZ-Q~;ELMKKrwHR~?2L^!Y>j`Bsy>=>+9TJIa5l$Mi$kteDD0Qo`n{AQ=1 zXuiKgkL-b8JN?p{mw$1!aG!T%{H z0+Ie@VA>f@YCe(Eo+hFe4WKG-A3cVR7t!z$KRE0h`zZfJGMrJSvvyGK+YbMu@|IRT zpc)mo+<=rDl>>%FZp+EhenDe@l~IV-9!pHQY;T`1n^dB5L*dEcC+1Snm_TO<5{j{T zfTGR_1_CD3>~Dd`Y0NdY{8lynp%~G#!XG}2&Wk}^-@Rcyp{mPa97^+aU3vSG8uPYN zx%)CwwPIVNrV=`*SGczieFJBrC+t&_{3#sqW3ErwzXFwh@^NiU!!|#6OLNF~yRhgQ z4s2TS7QcBOAOFFF&V%2s|1341X8P39t4Vm%{ARWZH0)r)bV3BlR9cTQdXOi}ct`wF zGt9F5xoI)EO^S))^!Js0)EcP>B0i0YuEKy(W=r;69T24x-JR~YmoGLR*!?BE##j#Y z-EqgUxM8CHmh3N6`VJq=mO1ov(&maur`tDphGMUK&au7^1k-wOgmzR*N(r+LxOf4D ze)Sb!QrMr$Umg7|?e?ezC0zP0+oj5Ub?O{<^8NV}(T8~?v8$^RK=-tF!c4p#d|05C z(PHx?1Gk`TO-b-ViOl(=;hTy^xd0*G?{RVGUg8`J_piiNdQDrNonb9JyjBw)yC8d} zq8Tb&*$l3NH9jmVQ;CkO*QwE(DA9xgfV%2~_l0KQ1tsgRA{F|I(s6Y==7sdh(dvpQ zC>t2bqON7hJ)J>{{%%=cBt9h9rw$Y#&yJol%Hvb9%-S6+;3YbL8py}4JL@XV=S`$R z8lv;%n;{k}siwQHCk6WRJ~6r^*y>VFP|#VnlY{%7GRc1y2EE(){MP9~FNa;vFT>@Y zL|JB&_H+X`8(qxw+i6NttuOf7RtFzw1Bn^5UT}{l#S4c&?|UWqulgsOzO#<^62OP1|;{>l8RUNXRj0x~d^T;no`MjbfaodV63&`k(E{5?F8d;(-wa}n5B_{C zQrUg;WuhkaM?2a>B?>VG0Pb8<61jx0+Nu!96$8q~Qx(ysa-WunCq6RZ8Iw~zAY26^_Z zg^6GyY5s8b<_fb~$X>k&K0;RU4u4BgLN|!85?BDyhOj!($DfoMuagkyHpu9DmNu%odmACl^*aUlae7Q@#nipvGYVk!-2Q_C=Ec9j0nTfO&g{fgJKEa8`_JD}YB!s+}V^HfbG|mhKG?zxe^nooyN?4@{;)V-mlLQsR zzo|<@6RQ4re>Of)4rFNGO^tp&Ug@UarwP@!RHq2joBWlCA$zAGi1#3DUTRqbB0V?k zmmmQ+EegM-683%s9J0v9` z14&PH?>)Ek+GFTKibG{y23L$AyQe7 z3${M>BR{bHK`*>1wzQuE8`W^*@0FyI^7EawqC7}pCgCI*u}=vnGD~rMdB0?&*c%RpL3ba za9P+$Lhp$Iw%~6-j&Uth9?>9^HrB9NhleV07_C;3`aSvO#mmob7rOQxj&>c9r&Ck5 zuKCfL#bU6DAk*DE^~0oq68clxPpVQQc%*2Ehb2sla_hHrw&Viw`#ER1-K;pThDdx~ 
zm-JRA^38hw_JF~;HP7+7R3YC-X@R2HY3APz*^the8}IthSdj6Qm0KeK2*w&4+D$U8 zF9lWZXkY*h#sLSZQv-9epPOhhnpE#f9)DfW&-x5BL;sdt#jdl3AdbZ|uv#D76KIaK z{~Z7?>ccb!IGj{2K%i}j?AJ5RIDg##8M=HLFw*w6fa{vzJCk|NIH~)n*vQXUXD|xZ zkvhDTe2(nPb@Ml#N96}}E>Ej3X;H5G4+|zt>3_fbD&Jw+p#WSJ|t-TjisIza4qQuL*oW894^|s&AODP0vxBuWurywn)wcpnkpw7?Q6J zyf7=`rcU)Szq!*q#@NgliQM_-Q+dePS} z>EJ_$_ZRYVam)Jqc1S^3FBq1n_6aaLKn@{w|m8C!}q^;=FsmkW_U=icHif=4(8%M>B~)~PQK*l4l5 z3Oh$Pgu}IRnG-(p9`-*`CQe58kQd*U^-S!6+$Uee_-H34u)_E>|d z#QVD}rw>0O>4_lR#+cuaOVzrw*r8j3{AKMGE{|c)Ut9CqEz8Uf;hhv{F0=<`S7@DMx($x%!;h(4=N0~AA}bkhg^)5#3H zb{mqQ1{xYlFGzb8SDdXl^UNq}+>S0!>50I(XBtUk)u&H*X=TGvB~h*Lpo6+B`$E!? zqRUqrHP(+(e(xG9o-usx-TE>&{f99uEAU2S5MEPlYP2uy`^zA0b)|b-dqJBN zojsU|qla0J4^_z{E6lOD+4kYuRSyus@ykz zl?NN?&EEAAr9gMxuFWn8Jd`Iw^sOyJ?_4&%I)wZzUk7T=X^I*boo+Q(H3S^yn{%z( z{JJlD`PdMcSK%OrlL3v23b=ULNyB*pRdPQ%5;=P+U2Du^w7d97Q*0|ubzf|7zOq1z zO$G-+?}a7=kTeQ!chT#Mu6b%A87U;&wAB6;4C(aJ%O7&R@{L}C|E$c6keyp^2 zzOdrp&>wD}DiKARv-$DF#unp@h;rR*h42VE{7&6ETO95T-o3-ndx*VWsVz>ij@BXr zTj#JM#aPv|fU%}+j=Vy07w>?a2P>zk<%g-x&y`OY9t3_<=|kc2-!e^E(rnrCwD^ z^%Xq>7l%iaf9k>QtlS3>u%!%^M*%Y+kq=BxduqO=(Nhi(u@T-&o=o%i>K!H;Ai#B% zZCmyHXTiHIiL*hN36g36-!H{S+dDoGIC#F`tO zO9D}GEoXo$qTb1}W4ZBi5zNcBl zR#LUI6cZ;MJWlRqGtpX~Enk`0z&~g93J7G$$g#Z4w^-gKMV*}!WNl|;*{m^Jkp9I@ z{@|A&H<%oRwY!;P+~^uyJ618IYe90JmudW?zQ7QIq;0FOf|`&+Hmx2^UG$CgOFN`P z%$ghCV}JkA^v`DW%$wTIlmj9UQt?v50PQNOgZJEz(5Azq_b7lM36G73f}KjFAX#KVnD7MRa-&SM zhu)jipVgFOD->30G&CmiaiMj;G}y8=1_oYf>L#EYvXgBD;1wuoXTG8#n})HPGCx+A zZCH|%G_$c7%@y3qpuLuT9q6M6!&xBnfO_wPI5K5Am9R<;b4u|1Sd z9T5qq&6ns3^29d147`N30fyJ0RkpZEwznOc?(lJw2caMUKQ6ul@;7I!_1%dTH?>5E zzlNVEEyL+aX9P!`HONEs@94V#@6~@{lNY#FCoDPyO+_u6kZX0q?$eECr4n_HUQBAe5=Ma^?Y?qk4=kp!XFfGlXSMYkwcAjT+ z{yj|qj5bnm7fJ#dvg#h#e`#_ZW}o%9nqVi$Mk0<0xx3)}q|$O76>PRQOE{GZpjvVz zJI7F@b8mLkJ{em4=EFnk>TN9DhP{0AVb0j-lgY5Zt25k>_6Nc=QRnQ;+WA?Plb@r79V|VF%SYP_Gv+lD?fJ2Bf>u?=gPrx zS*_^lrQ297iwAcP?GNNp4EP$*_YNY8T0T%fgK8*Ih#3d$x0C61 zdq4HB6zjZpuys`gRdNKO_=}B%`Ya!LJO?AYfmQjXs;uUjihcHN9tqlK){$&6NDP~1 
zUr^&DoMc~Iv7F;&IeA!g*CG6R4ok1@DcmtcqbKOtVDRZNwE6A+=jbWhXHFby zqwYOf-UAH4x2i;%pucL%lHY~mwDDN}vPnPXGHBVfKQR{oWk|)(kqqM58=PAEZPtHV zC5?`93dUSsqBH>P8<%^e7g#x77PYrNeN&r@*gi4M^^KuQ! z4Ol&O&ySl@ibv}>eJTSpBO}_&nsb)}^?;>}H`J+=B>FO#1#B%|B65l{g-RR#diOj-^eo*bH+YrF_H zU6ckWv`Q%m#n^8j0F12sCRI`fOuK*uj5|n?ykeeetc7wnv4CX`Tf~^#Vj2vT`hDA$`TaTaTT^;M2Ax4d^hoEyMLZ zuo%RJBC;~ldZKe%O-DQ4Ze9E~{zlVEzjSk&<&yERVA+@QvgMqVY3PP(;ul@-u|U*n z|D5d0n~0mF(}4%!-d;2f$3H&i$?uE`nmta=FO|w~XX9GS9dfg~P=((t;=a%J?OCY# zOZk586o1o!m zMj}+_F+G?mhiQsVsZC=NCn@sxGH& zj$!$as9d(w)d2UD7@_yz3Tg+SwZomL;z{|jxkT5C~_r4*~=W~H)U0DIap2jR2}u%U^7K#YuMaJ5cBF9n^Y z*Jr!`FDM{tQLlX0=&(bLXQ$WR;<#+oz9;U6j35t$&KPga2!mgKA@LdL!~kR?QM#HT zr={VQ#YINP14T*bY0~1T=4Zq1>snkunIbtrcknGVInxT*EfZU611lU7?KADT!AJQO zx0~9BYiE*AP9mPG$;+8+?60<>$k0glXcH^1`%p}OD-j@uk-C2e{cj)Gt;&GvDO7@L z5(rfEcR4s8xn0Pwlj9*$d`V!_-@Z7$X}0BkZ|(H4S@-21Ats_HZ&3TOLXq`Y?NyRbu_C;03BlGE1N&U>7sA%ON;@urMVZfnBU#8IbLX^} zlAOw~NE_q5NxEOdlB09jcwlQTd_;X7s-z6Ym~uFdkt*aW|9)hMtu6Je<(1T$+~fg) zbbQe*AeWK^yfX>jEe6M{ANbg?={a-*ZaXt1mFD3nXR5%1^VO|XQ9O`5gS2i&ma560 zxJ0h^*Z5oFTMj@pQ})fze*|X;gPc!JpYWePdU~-P=OJaZZEg*s7JEW?9L}oC7qnFR z&`bB-XnkY0!345yDvDypdRJy7ITyfmS>)jm@~|J1nA|^f(r{F|*LX|ppA}DK#fXKku2k0x~3-;hL<-fbkE~ zuxZkwXmU?iaB(CswzI^b@U5wYfy0)g*t2Dpz{uw7;lzTaZ`e*Q#-sHU4&!+y6Q}6d zE!VwU+>zE>TrB0Jh~q3C(DI!SBY_)T$V0jU|o>}VU%c%_PO*=>@eN)Y)XoB+P)$?y$?boI(5{pBd0!w&8U+Q1S-kS?q z<)EA32ljY0INlZ#vzRBJykB_p1QLC6%+IpVZqR9L866YoyTL;=oceFy24<^|9)t#z zR2;s<&poBEJiZp0;F&=}0ltvDl9#F&D69+S5DG^_60HW9H6ZeoE@@-}#$Bc-eu%53 zm-vu>dm84Dy#^XJDLvtshOl!`B_M*})*2_2?owC?3GSlxiwyZO7BdekG&H*8;LY|n zrJoD*ZrxZru2&Rv=ci#FF<<>Dz1AU}D(FkmsFK#c;!4zJx)No-Q$@)xv)ffGVbHd{ zT)v|H>sS+l-oEdEj(55TX zy)Nx}`=;?w!;L=~tF|X9$0H~IP4gVVIjT8$iBzIPmhE&U$iH>%=NJ4s*i2GlN|av{ zjO-WLxbuX&BcA=KIzQpv2a27V8`S2za_Rgy?Kj2!$ht{-Y*%34qMvv`7m>V_vtr;% zW;IFkh}nt~Ha%-5n2m?Hkb0nh){H9oZTrQ(EQU~4xv$6;crv!nb5ZeQ-kV#T?ZATr zuWktX0fa}>`*WLK29Bd=(e7IX5VsaLFwNRk57vMv$CtvtXPbS3SXydOz$o%9E2HCK zgHEZ90CWG@%zi*>{mK7vw6qV{qz*5IsS+u5nC@5`9V_sF-{5A+<~D7;rBg|vzKQsp 
z&gWK-k#8YZnMDZ9y8NX*gG^2;zrZgh@Zk-<(4|W->0g(;wRrYbH)m^1_Wadd`i8a6 z=GO;PtIP+xocn9N%MzXCh(9Z>E;C{D7>DExV;!rp!>(Vx!=&Eoqvykkg!72!c4arZ&zMmw&pdk}GMwegOC zM&d2whwANFN)aEyOxs6*ONQ+5ORSN8&O>PG58NWaGUI+itTB$AZF#i^3{o`vGy_Uf zYHe4q1B%bj!%i%oB&p1hhR$qqNE^`IW!$!%>w*t-*xqn$}Y=sz+%!m9WUdTrNu%^5OKl06ZOYJ)uHaqK>?H7>~b!L z%l9ZLB8m}7?NZzleSaY4Qv!@WvZJ(fXm>YPR7MIth#`#xosOKGT_tmhFK@?Y#=!4_ z-3;DcY8{7fyU%E-14J15?;oibxMmQpFf%s4=TMmu`;4S3 zSnsyzhs*3jP28nHV^*a&Iv#eKc$vwuXx=}MCJh9%RT| z{*<;Eun^E#RbaGKOJOJ#!9upJWT^D2l7}Fm7DAWi!8Y;_6$45(Pen+Llh2b0F&fAtaIo$8Ch)ukdLr$`V4%=W?=?}Q~Ky4rcuSX6lZp_H>HhQV%7 zTaB^mtiax*pgeE2QXhd?zTW|5orXP>gSBhBMAMnx^XnBjTaVIQu|)l4SGrwWIGB<8 z?N@^>-5>kya-_%C?X7q)2ulOAWUkcgZ3OU!Z8Rs5CF&ue-K?5*MQ;G5Foy}3>%hI` z1fT=!fnyk?72)-X!J1klqByG$Gy|(E25WQS5AAQx}@|WPe@&#c=$=#z(P9#`>livM8x^Hk$MpA_Wuc|6KBP|Rz zzpvvG`PoYI&Bu-oFTJME0aG+l5XU|e-mL>dYRnPYzbb z_;~~P3SjR$3W(NB(hgp`wyx7k4a>F`s`E+P+wcIkO@g12R_WVCjfAcEy`LID%>c+h zJ=?C-e0BU(h>`G1R8+!#&qzMUv!JrlikDG6A{3QyNRDUj)$kAt*`WJ%iAD3r;dn!M za2P^pwq=u2^sB|+O0 zf!Rtiz{SurihrX5yx?M~KD26GP;{;w0>UzwaQardn3Z3NHLD+Cxq&LdBSHVkHZrBO zWJEg0-xHWbFd*J3$I=iJwy)CSKT{*S8sp6O(mkS?xVWcO1C#BoACaOW0kfjR2d!n! z0)w*7^&RWgGA2CO3I$;6Bwl`9yHyhWn=YK|}Vw(7T z;vdmY=)Lrbf6i1=)A5zB#)%e76kQ{nH_oS=vo2maM#E`i864nwY1BF5crX~BR_9&} zZw|v|OYMl)^#b*B+U4y~1*sLu@h9(jA0;c8?OiN~{}0S2O^MqLe@KMtDS#z&j=xN@}y|N=V2^t$ZG$5I|8gI4+>_L z+cLm26RZ(cKJ&gaIE1r(h?&|(!xL;(7VdQk}!wVABlm8+Inuj0n_TtdzR;%xx4y>6ykcz0ku9uX<2x zns^lb+0*t^R|)A}j@MaLRx0#^C*6FJK7eRNLX9EE`Ae&*~kffECCP14)8}Fe*f&y3V>;0RhxChTrxfOXaW`x7UOmIoUJdv z>orGskERTbcJ=bCArn&bg=suFuzv@zJ@PPxDa-FbLAtr7QRfA6Kh!hnC?W$cDZmYm|vx59Q>i7&rw{? 
zS1VTbJx38p--T2if}8J#4c;)-A@yaPT zuTuN>Q!mcw{?kPciZ!?ppWAvNy#IC?++gaj`CF#8WWXlR0 z{K9#Q7B(Gq71>tn$hozE!F67l7!$8A{O&`4KU-V+Yujl`CY zQ_$QE4x;oj`(1|=f#+}i$uKISA1<$nMZ74QKIcpJ$|INpO8Q?~PpHO}ZQUwLK@IM5 zAoqp*1#tFbPJPH@oLrlbQBthRsDW(9 z1nV5{^f^uFZ zfnbpcoytjXPSM$u%RpVf48n}wlT?Us&F6;hyQNcMea?uc*@pFJ!ab^=vfcq_KU@N0 z&6!dcc~*ZWJiwmeeWIssyqQX%+ac2j50Qx@ufz;=gC~L|jS5v!0pe6s8f3U^xSG4} z2vB9woaLJocH>@e5^^C$YVPuBk>o<#HmA}sMXPw`%L>exks_JE<-+F-r zY_*_@4#HEELCp9uVI#~Wb#RP{On%$VZ|v_F^(lD*6PU|t+Sh5C4zADGxjbbF66l^p z(EHwD&Pi`hnVTm@|@%%lB#4LQj~?@DL4A<)bz zS`pz0v9)FrUlBg-WKjJs*%KjL~QB;zk!p*cfK{d{bZ_CLf6%nj(0(xLvja`G!wqAVT-^=#$c!hMLZ3#__ypjL;%`KFJM zZ6N{BqF5~D*8J#`mrN1Xq&@q>mQB}Jue&E3Wgr)KU9_$EIS%n-J-}&CCLcRrqxT!j z+;@5VEC5*t9Q69_4EkVQ2rv$W_bzSs0+?3ElkcAS!1GZBqi8xj%T<>Z+PrSOM;;XY zGX#EJ53F_AcmHA(3G4;d;F2Eh){OHiuA@J6aySim5%8Z(sLiqAK1h>U74x993W(|@ zoi5jJ{j_G?qesbSH=`iw1LC(83of|jY^&b27d@TN&(6398e)mj$vqNS`TPuOt$&fI zlcZ0l+p0M}T}U@nxW9FXRPMmPY5}k*AQB^DHC+VXr#(^8ng(60@s+G(l}T+`cL3$s zuC0J!_Q-TcH^Vo`wPit%yM_aP@sOhIwsNg9e#Lj`-`DL71O;7wlQczC_G$p{EwMTU z7zJ?M#ZupY6DSV=jh!|rY+6UTFI4(B<6GGDj|##eRGBi#_xZ}W_&hm;&BRs`2uFeb zlO|0~k>`tb5@zIu^WNh$PhHGBg3eIq+#iF9s@k@&($T$9zCRcl!w(M^Pn*qpICLa! 
z4IcQ2c!DJEJSbX6Ykvj|B1{%|I0P)AsYgda_P+(?hryJ1Lu4yn_4FQnm+ zw5=9lDB96s8%D?k700aoq`Nxi%?9Z~N5=TKi2%rgr!cm3AUsRomOV(rG>!BC@-I6uPb`2Xm|9= zR+V~P{d%eI_us=iWI8#5Aqeo3inWvQ`Rvy-cI4u<+3^&v_A}y2_tN-biWdJW@AR)puXCY z8t+ixIRb`#nwe)0@DUL|oIA%yrXif=ujI4>0sz_h1#64JiP${qBw#q3>i2nX_Wnmp zc;%6Z-p~MfY`xv0I@1t21^Ie)Z34L9@2gqi>VWhuiE~`9Di?eo2~V}-dfnA^o5dRJ z@|N2N*`z_Q_v!G`+)d$im|>om2<5&8na$Lj;e3n9vLLzj-#o`GK#tba?aaUY<^H1T z!E9_5h)x8xS1#sMQ%Z(g_D&XTIydTw1P>aJqosrsv@!o6`ii=N@d_fo>|OSGe+!+kI&x(K*xsrwV^m&xa~#Z(V9^k z4{Xu`Ff^O8eSkAY6qO2ZE1piG*cuyfVNWUmo1110ni8$$M~oI^R|Hk4Yg~jTMg%~e z5pvpvuriXP_+&uBuQO1i4rWVBKy)ck%EJjbMKJA!6OyA(h)B?T*5hs$XF;Kbt`Sm4 zcOd{SA+yf<_MKiSP-PtAeN)cJ(6<;=Va8y^d^`k@NYzzQZwNkaSIr4cjwVmxCEVw{ zrWUY^Q^Az*IwtdoO2)PJ=+!y|4iDf{psjkdUjIgVoeoiOrCV(f(Eg3(6fkb9(|%Ls zZ0J_K8})$Hus3_8wYz_L;FeO8=&4Y^dCN#!kpZ<6Uqf4sq=3`5#KS5Hc)((i4~vN9 zqSWdg_0`Od%j#k-&A}e}vD=^csIh`UvbskRUHJPYZ#-F^vx(5td)GgDo!1=nE+ne6 zepl{XTFh^&g(pVzrzRtz$0Os#j3CuX4Z%kd3_W+2z>){}A;bgsz)p-{6nBwqBLGW&d-Fk{jqUF?cebk*ZUe#>I6nu(N~#-b{e}!+%SPCM>k`&b5bWg?$H&@TQYY##K??SfB}?Dl0y!T5o&c=U&Re==wz7-_$$?>% zig%sVJ!aj3y9axtqzzb=1>=GszWIr>Eo~YUflKSgMN7JhCg?F5N=)nzBGv7jZ@Wh0r- zkpiN-5);Pb2SZ&LI!ySVF{XC|QX-{Wx*>G|29GtuR3S%lX-fPU$B??zs0FtAzUJhJ zj*_Q(q8Sl~VT2vdZSU%wujpddUDvl%>t7fF%Qf^73Megpsd{>YgWV^o@AU?UWF&1t zM}P6&L>mS-`I!`jsU_)#XFt^$)_R)-M-FhjGKhqUmr{&EdruqlLsg?^=)=xnt^h!jSVRvtVI+?^v|Wx_{vMPaKzY>#DM+8q$2>WX4} z4+1)I2PT=Ja|~=>@d%UOIr^%SDs*_b36)1jf1@)pRA*Fe+^sxa2=UNcLP$$X!zOw~ zvv2I>(Zh&m>h%d;HRSB3=>h)BQgxRqfdC>Q2-0j~hM0a=e=6o70~$q>%@rq=>xa() z0thxAZi4h}`-3e!z~;HYvkC{4Zf(Ot^1wNJ;CETNYy1T#5-~eb%bu^d9gD+WtpKV> zVwD-pg(djaZw+ElFdIM5LEBj2#+ST(gOHA?7?)~JUP70j3)8_>5F z@upeL3%fQ|2od9-op#qOi+64K#r}?=FR_4W`D*rYi=W+O;_A#hSMhd#pv-q^{`MTx z@a|ZUP+w1SrtsSL3z2#mS!%~-P_OB)Uo}pIBH$dczUo`j=>HZQlE3Ir-(dE)ij`7i zicCr3y_F4sdPYS;3gvOMQvEjC$2A2dcOkf8v8f%HP~!M~^Id8TpeL@DJ-Kv{+rPu) z_c>$W&~NaYFdsJEEPL%{x0<-c^HaWVayOi5w{bm|b7uXzn@!O17Ow?86U>l5yGSikw?QO&wIC8<9o%_lHwobJ4XB>bzF zZ$G|uD$7v44ooyKL{Wg7-%LmlE=7=x)hTOZN)x`i3cj_NZ^5>D)YV>twV0w<;TBnQ 
z99}8}jF6xzjYWWdLu@_y_}cx;UI?K40L&?ZyP*MW*kB8167_^qZF8{NHlilG1#8Su zzI(jNNji=Zsb+{x{_te;=1-HU(I)vrPZ@F?c*BPRnukFil_&@vj>69e($8MR;_{ip8VcPA!xP>{hX-@T*&#xIjEsPLUD4q(FHZgGgbE; zx<>10Y<8n8>>mzEhzjn426au#>n=r-X2 z3Qhkx#Tl{luMYQ12cm%gQ;KeQVAJ%`l?u`&KsLw69F19AOeeEHB&g(b$qweS+&J&& zk)t{wD{_FZzmV=N^C_q=huCRF@IyiGHcO~&~qz!CuEkdTEp^dxscw}@e z>10V^fp`9FApi=!J{BQJJ>p141E?VHZ&?)?~YDi}2Oi%hlvX_%~Zl@%f63o*91tHZ<|d z1;B9KZGkIUUKmV zdfq#1X1lc_qoJheWGM#6qrYvXXw!l7#3g@cdn~8Y&j6Dl4GfG6N2`@yZDm@XyKMSY z*Qx{m_fH>RT)LD@JP^)Msj#7l8un%~US%#m*s(L8GEkVSa2ELO^5>EME2zFThSRal z=2#Yyhf-2Vl=gd)X2LNg;6A=>Db- z&{@*SbpfEz2gtNOpmGR-J4NhzBr0|lJtGg3E?K#KiBI)vbBp7CUML}#|C1I+b>43n zSC^Pv)p_CFny1t6qqNVVT z-OB5Ds;AH zhTXUR`J-oJoGY}K!pSa4*z_uW3OE0h`_>Urj#S}jz4-c4r9d530u#35DkBx(1BWGS zLn`)HIOS;SpCtIGPMpm+9_t@N8dP;c^|5^<9k}*KEVZ4KvZG&^u&^ag<8R&K{Z;$^ za}$3%JfJdLR0TAcwJCL`P|+2in8LPlV@qz3zcbj&V+WNj&9sCKd^k)~Izeib@qnmI z0bpdWF=_8p2Dah;NwzgFda-AE*9z#Jun6eB03S`P#7PMzWIUs3;Wpgm5VBDeDi&dO z$*56jipv%;&uwIzl2`KM14%-1s2S>WczpN(f3jD+3^FtH8L1|&V~UgCPFcbCUQ(DA zUTJ8 zrg4q(EBCEomXd%6a~R%RReYV)l9NfTn`HX%GEZCx+peXV zpnO>0k2&3w=Ao>pKjE1F>QC}d!pfhyK8!26*(`B49sN>|;hw6qaRr<0~2;(Et3!h+Jb_Uj&x!acjXtq?}(a0;b18jQv8FF#t z>)~ftssE>ma+EOLxxq(lNI`!Q4Zy?GD+=WIEi(k2hLccK<=&{X#Il}7`qHj%o6E9; z1*;WDp#codA}N?>vbB;UD;EL@YwR8p&9_U5N#~Oy7Bm7Br^g)CWs3WKA9-iT8%KMX zscvZlGxzJXU#_2+xtZS1xa~HVAIyf)XOAQ$f|4(BtgudBS2y&J*2{E8F8Qy4 zbM#(iyzg3&F6OejY_u46hd!k_Lzzx2!TPw171te_3BIYM`i1a_w+-%>wprtAtwj)N zB_0f;Ba)ge*e{jSvG=26Dh_sV|9&(mn64liqqJ>jh~f9(H$YTi?fbS+*|A?yjC|LR zT#8+mv54AyQ~Q?z=2t|{|3G=2UN*>f1Z+E6yftY~@w-xm7Z=IIceSq;ovk&hXHAw_OuLBT<6|M1N1Agi_@yFHyFN!B`5-*codSiZYR9?81!fdzKK& z?w!WIZ$pSIgX}ZNzVA!ckfn%Z%bG31_4NLHzrXMK{c)Y^y8h`@$2p;w$Lq1&@3*_J z1!*4amJ1CF=heq;IWY3~Q+!4D3M$ke8hyjo*ZRVxr^eZEcGXE+TuTW@AMMZ_C(lRT${b8dG*c{@srfaomR8WLREYmcw zWe(eeR~{?Ud}8?H9&o?1scGN2W!}pD_hl~ApQN2w-x~5seaaYL2@U1$-hq}tDt%Fx zVcE~iO>At;sWdXycm1SOz`mlp7ck>e6>{(YSo)EDD=DxDNncK_iwb}U)uds0iv*FCT|^ipj$*h(OQ6^^yBC1QA+P?AG_}_7J7J3zWf}W 
z7Wq#ZNVfj2R-uZCm}K8*!&3pdW7a9$Oo07Q%^&3DI-LI{GAXiZWLJ~(cS<3hci!K{ zKK_~s&Vf0#5l?k?ukMpfz4E`2QS=qCUKMhC2L>Li>s{L^N0ayAEf#71RC{qz@o1!V zesAC*yy{Z^HR(y}z0cmN!QOUn5NpoK`mYi>IrtI8YPWdxww16!F(O7_l0Mc1hgBz{& zB|WTLC*S4AUR0bA|5BIJoSPP7wb(4C=Mx%($D3o;fOm@V3aZWnei8aIv}~?|{~pWP zI-O?dN7t~#w=$7WKgyUsfA^sq^O3J0CNU>V1v3t|mYKw>dxWe;=vU3jfX3H_dv5|4 zP05 z)1A#mdAR2xaA~bi|7HJE9h#jq{(aBUh3UwhE7lFKjACdZ4kssCGkpZFgQ=TAj(M`p z#nT5FX+BMXfklJYHIPlh^cbbsHm2K^dL_XfJXaUi>KT19e1y}|IzC`FH`P0P3LngM zg7}&9B;hJRFS`T@iL(aV*P@e3nH@nq4DJ8~A-MY$sZ8F&Um zLmAkhcqR6zotXG~{OXhP`+(*b=EHFL#&Ga71K7M7y~?v{)x3SP#2v^>A3Sbe%HR0P z990Q7UFwO`A}yE*4@z~Sp7F{0uFVrSz<&8@m8S0L>V*dHd`X;OI7)(JPPMC0D`OvK zHzaDb*46qpswGsrKBE#9(>xxzpLd6G>gUD{)KZ?WkM!uCeCuRkJk22b7M-uro9~8o z9QSSOs?}G8OO(@i!j@H+RwGs`gl9Q;UV-3?<}fmWTI1jpGq-;7>YaxCgH?m7+i7-u zgMGxuSJ2fj-%HcuN=Vm%I{tX)c7D%97N^-H<=X)~_&n%6NhyW~D45u0GdU}1CPf0=HlO+17(xUmA`(op>pqX4aDo#a zgpf=rUsTt7uEDcNGq`%HL5?Z^b_Nj^G>p=hHGHklSnRV!a33}E`Fo=EuO5#Bi0?n* zwQ)Tl>pB5Q*jVz#!F^EzhN1}tA=YL*@U#$3Th(>7Y_swE&{}KJ3bx`QtgXtL;JoQL zf2>jfGYV52&oGrk^C4fpGHTpiFy8h|a{2G_p{!ZHyhI-Rdfk#FtgEbWKl z1>#|k5<9o^RV&{e zf_0;NdkB=N(-XLxpf|+u0Ct3A{K9U%ez^rpXqH`SY=p!^ zApw=XahnHqk5eg{t?&6A-B)}qTSlslT59(4Dj z)3O2D2>bT;-MN}qcjf{;*F@)6G=|Ii5G+pY!8Stz$@g6uz)lLSV7~90oR++;thHt zgn_w_`|Um|@U7q;Vc;Ln%F{A}KjpPClh@xI@9d_#e*YnQeSbi>wbzapv3yh40o|>_ z2uVgl5DqIWjCyQ%?_?c%>8=%YHTU?*1_hRvbrvA9fc?Zp>s$pZY=* zU0SL(@q{dUW$I7L`Qa_5H1UT3F7wcMgA4b}_TbZ?*?bBMy3G4R3G^h_xPx5)IKSju z_E13B&&fKfWq5=A*spYP6QIk9_mxB?8qXbGKW2kLejfaM8~fkc8U8%yx_ng<-I@&L zg#!Ug8EN}#hQ8)NgYcNH2q*;v`B2Jz=CnXEhE6fZhH#CPOH)qv3<#2rr2&D%*v;C4 z$jriV`5cuMho_kZ7Tz#4rp}#aP1<#80DU(&(A9gf-i`!U(DCPEG_jNtOOm% z1#m<|WQZwR>ysk@9APbU52*9(wMZpmu?h#Kg9lpbKqRQD3HW!KfknI*ft5%ZV_v6k zBeB8{#6g###Lrm7o4b2^Q31a1Y`=_!a@B$vzz9dAe~nTyL{O8!4ml(qV+(`aKqEfrIl_1RW14xvI!)-@izgFM}7hsXXUGX8O-u(5N3LhK_&}Mg|;miQ5k$;tZy`BvgP)WW~&%&Ce2j}$SYR#UwCGwmhh1*g7bGzE%x*LpA zXc5@-mQ-Lbh&!>bB(8K|q^jLSZ|FcbgP%6-IS+n$0xSX64Dh>kOj4vgPoAxy75*qx z8#_l%{oelcaUq2DSfXgU;aV!Ld~WCkcbb9h^)5Z(p(9sd)}>v@FpheoI0zFEX0P#- 
z>b8KPLexBYdjV(SIdq@xUa0J#&((>TB(182`=r*0)_Sfh&WkT(-&Muz^U2G;yO$BA zm8?N;*!9aQ)oj#(z9>cgjc#^N*`7Zsw2{tq(iC1P6ME;_@2x*~>sAa_m?kH?86XDN zUX)nFgzw&9EH!LrT)TejvC@BvW4nv) z&RW9UfvxF%h;e5J<_UAYa}NSVnRq)@FD2%)HEOourEJ%Gbie!2 zS|b_xMryP)Yjo?keU~rj{cPVAkB>mt^)E6Oi;H3aYuk^p@;nAY3`Bzt!T%3q^ylp9 zz)!Z$E8an-Q?=4ebtBXZXj23Cy!Y9eT*?Hdz19QA?UN4&F}fZsciBah3(mnsmxX?w zL|j@LH(lSJAlS~*^iR?vm+S&cqtrVV15BJn&v*>^CDqg3@5MhkQ*~5aG2Z`fIS##9 zzjf7U;_2aGGig1M>!wBHuvv3bSUwP0zhqzB{KVAQMaU|>%4W0~kl2-*J!gRPN;KRs zxnKA4HhbNmPgv63V2iC63T`R&KQrDXbN!#ISl z00T-BR>4Mj4_A1&rjh)<;*Tvpf zV2?r*_`hwiz)iJOerYVtTM+u&Uod1uTM2y2%`xE9j%Qf6Sb|b}AJD)YU04!hs>o%p zWax8R>pE$4UC(uwZn}X&BOJ9r>N1htgpoBmN}yyc@?gMl3qjpYqBoz?XV7kX)nNxl4jVid9GI2B1``ixkZknCMxR8!# zVg=Phx8hezD$-x)$c>(pTOCF`03`ze=q9de?Wl}8GZid-Gx_%f1V)reDm-Yd_(u5o_#K+&whdkOef*VeI}e$6&I$< z&0LtF;x9=^P>`&;`H&jxS|~?Zd_{m%3|LmRX{i)Ju5oi5Bpmhlocs+OzsjB3rZp-I zC8r;H(DTOPKx3U-AF|hFRdiMnvo~y4PjGYW`69jfTg?NQ%}~HM)ifCzSm+0?zw$dV zz-+_|RubIkeme-+ZIDLGVI{hVM|7h5`FU)}_@7~r{pKVe zij@&YSKhUin!^g8w2z1gTE3M)-G;v_S$Co*w(_(_e9NwMO5rYfv|D}kaIY#r45_y+UJ!4o24u=BZ&M+^^><=YQ#sm8W`6nnuvrYlmaKVtO8V}LaSu+Dc3mr< zW7y|5$ny`WLG6PBlMnq*DEvVx4HC^3_WO4zynb){+7~iel3=~)Os~RcTMlyW@m;2~ zhoaS_1I_;^%65Ej%X1No-grGqST+U_xWS%QhJvIbsfhiC*RHfF;CZ{Hmf}59NpXPi5Ia&kWa5c3YL1)%=@Pu97dprSn~R?w`c1>!KtrdY(EZ{?-sTX? zuF-1BcKZUpH;aDNHLHN1Ow*cPZrY#YZ}aUQTv5A`rZCt&%>=pR zW|vOY&_<1(b~$G(y722ssCPn+K?a)IGzeKDUMYCuN4uBG_Q)s0yn@w*$}z}D@hACv zgvFmfR4ir-G~~S|YlgNTdkn%CjWIET#qB+5RuJ#LgK$ zK}8;n3WWWomE=l11$W7~#ShANF$C`7JA#yI2HE5)Y0`V$2A3M8_DTI}YA-3=-5KMM zMc2H{{wM19r90`#ksZfZ%m)UPYI9A2*p5zhwz)_DCH#-wjLMB7+5NQIiLYRbR<(Tk z!UaDk?)Pab@)2WG^Q*jX%~!mS!7x&pKw7+*Myf|h##g$sH!k(K4LY49zdl*gn}@QR z$iy4W)}50lcuE|)MlNNGnpM834)P}^{9o1kNS=t>uRx-6SKxU(dXxg!b%h)v(8&l= zwWxq^Nf;!Hp=AZ=!on-~Ho5@3tz7b}it@m`g)IqPTVKSN;Dc9r>l>>t0gM63SVef! 
zfrK7>-xUl};8z&Y-sekQ#_xW&2zPZcfywcmpg4u^KGz3Bi%=uaw~VS#zur?8t|j^< z-N81%lG8<&X%BKm4K986rWsUI9)?VC4k&`v5*+;z(wdkDw|oXqES@T{p(p{KhiBl& zJI2bSuspza$v|$92pu!9!`Uw72$b5!#(_`5X=&=)J%d;RITs9t0$!y@MvoK8Z0}W3 zWd5@#@R*K>eMlpG1ojD~T`h8nl)JJjlsFk4lQEF*8H6X9r3XYKAi_ESrOA>Bi%vxP z0&>F>8WNJq#8?8;DvAlDpi%M|C%(Vg7dyp@(cIFX#*CZG!EQZL9)#>b!>JI0Zh;nW zp8xv@am;MjiLmIu1-xx=@f0uFajHbZ*4TM0favd0TU|FA?CDiHA%JM}O9#`1uIcmYjDezAMLPff5hXSL0x31(= z`2iq*$(&?3A}Jr`>kl+Nn0lv6(jVF1Ei`dky5~3|o)9ATf)$Ehgo3oU&-7FO29S zD9o57ykt5)zwR9FMV8L`O@1w+Be~A)0a1t{E2Kn@Q+v1(A;ZA%BfG4O$u(*HAKf{Q z{NZ_u&vqyG>o3`4Sr;;{Jhi{P`nW!1X?E%#fh5ckKm9?fuf&bX$w}V+!M5=?O>p<# zB~>HGkOUPJv=$P(ly8+35AI z6@#!919Irta%~AM0^M--=auc3KQ2clY2>L#@x%V}d6J%g;m@^a0}x(Oa9H7m33PX@ z=Z#sXdjVxwJ`s#kdUXBjU}tZAD(8;gWlci?8@-X0j$?Rw?uIS4hjp*>8`wBMtXL{Of<1)FCIzF5CO1hk~kWwjX7C zzwsN2{9)Ry4kA5D6JZ{iX}_?RO3b}Helz0(+xO_C>D{K^o)659SFUeQeh&o|UXXP{ zI}I!q`60u}%J%%BYs}#68n`)ab`m-ftLyPtX6Gb9+vPl*;qbx!z$NJdJIA(*fxA7b z5EV*_mB1L}CM*UE86nT)fb%xIag%>YOPWLe#k+3e@fE^>@k5{oXCNE7w{<#Obs!br zV=2a&Z}@$7dauZLK(_APTahk|efr+UncY>r1_%JJ|GD4vuJTNu7lult;`^8`Oc{vG ztlrf`jw{GhV9x)b$xBZeIPV|27cC>`FEa4uc(~dK*0-v-0OCex%dR$lzhpM|ghxFX zj-_5S5q#=060cf1UmG1mnEC5Pb@&AssE9ghT44hp*f~gfpVP(BvDz=g6EGpfYVMM? 
zEi|rK(ky(=^{!pg4z%hZP*eo-^bsAQqMZ*G{%3?ua#5wfW36l}QOSGJJLdPcA>t)P zY*_S%2NOO*mZItLUAKLNa`;yT_P;ScFGg3Z1lio$j9o;$P2kEws5d~w7;?-jr68E{j@5j&Ay=Kz=x{MC7ZC|W8RYkMsK2Tt!t^zK*zqm1g zE_X$DCdS}3|K9$5^vZ18(CU}W(=`b3aMnQ(w&s*X__37b%N+jTw|J3Nk--P_OUt4| zuWqz8ZC_Btm{u?P+Z}Lm>qCF4CBDz;LDhc#{ASn+!Kd<;tH7%Vtp^mkFf&o`!RZlJ zP(rf53b169vG7_b!|b!TEk8lbN3DQN{y^cE$0`?>B_I761Ta@ebg#ntw0r068&;Ki zi8SR1e9=dDB@E#tCs`L{A1I>w?2@!}`XMiH={SimlxhP&@(^D>Wi@gnu21hWNsgh) z&;7k2-V{SvmR;`!*A+}Cx)!U;PKyF~HY>QE5d_)>KCyQp9cV6dOfTA(BEM^#x1T$Uu|I5XxOTWNuu1rbjE>|zOig?cvy_$an0If zG_CaEwZoyC?;hU|x_5far#=C*OiWBzh&rpkx)e3hcGrxoRl4+%{6r-AD)C~Y(L|Qc zhTi@9W8WtUdc(fB_c5b2>Py+Y57+}0tUOM$*+ngWyH>Au$}lzAv6mS6UWnJ*?E0L# zZ+@h9EMg%cw}-IHAROJ|p7o(w$e;Z?ZPNUE(=Yh%XoawPgLzBXK?jIMmNNEQqS|Hi zy`f6@Edg8O^EvyJ@e*5fojO+;l(NUztGEr49=p|3OWadAY@tOCUbd=1igOsRRy{w zf^*IMwTiG~=C6wSnWL3UL%zMtKl(3b9kD(BZGGERB;vhi&~QpxjDl;ms|w?KkDwZ= z6F!5$B3|)XGB>ym3!c!X`X7TLG@spj7QJJ${mSFdu_OBIF}!4&EAR#`(t$#XIvnYG zT{2|1s6&`^p3ruCt8L|a#UzVfZ$a1=s)grpd7_K3)OW-7`d1Ox*+izM(Th6!y$wpD zD&`GI9l-|kHBQgRtd9#9Z$_ni80Y8~duJD-lNN=2eb*|P{oWl)Q&~`5|1Y)iV7dWgJ?r2OnvqB@%Y$x)IA*5V zNszO_o@HeKBO%O+7RhGHhXOLD?h#0tZuVd{r9vfPi0Zs8$+%@?dedInubcAZ75Oi8jN1UGcZF*xz$?Y&M z0ch#&Zjl+X+g$neqCjj&M~^77cVuoMnbrUamGixTPmIE4`>al_x!t=gY zRvT9tqtO0LTkr3M!U}Lz0rhaiOsY_a+bj{MiyB}-`mQ9C0hEpqBTOJLitd>Y4~~GY zEugv)ingW*H>2&-?j)+WXL)~snTw8-;G!Dj5%=J@7(y;&mu2@R&-pibGA8}>)A@sy zAKxw~oVE0RX}InTwyQ1^VtkTLC#Dzu*|*);O=FTn`CDMRZ-zp2!q?@*`qU#OSfOL* zMd~z9@&})3h$>nh1xyN1Yq$fKg`TkCf{c?I9g2I6^ta`G0u-I>>h z(Kj4!R4G-j%7F~dX)6x6_j5U9>2&Onqrkkh(__^xT=ivF* zHvtNTH@%k!HakFHm{Ny+&-|M4{!>WZ(0vzysq6mO(+3CS>hE&)`jL9D^~?)a9opSf z+wD(5SoC#4Ivht)n;`j`n*fP)$)iG{s;t>+Z|8qNJ5!KDsgRcwqx}M(?(FLnb=Jkz#q?vSp0#zsyNYNyPEPQSLo4DI~SvV_0N&R zVdolinZE(Dat=GCA*%PHLW_G?8Z;aztyl`ihh8)FPb`Tj@srdR3*3U*5`p|B_HXsU(KrOaBzw@c zAy)dGBMJ!bb8Vl5QfGk)sHB~%IelQq;GQXli^KNuDml7gg(wf{*z$sY203FtjE_$bC8i%Kowo*O z4d2&AwH8E&Sn-)ns(@l@>h*#J7bQs1hioqZuMG$e4_ZhSw6xo1kJ~06>8HtV#ee(A z$AcA*mZj#Tb~ny$LUgXZPf}`|Qv9V5-RM2kuqouWRu^yg6XT}grx${e(Rr2WdiK>e 
z_2czt$ZmdlQIs-3qlSH=!0=RJ5lw~HTb;Hp<_!8U&icKJ9wEUI_%++bw6xyRg!YD` zG_}zY?@DhfJe-1xZ~ELo);Y>M3_6CaxZE?Zb&yMqN~Qh!{Z7S783paTs$2PedKD}F z!sA~L?)2I^7+)+F)FxUYp(vF<{?LWtlMJ|Dzpw4M{q6Z+S!}V!&6W=jq;$jfV>K)& zDW9z)6B0F_x^R7bu+N?HF>jNd0!2Hr4K)`usB9v;Xf)38YKp^S?dztd7CQ8=PN3VY ze0l;QjJz>;2NFb9{vv^Lr3hk0rpuDZ+at`=tNB=hgA>8Rlf#cIB;EBQ#}qN?ice^C zshfTIRreh#b2ZBBx5(*{&GuJ)9xdH=AXzc2H|FC<^#YJEZ=5_5=+*C^NWl7RnC=_= zXdQA8nURF`-FteAtH>^G&kgZfwd=*pL2SHIe(&q@r$X2c44OC zsWP**iy=wm*mE`aL@8lFyI5V8AVIHY7}VC|DQ?JWla24XMq1%P-YCfSk5k|AaO*3& zh}=Nu1E$xx*leZA-rPM&=5TzO+>`6?f1k=fK++Lsz-UHCyqx{C^UPb9*XcPhiOz%7gN2N6k8R zjxhdhHyk~9=kJKibc5M_Yw1jOeMmfv5x#bE35he7J#}ZiQy(ku?Ep9CSo6iS9yI%+ z(FPvf%qFKb8mV}*#hWLuh(E44_zQnU^qf4^MB?}vwH;(kz~qf|S;@o}g@=HS=b#rirk~zw&u2N#S1*EWd6ds}>?G3npu5dO!w)_oaWSPN zn4v7cDM=&lcyIH!VPwKm8R;HeIbz?g7%h1(Hi}6_OnW>x)`)UB82us8II!lDlBl$p zwT7%+Ra`o;{Y0NkqCKZ@EIRD|O08Rr%tacCGK7}Bw^E})p~tjw2_KKBm{GYFJwF7X zWf5|hY>lBr%evQGMVe6JWvv?~JXn$U6eQzA5`#1;9N3>!$`ho82tLZ8`w$!j7*Jp) zfi^8~jzqa(IxgDKj6OF6u2Aw~A4i(q#VY&xi&U2p3}vRxNSqQj_JasF42~uzhp(tm zVto2nFkW>^{SZFz1W6c%V5~g_QWlH8yE#1SHh(6kgk~{ER7KMNP_3msp7m1ux1&`3 zpN=vU2=9d~-YOLNYj?)MaVNl!-%jMQSb>8&IMM;fMLG1K`~QHyt+oq^umZJjNv*^D z^d&PY4E@pyf%?;%0*ar`35)mW+P5@2Fgt@nTg^lc6Q}^`uE=651gQ9hk5zEj^3De@ zcWoR7jM7BX+60LUkqho%zV07cM5y%>Rr?_3P!wj;pqL5+b=KCq8Cmy(4xFR`BM_Yo zyw~^$K-64EQvjr5{kw_pcb(qT$~8?c6i)MBQR*= zy{#c#=as*jdPQ{Wg4gbYf6~*U66-U*gtPS^%-0_u$o8wdt^G#GxzD~^9$sQrp0yd` z+rw0_Rx>`keSJRIgt9QOXq1aLYhhj|kQcz5*?D%#mSaJT#Quf9FZL~lie7Dfnjo9A zESL{ap=Z>kUF7BCf22SWDahMtaYgpE&Ai~&T0L9rSEok~gVVH=jsjWIPRJ+0(==m# z9PnwUhe~+FX&*E?sTt!3_J zJ8r!B^7o5bTF!o4t2V(}>tgs5ge60E#WMBB2S=bHre51T#FJaY3fly2$2PbtHfo_3 zOEN*x4~{M}{J~555-)h>QRc)Xnjpr7a`)#+YLnpV7X{>Ctc)Yy!gdPz`m;aYpEP;c zr4T{W?gm3?t9lCv@OL=zgo*q-^8OB$qRP(i04HN;}o0%j>yHzGOF;`{;-ppR;(-<1DWs;-N8-Lq#W?#dDj z#U_o8iAYAiE)+1z^Gw+>*bpChH~x~Hlo4iuAO3Tp_BqUww&XdVTbk;z%InyT!l#{Y z{-+j}-|Tx3a7C7d$CB6kg9fh5p*{4;!Yiq{o78nfTSr0f1Vquvn(M?jhN{9u=t8rshXi#L-nN7 z0LI3z5j9sD+U-Bi#jN|TmQ}QS8*io2UGuF&rsBUZx~+)b_jb$l_@rQ$99b!JN2ObP 
z5gAsz6n?&%6p~4Zai`Tn;{Iz-iOf7SF+Z`;=wpw#z3(UfO%rB68N_PKEJ}X)j9!gs z{wIu!pHGUg^@DpP{jmP-eF|tC`pFBYG5suM6dU3WEkc8$;s-yUIl&>3{FUs|X$pPE zn#&Y@>H0S@+UHoB9CQ@qciDTc*xzME9ih6~-ScKl0m7p89r$?i%Y_D(z^$B_1+2mH=a2^0nuJjT%n zLO<@vTAT65kSpP3`L?WK;m!-bWVy}d67Q;V{a$}?6ABctSbO3@Eb7R@;oPj|IRyjA z5a~ML=2Pmu_WCn77Xn%=f5c!0S<>!)!HC4Ew#>hzi?(q=DP4lXab)Q!N~YxM!2rRk zr+tm4^+Gr)!jggyHP0NYY&q^DWNIAulq^Oa z&l_m8k8mMi;M{M;XTBl^U9Wr$*L$RZs7(UJ73?daDD)mZp&vO;dcGk|f_=UG>(RvN z@sLcSDXoC#-P--Hp+aChPm&$X5{Tc8$T_&ZjN28jr87Fd9~s3rc+0w(jXK%yAvx%C z7c=-3T0X?BVvFjLf49Bapt65yQ=-3x5E7QFweN8>hC=wQv zWRoC>K`P&1DU=B5oVo6l?$mwh%kDCd!Zxh<&=*Z7fZ2t z@?UOL0n*M#hrZ-CvY%gY2RA-)uI@gsKR@3hRq* zE)Ar&^*OOAY({i!ZDns3`K3iE9AYRjQsC7#_E;JRH2VO@^zH@`dvlSqy{H8_@M6FF zb$l0%Bumiy4|VcNPtw8`ANxvu?Tzew@re&pB4xX(;!AGR>Bu`X6V&$AUD7cB{g~FWgq#t}<^t!o`J=)c+Z6K>tx8hu#!3Em5i>Hb7B| z)KqG8FQqh1F1@Tz4b}IrzBHTRN7uyY8>NGPaI5r}{*R=SparrF^9e>&*_Nlt%+?F0 z=l(ouN+yaL^;p-ds+&Z86@lN|e?phawm%s&9XRaCI)1&>mUSZ>6`p=~ZkxlR0>ff_ zia2FEZmOjiJuu??cF9lJq4*Wyf74!u9m$;)t*Ldf`8o{z_u4W)@_)`ghk+EB6_G=p zr{tQ-DZE_q1=#lX5lEgo;k_l_U6)$j!f)7g!;V(EK5c5FY!|V~ut5H+qUxvub6S<% zYj%*O^@8=iHP~7J%tqwNnJCD5!WMFlWUk0C_{6C5Fpt zMH_^WsmLv$bFYbluW+}bh$R$Ln_$Idy%LH+4gf}d6h{6|W)E6=2KR6xFxjdH&uJ-m z3!3TDYinN?t4#Y@o8h;krAPN8)s;0eT}MWjXO8*Y%pn;_{en7L zqqubW-~=0*uTG3dY8D_CS*1G*V4HUEG43ai*jw#Cvw7HT%B}WSZH-mc#^}Ppf#;E# z3q@~lZ)}o&n0NXK&GWB%t)-&|FiVBKYStE`=Z}=SBFWdvE`DEkQm{RCCts~L&aqYC zbm}Tt7tW6O$=_|W68iI#0FnZ19ingE=f@(1Q%XO@922%b?7MD$UfQbwu*cg>PQJpFYui0QfJM?yTca*`7N) z;jtR_(2@F%fEQViKr&rx${m4x4Kmj9hunO*6OWX16`nBazAs9CpAsD&E+rFjtZO=- za8`RxRlhc~3J5U= zn}~Unf`kI+3M1H3_r&m#@0J|L zQX@;tlE8Kca^yJ-*Q-5aQ|yc>`IM+`F*-MY=bxiV8sd!h+q{Lb0-NNsChdzUt=C}@U~+5VtB?u$3S*ptiG3FD#34YboTmuOLUjTWcbD8MvX8% zgutXh>U8z_0A9U~AOCVNrgHPt@vfiX{jiZ<635OIu&=x@$kxE7FT>IGN!FPRS3mbg zqLj;Yt?q-zcHOiK@i2KjwJ$Kw_FeYU?gWoX#|e8RyYWQle{&L7IC<|szV{e37XLYp?{yt@rQ;qj z+59@O{TqNSQZnj}Yh}nYQT~u$s^lGqr0dg2GakF~{M7e5za=NGOj1HXi_?Zs-vLRu z_o-ufK90Vjx-^RZ9W^{MOo5lrB2->5o#PhSMltz=lyIF7*o4F!t{20yAFk)UTG38S 
z6uN>S8=^{7ijAU_8@a_Vb|PK+oE??{9b6pQ1h+t4Y*% zkxh4Drl6JlQp>LD_5dyuu^BqSSeDaqz}F!Nt>Dz|2LYXQ1Uquf6BI0b`j`S-b0zxp z<-{1mM@u7G5Ex|`h;)9`ed}VL2@tQftYF+e-46L_2@?=!WkV8GN}-n>;x6lax#baP zL-W?HWN)IK#KIQ(DW2>Vj)(<^AHWHM^4Rq}5kx76Pjj)kMUIxMA`4iM60B5a;asGA z=UJU}WvV2rHsH%Aq*!)T#+0K~m|*e(*Nu9!TqlD|%j<7}yy}>aE>Vs^A1*Egpe@4N zcwrWvRjiR|Jp%Zcy@fn581M>d*`3std6bClrx#zPBUTI!hx1z~|JzcAqdx+1lR54B zuk%8R2eHs36tsmw=Yw+FxeOJ-hUq~gO-I_f%OkiH_{!zEe(>Xpg)aWvXS%vrf&<^b z)A|>1K5^poJ%$W<)l=I}ZF418{7D0%zGj3cf;CFadXpPYSLtnHAQCf#VfddA^?v4B-MBgw|C# ziE#8w9m1|WqBa;bm0;n?V?NyQIgYRjGslmd>}MaV)vKR3xK>I@r**?p?u@m$Wf7Hh zMn_7_g%%?^%R|S?ZkCX@XEYk`sm20w&a!-W;zLw?p*=bW--AuUv>X7rA&mb`Vr zbj@}D3!~jKTfJWb6P}eW6gp9AYIh@{I#F3j7Jb8roe@g6F^U)H<;lW!__e%hl|ZK% zRJ-bN7`@QF z)HL6n*EF|98#HyDy{#+bW}8{0M%2x&?;6nyw`=$R#Aa-**mP~JEOj~WmS=2F+K2}Y zhQ3<9w^e3;w^hOeag&6{Z00Y|cH6787+s;b3Kc39O+CYaupO~9{F$-Gnk(lj@DEs4 z0wpR_glvsSMgMF2--v1h3QQ$<;q}2I-1?C?MQKB7)yruVFl$fC)>xV@Z27%#eYZ%v zPOU;KBzWxfpp`wlMPNM{Q$arA;4+hNB#LmAs?D>f25&LK<)OpZpql~HoEeB~MU7*N z{I<5(BA#Q;pY&6~{1=`HrKdg&(SBmI;pqpod??rJQ;eZ*y9s%L!9H7`ZK$-+r@?m? 
z{-9hs`095~UOIS`Gg`CS!Y%orp$_5bNRfkoE^%+VG#BVk%@H`_w}(qLvMR(hQj0gV z4YzsHWoj=>Y{a>K%JxoG@cwyRA2o%mr}-yXHa$ecf3`Hzly$Wgj}U62_X!J@tqC&L zh^Of<+EfhQOeEvpxV`ib)k}YuOX~Cz2qRVDz(xQ02N?sIo(=8CU7L}`5@*j6V(`6z z^iKyF&#S1R7cXoLo&{e2S$;zg*1WR8@BW?W!;UQe|I_>*0IUmDdZQE!q6t;&%vVCK zIAE+4a>^5I8&I9(YzjtF9W zT=;Rmjett05j(Gj@W{OZwk=?e6`9_x*qsfFki3OTF zmJ33Nu#QFbk|K-3-rFOqd+GTw)ZzMUW@2O@pU>q1%uHEQ4%MxX6k+0TN4(AHctV9i zHbYkQYKYO?R?HlK(n=Jr1}43H_uWDmN9Un#G(HbH;nL+9dQe}|L-3hA<`kZ zj?)T5fw2|^jj98;6*##PE*0X>o6@~lB9l=+e9A^g20aoLwXi&e2QY$9IZ~KvB9y?S z)?5GtLn`P!8iFK4-*Y%FFoKJ-=tG=kZ7F@Na4H1cvhX9m#E7xzjq(F`+aKo--~5X= zRz>Q3_lEJC<;c#}xLtASXK=mX_Oq<)xd+Q%)?DUA-8S~H{{$Aj^gRpj{%kX3(`)>9 z%U)Rxw&n(gWy-LvE_Sly-$TbDQD3eO?!Hq>KVoS-k~RU65ORQnkfoxcsnv?qquua0 zdQ|OhL~(Yl>Q>G)H`aYR;=TF}ni(@0hbgC57nj{kGN+;XVyM)ylP$VW^H1Snh<#^VmV{|L;IcUI@D5QCIA6Ho{D7h z`C7~*=5pyp>DPbKD5~K0GsWFE%b_2n1ZY3|i5UiTwZNR=B`L?GeuqD=KVcxm=w?x1 z@F3s*@?g^+X0Ke~*TL+*PL2w*I+q@|j`}m*_X%!pSTwB`AH){Ef*OKGHtsW&C`PK@ z=?2sY{FVp`DvWk-962rO;RLyQMM2QiCnV77M+n60UQ(Q(eoeQ~+4UwwTpEYOg|9uV zuHNI3WDU1_BV@UUcLmy_`YYMTbHNU$>jFT{5zU{ZIQhGSAK?q5iCA;qEgZfr65QM3>_j8GJwPo(lba%3@J*9NJ@9ZfHX)*2n-D(IYUYd zNSCxU2omyq@w%@2Jn!|K>v`6?Uid~?gTSzVd++0O9Q)d%C|+Dh_IA~HpRpJB3Et<% zq_E#NBa*87y<({VKJv6z@;fcVtTL8CTIyZ)-hLRuZ4e}-e9p@XuAcVH zwhSJvH=Cj6AvxTfZ~_@+eC3dZWU}b1C(Yb<3D^q*j#>TlX0n+wD&DaLaE}`&mg6OV z#m2^~#16>;s3UT_sg*M32?FOF*eoukOZn zFGwjps0uj9Wu8Rr4fzW-a;PePP(s|`mXH$%fO)Ea0()P8Gj4_X+B}`a<4}F(HGZ?=J8zw4R} zL;l71Hc_2E0$|^%2R^-yZ0CZD(rchu;wRg)`4=0|zE=$4HGiKA-TWRFMd0wY9F{UK zTp}(I#>Tx}?N3%77$Rli(*m=KhK=4Q)&=Wlhc2y+*#MkV0_Nz2T~hRGQ03T7$73;H zf#j`c4h7rGzbX+A9f*ND>As|lklNy<;qv%(%3X-V1Wb|*vuz$(3V14BR-cR<{m%6l zY$zaYY2?SFVq$aKtdWNa#CB>CPaeyUSCj$_rZW~ArL#jLyhiH!G zvYYd;qC1o2xnm;1rW6u_n120H{T6wTe)EvYrCZ4cgg>AQKtCy>jWvZ*V4TT|qy^q7 zCV#yA)Pnw~;>fQ4Y^My%fro3>&RgfK;%t8u1~6lktKZ{H=r%MK$epvzWELnjJC)zw z@MPa?S8qzlKF}7}Q%}s=aV`Lc{XIer7XM-oPV-x{*ZXDqv$x~*c9+K9-sc1s^L#2c z9NR}|k(`MUxr@a96@$+p^m9xVf76jc|IO69OqUuKRukZU@m+i+^Z2vKPOcQWXN_s^ 
zoziR-fD`e61IoUFR0A`No?Akd46peb9+k>#gGI4J$Ft+#E5iUyM*tJM4OiA-51eZk z+ekX!JF7|oxtBA1!{4R;7W17kLQd}L{u{;PXDpFS+;YtQ8OF+G@j9&7a`mRuJ5Hn0 zA#+#YbK{5nn&VEEEn&}ycXB3iUxx~%sVjez<+{f6G`B_ zrkq%AAUJ=&^X}06s`ArKtRR}Z2fUlm6}H~300Syj>vY^(y%}ZUH+XNwkFYmUlGzarWtzIL>6y+?#B{$*1uomM zELR&1Q_N5jX;+Zcn)WAh?ru~tF^gJU)?4Ivs5u|o&j2HIp@Aai&_e?d z=D6G-igI2S_kV8~52tic}4=rw%duogCff%45eg6}2)i8sjQ|j+mmb-DR}$ zHoe8z?^)3MMTan=v{CQ<6s{~M;p&46Z3lIzCmr&PF!K<3l}IIk`CX0XU-#)TlS;X^8GB@MrUK^3GA zIouE+<*-6E3#lx~bU;z>DL=Ie$$#bq7tz(Qu>^pQTw?Bov?h=hvmhHyABF|+sttET z0C`l5H4sMUa>uyj5+D~m6h-s}!*iWcIWEko5Im@A^N?nVe`fQ!uulsXY~=jeV#|fi z#nHFh9MRZvL-1?+y(n4;CGxB3oIdi*{?Y@0V^jW@0B!<s~W2Rs)MZt$||hw$_-c@LafxP*FgBI&qkWw*XxaIQ!PeZ7T3O1c>Bv z&^(gM!CVK99m1f?oS~PLP;23`xSMy0ypuR6s+UXAFqx|8kuCsw`a>lF3uP_<;Y31F zNPz#K1cB@Fm?8sa>8jUUJxMB=E!K(#fO|56x8J> zJh{JJU!u#Pa!@Cr!W)y@`l_^*bDEATwiPm)4ob2POXeWD_0Wh=gtI!DW%O}H0?Bue0(|*-o|I&h#7a?1tzg17Fh# zZwD_QD2D1AdfAoD}h}yjPhxDf#gEi%Yp`&oRo#1rB()%o?6cVtGHXi5O2C&iq**Vtm z^mE$A)Acvnq5j31@q-%(4b9N*P{GtTWW&zr$|{>b^h9k#eb(?<1h~U^pIGXly={yk z&m}9t!`#!GM50HE#thAwX6k2xa3}l9=#tcF=Nkl|c=nYzZz+3>AR(=BXL{YaNf1ii5zFdjw<41=Pu-J08@$Y8y z%p4wGrZu8r=bE|+Q7J~dlp)$s=JDiydUQ33R|VBd--vp1IqwP>SdJ&Yd6FPb^=TS@ zFZxZap3tCsL%G!!Lex7<_g+GZf0pLA?F}6Uskol)IHy8?YovVolUrH-57Wg7@$zlq z;IV}3LeDk~p#O1FF5U-Y3OHBdSweKFFaF&{eq;JKEGUo&IXO?6euGpRk^tIf0$qcO zkE{X3Qi6uc!EmTXR}Sa70Zop9bt*$fm^oK+P+ZlS7$!6Tnmik|!nT?e1Jh;F*odVn z<-xmOc&ia=PX0%;b1%oGRCh|D1aw)^4OUT4d;rqrA>wK@LkfSUZ>-iBd(;Km9`oz0 zZD}Yj#{LaV?WLGtdi|*}y3JL3!7FZ!QEPspFk>_bRou8^F2K!y{f1g|b{$sIks(aq zC)o-RNVU$RuN?k)IyCxRikL--))MuRcLtWMVLFOuti58&^e2OkFH!&vw zguM}zO3u*@(qm9ws)!v5fEvn?!E{Uf0cK9Nis*c1pv7>ngn-k8^^nf!14!?T3B_e6 z@Nx|Z9L2$|)kMW60W0V72Pq)mG(Xz1`Jqy$^tV#}3qC&ttKh&+#6pGoN=7UuCs;?T zo!;_N0@1Ck z7?=8h2d<#r+nwzocRLK32)3{>urW8a%-`00qaon9c@Zyp56i70bTI0DAWHVqBl>{@ zSZnmq-(<1$il99)$q@i@R5phSy-XW1ytli!@wb}T(D9uf;Q25pJ@cw9W~wTpUQu(;!dHYWz;%0 zyZj+sD-FR)F_08%s*O&}%FLvFgy>v%z2-rw%ThZm4mvh!8K@<0)<3g$1k2_B^p4_6 zBY`d1h$YeJ%x}SNL}Pc6++cyAe#UZ+qz*)xd1G*eHDAYf~@gZa**;|DwCUzMF1H 
z{J*S?gvcJz7YaSWGGqi=P(A`-WGEXu8vRkVS(9SEuRzOri%zku4C{&Tx50JVnljof z+QJqVb|*xfSQ1A2{UVnhf*s;pJ9L4t^A(j}W54dQ3UIpZdLjQY3sK24P(n01TD}ik z7IlzTZI5+Crz-s9xhtQik0}F>ub26a$xl%;G~02fH}Zfn2J%TI)eGl|2ele}SOuOW zTVu(~xykcRd=9FN?%&BOjuGruN>_^Vl5*j?*8GmDwYEu<$ijtqZjCYZc8aIQ8Wmtc zcT;e_t4yVNbAcYNtyyCFTUk*j+gtR|k}hoWh87bi9sZy)O$oM+m&t0iJb|q2zy(54 z=~96Z0V#kX{yeJM&oMnv1C8GDt7D8Bsi7j_B|=ahM9yET(T0{87f`LaOyT*}cT|zn zyx?+1EYJM+C1FoqFwRO4l7D>^Xx^oO&sc$&!P|%Apl~|wJ}&AWs2O=xEXJk>F#~Il zU5JW)=PdJ)NT^Pqt2`Q$lWGpkH-qp*3_i#d>qJz^fhG`3A*icL9`@U|3s8#a$b?JP zyAXl_XeDGXL1cvzlSEIUTNTWfdGtil$@yo#|D`lTMdJemO@PB)w}p+q%`G;ffO8fF zM&niqkdReVT@^wbBBVG(3G|I?tBw#eFgJ5s2-GL#vRSYhz+k!;D66TIWq~jR9@vcj z)z?3PFMyy29gG7u#9;Z;5;%6Y1{o9k!i_ z&3b9yU(<|*@(RAV;##Z%S^62?iON)nSyN)RL=#@_Th#qWy{5VXq5DJ{Mo$B!FQBbc zr!#&l(H!#vJJO2DXDp4s*_oDb)Ufu2oMKIMg);o9hDPLBE6Y#UW!YM11WCp*LqIh3 zki#8)jw%1SS{sYwA10;nIaD{b0b*`t1l6XUl zxmVp*)MkUn@v@~8sXlLa;yv6QTXEkshP#RQ`dc<_n&F4j*CH0r&v)IKrB2VpjPfp; zrgxjUSEX$jlEFb(mHw2n=fC5Q<4dR$v@Yme?%K^jdN19J&0{-Yt`@{%sx12m8o&VC-v194QMyPDPjY~L4*3OD;_c|SBLVPik zUbtEE?hV7qA4-|o5SIMmqwnvnzPZsq>2<}M^?H5PQkhGh0)dj@oF*Ls)ScT}Z~q{- z9l3gP`v_^#FT1Ru8fT`Ru^WW6PsO1TwSl+SKPS8<&;4+_SI+#UoP1{tG^wni8@ zt?NK;7rf+B%r}En-InGuD?G|YCQcFBr4kmmklVctKUo zw$MFWgx=Om;@9-=r}1ErrU&>j%lR5lifw$;^*K)f#w-^nv(nhQ^){%5j-^k=<@(VE2PplfLWH>?CBoPgw@5WQF<;V}PBY7Jb9a zs=nF$3FW7C`Fdo1A>^j;`^R5%jfefAz)JEBqGp#I&Nft8n?_mnl@mlGIS@j^o;gRb z`CF-q^Ba$uLm6O@9t0!aSbaEKZ1uz{4E-X}cN9NqBK+ffS2j5)H(eN+pkGxq^&|Rp zJB8ue!JU`x)D31GcP;n0E(=bg*uOYNBGB&B_3Q~HNMEz-LYyHBH1fmczp(*Q9J~zB zd%(JF1wUnXpP5V{y%6AOXIe8O!NWX#G%*iO?N2^W;4l9|Cgj6499NT4rXo0 zu6|!6$FfU>mq|r^*na(Jx93$ZOIm?&6Y)}hK_9n_l8gWsVb9Kr5~O@4U@ZEgS$NKY zRn?7g*2{^0i7nwiPsu_f4pMS0(seuItz2{;K9C zC##|7Ssc5|Vomh2Zjmh>5HTM!!H@he;E#K|vzmas!18(UL!2&W}4@_#AgUpD2;q%`NY+ zi=-MvK8sL1HG~+^ zFkMcN*e|A~9myqLQdk%d8-12>&tc9qNuMMjan?TROrg0A?Dr14LFX10lUv+kFSOLG z8jM7KL(34CD|gc4S+@Nn_Bp@hJ&JpbSghVcGfm#SZP1QCxn5&at;!uuR%!#xd$H)a zB6RqY%3@miz&8OR)moQsNT+`>G$wZGK9)zVqfo80oT@x^O4I63qc1%SUGrCJRs~XX 
z!fa?`b8o3jdxM|q1{*{GI%m)I%}gQBrs6x_N}tb(ksqgfjw{MmEs!!}`OXXf!&lC& zF|%)MKQI{4bD8tFmc%HoB){1qY&e)^|B;A5ge@yWeWgpVEkm;T=X_~>r{f%{?(rN#ol z+UUPZ-Fno&;6+zZV3!!GTR1%anesesPIx{T62KmaVtfCBjWS!}j3Q)4cQaKJ=lR

A customizable framework to create your own LLM-driven AI apps within minutes

-[🌟RESTful API](./docs/neuralchat_api.md)   |   [💻Examples](./examples)   |   [📖Notebooks](./docs/full_notebooks.md) +🌟[RESTful API](./docs/neuralchat_api.md)   |   💻[Examples](./examples)   |   📖[Notebooks](./docs/full_notebooks.md)

m#~tXIl+hi&NvFXmNos_4=I@fbe*Vu>$Bb^i|iQ7Yyq-hUG?=Km&?xIE(V4aVvqD!mj=n zWU23PoXBe^JGJ&$KuIg(2;nrYHf8o~<|X`S{PH>68V@;0{Oa*%W{n&Dnqv#4Df39U z{^3TxsX!nG;TwOgYV87FcFT&T}mmJu@t~fn6?iP>#ECtGK~JO4~Q;HN(LOyP%6cL z{YO-O5+UV+<$-^ub7&BEc{Ed{JCrq>&hYCcXpUe<;c~Jap4oDS4dy~0StECe!;ott zOHjA~cZ(*roHd0Oya&M5mnx3IhSYGK{9qBEh3g2T5EASTE`C(Hyl1#p^;<^J+e z$;+z#l|}bX2twR%)oW_5szn%sX>VM#Gx*c`r zgvu^xmL%$!+G@o5j`*$bfmJC+&7{(4FoFDM)~34TBLTu_tqNtN;D`F~l0+IkK-zQ& zSp4#{w}#I?o8W^fj_J@!9$slUJE^%3Eru~4a%M{Y(7HHl)znX?2^Py)(x1tRZeBVKf<@ClkAV zFHbsnyHV;2h{v!s^x<;5(;rW1$%P};HFi6_@0QCGh+*?AJ*2x5ccGZ%5!m+K1CMy7 z&VKi6ASDqZkl9|yyajhpFUPzNlexq^i~%*#+TMW)pXNc~!>aCtiPVa@pK^=06`IH2 zvKpj4Y3_aBXK3}ZhS*1C2yCFoZ2Y5_v~3;Qmts>Mrn4 zT;DyFHT#T^&v(lHM3fw_N9j2%zX0ax>7_4bqzj{Civk<(s9az47PW;Kx;NF!FpTdE zJ!KAy5k#0l-I?*27`>HK3hLu^s23ubwsCniu>vdaD#(V`kMu<-u8y3oB!k$B{JTv* zY3$lh5}EdATJ8lY5UnY(zZWU58@(X>-!+clzV}PKFg3TKcwQq+Fl%i78AZVNoEpq` ztRRHLA|!_so#;RB47Z0#5@D3VUEa&+gcVQ|iPL(%JpF97=zh5v7y1o!SIYb~W zH;PZ3q{m+mkHY}fat}%rH~_UV>rxKY^+=QFX=L{=?z~Og8p3bbX`16dr_`Yds?oK5 zfiPX!0Dn|y^kuRp^g2|-c4hFJ9&l%{zoM4Q7B zaE;2h(k*}vQS^6UT==*8$Zl>J94?<>EQ*4{a!ko@#bEGWgZUANvDM)?U-ffTu?=$< zlncN#xd%BR8r7)c`dG|mKS&gVN$OE*2fjda-}nDr?+p65&5`-$0pOz}(_;bLII5{j z^#P7l5|J;ax6;&2IZ;L?hoj(ML*e&sC~?BSL9I}Vh|cnMC<--Lmm7chin<(~?Qntm z7PP$DMSv8QD>kYOlGAT$3_*ClMqtH_fm&}mXvDx`nhQXfb{2w9mz2T6-aXMZ2P~VE zkAJ#)reRda*Di!+dp(nb;kJwi&9V_V^`>{bX2UT+_=*CTI+s_4KC7+zn6u%l>0J^KN6yFu|s=~+S-@_ zZQsc^jRAR^G#`*ZHSsEq?^#iQzUm{uJ;1emqcD-VtXI(H^OZjiMdavO@&~WneSSUV z{84DCpFwV|#ik>cX6(a3a!2Op&g2|@4c?zSuVl2N4ryJczqtOcDt(%&=R=Wwze%MN zsxMpV8Y4b_6Zx#7PUBi)Y!2$Of6ik zjBof`SV0PI^g9<4e^d<( zxj2Sjgn>kT7~hpXsBOevq(oT7H)!9PcKS)%v>l!S;;UdDu&hAMH0i1eMC&`fLQIUj!impBQ>Ot?7FTY7{K%4;YcTSS z?OJ4{>|kY=c5RYq^C#-_2)rrM(&n23&>b(~{c*wvi5qhZg1004>V$b2zzc-gCXhiM)^u*!85N0V1w`vty0lJcT+U1HeCfvH#p`y?ZQTL*$Oa}zSo^kf^B1Pb zW)Lr7Q*ng@-q`LmTa3+?yi2K49-8Qpn#C=Kt99CeBRuSN|O5UjR*8;syS|-^}07%qu$G=jMzz;u`Oj~?vn-1*l@LS1;kboe@p8|*621@VxK+bVn0v?s6&3YG zbGe`S4N)hIU5Q*!D;EioVESMri#_%%0M 
z77wU+)S4(1ca_y~6r1&(cgv#77oM~Up{=-gL+@PMt}P0|biThNLl-pj|F5kh)Kmxb zmue$r=)avIkC}Cb)B#!kk5_HdRSFvVA~MVgPQmr zMT_;r7G3HtyqIm99fj52bL18oRaTQ>Xv5^76+k^lSB1-;@`oX`taR<>cb#)<7V9H~Sj`@(76L?tI4kMEUZ` z^H$ZJ73X5n2hB~rj+HhQRG&=Ip`x?j6M}!%kvy3GbGtB<>#3%k69FBZT{9s;`z~K8 z@rC3u!GhCRTh->F)wl^Yi20HC$aq~lzbq-{cJP3ThHCH+{HgYRU@(i8r-JAPR~Bkc zY{&F_x2(Q@)57U|HM!%fzws1BWw_zH)pR(&)2^B8dBiLG^&=S75_J}TlOjH%%D?!` z6zu+Vo_KPv9%hS#;k)sKDkHkNS{@jm1-`D43h-wfYpK$qpE|$M_TBCF6f7@0=E5B4 zcnro~=L}EZlIScYe1w)hM(MEirN6D?rSyev7O@?7OukHH)eR=v`%k$xw@(Zq;O6`&FX@k!85#PT=Ew0%QgFXb7&&8Rygg-m_=0E zmqiodjs0t;v60N8yR@mT89@~$^r_P~h=7e8P(Y4-Fv42MK3eQARhg`dx&kHpbURzpJ{U-b8s z{lsxslG+(8i`Z z*p%Sh+=B;$8%}DIhUdykOV%EH8){R>jycg-URG*b-fbVNRoj85|2xG7?ALDi0E2U` z7$*q0K*tjU``|$if$E-kkBXZkq^&i9s{BBdSwrJ9NzLwUUa#`yw+&aFe}~y}D%TA% z(mJ_OADSU53)79aDgvW(LyV^Sv8_q8eDY9Evo3S)I>=Gh>0BjuefueSD;>uxYxkAL z)IWf|x?;RRWcE)Fq5uTZ0MfK^4=f|&N)U~)tvagAa8^r#7Uf6*joxf(IIukvTC6FX z=9s-qEDP4T1vaken?%Sg5ZH+KO&Tg23|6Wv62<<|#;vYwUDs-rFDw%BVE_1R%9~%J zRpfW*bw4Xk2!35Rl<`Nfe®zriF~`H!kBmdlU~C6)TfpX=su8bk=G~) zak>KjFw*suM2|QH{tH|DaR$@%)G7yGreXJGM34D+90;WVX)RFmwH#%EB3(@o{)kfO z9uj~flar_uWj67~h{F&k7_qTS_aXgnKG24eYjX^XskJMclZSm|Gg)I9l?Nw6SRfpx znBZJCQxLWBZtET@@batciB6h{%CsvDA|G31#=hZ@yR3nmj?xn@jfu4kc8{hb#PSxW zgLB1=QfGs#)+q-UO%9q)tk3Kl@GZOhvWkD)Rz`m1DugMK=PT>X9dv0<+4raHz-O;$ z4T(C_eWuLtHhd?v^q4xLfMQJz*gyg+5)xpc_4{izbF`0`W(0`Hsha3NE$X#Au%J8p zkl3PLhUaC6emd!Rj4gibe3XJY`E@QPyI=N0N8dmueo#tTLsvJ_w^dWZX^ztQtIw#Z zB>d4Fl`u&!Oa9QrfoB1aXH;lf&`wF1z{~Dw*OT$1-OZY&#UEWBKUQe6%QEFc^`OrQ zWWxH^7>NkQB*V07#YQ%Wm94gMYzdwDW1Gj@`MY~XK>`~mAj;B2!r&RmE_-l?SIW-> zzm~p4CT?}#j-s4>I^@Ib*^SiPvGP6QK5gCkuj4|_T;5ZE*4-5b?%6RKB%|6sTqi=d z&ChtShPMw|t-qz5lATd-9%FG<=X&xqW!6D{|C7Dot$?ER=p~sd9}_w_$DPQRsV_w+ zB5sehu$?(!Be`UU!Yew{j6#YmuSiKY`F=h%sI4Q#aGF1SXxR*UNx#9IE-_&nN78Ic zkjv>?vD&l_4V-!!z}ds?FfEsvyX!)0SduN`G`}j2$gz7^M*_w&AZ5p}_(Lrr7-8m~ zK_KL$oxkgC@&uVWyBbS-JbWHCAlK)c@tlc+%^xSV+QsP>fJ0uhL)XJKa7qn-7CmeK 
zJS9?@d{*-vyV(%6#22uz8Y25qOe?$hY15vbKh!8it4z1?whxE2;XAhe&!#uh_P!`16^%zF|-b4`P^6eL}*B`vKyEXq~ z+)rN=c#DUBI;!KnT&-P(@WM2_i@kqZ{~`Yv_=PFrf*Le8$}nZ+Kum(l*F7?jyBjwo z&n-Eq+zE#YgF6cB)&zA*!RmN$6;mm6b?jUrf%eq0Qj*+Wg318J5LKy6Fh)X}`3F=o zf%~p}=C*7f4+=USHF2$SSGcB8fuO&OK>y3UlBd{Ucc&&KY9+bvAvru4F+uwQ9lRuC zOYdr^T4}1$6LC_vFXEJ(z;39KBGah{<_`C}92U7l0U_xgc=yDnEF3o`XPn-lY{M@EJ2<53fSPyJ zHJFux9|g?e{nLL(x!L5Q%%HdQ8DT;H8n@O0YE3Z_`ZA{Cg!|}yn{dgL;hf^qfzkg_0gSpD4dgM~srewAt2SFB*Ry}fy{s!-(N9K+1n;#knwqJXdX;zPxv$f|! z0~v`9uHI&9r3(LAfbb3qM*WqliiDwXa&Et088WBa5B?%YD8&?P&_Lb_D62Bg?qnE4 z#>bPqNB8pfJ*|e<7#Dc4&JC1yA>T>#dHS}Wr0^cy^=&_3IAoPN|3wu_$Q7Mo9OB#H zNR`UV29CHHhf+2{OmL$#gItHYt}e5oF84Cp-4)cb;!n^&62S`cw){6fu*XEH;u)^Up1and;dv~l5pe6S=9+N&Cc=P`Wzon>U19}73SN@rC&rP zeeOX~+PF(6`qGo|nj_=`68lux4uFUkDq_FQTY4YRFkaDX`En?CBGCvr=XlW;WPBDL z>gdK}-8U4g8J~d8YRIP2vFzSywcXBgzgV;1?R%Gnv#0fV(vpi)q*c*d>|TC0$g*`a zCr57~ImR;PJBI9K@0RZeYcT^AU^OGLtcaE`3twUnfN~rGacDrOs*8lS)myG~Fo9O= z5bBtxZF*qPT+=~gznes=C(C=-pN&BRa9JqgP4{ChwGPKVnPK)gdmMGt-IB=+TG1jx zKS+k`R;yD|;|{ch!4uNhzG?sFGkxS1h$iX@r0it|%hEsg&M4+0zTWAe$g5wiovi8}>+^L@w z9FtJLIk(Luj(71?#%-nLze3Kuc26o+tG>+REX2+xE57gJZXpglxK(l_G52*djw-YM zv*~-~G9j>i^wfanQV;tpFMX6AmHt(E)*@!Mt+03tv@I3U)KUXbn9|!q{cs8bE%D^< zARQ8&V{WYWaVUVturb!EKx^XRQOvop1|ZS{`>uOLoM7Y9)g_{`sEM??mUlsZ z_PJHCXuXA0q8*ykXewhEqmmrW_A8$V=?on+qYGO$nGap!g0$%GI)o}#FC-teEF=hg z%pfbz0pX62HYEhqO5;&WIa=jwECrgR2ehStvQd%Hv!^F!rjx4|6V-3kL_fc)qCE*I zTfYlfa!w%b0D6~%q8rRw3su+soc0~;F;V~3rl;B!pq{Nf8&`6V*h9RJ6jg#nk^ zyfeAaa2dWTHujSBBnTu>lEF8EAqLp4zxM+0C5MSlm@d?A(1C|J42VSLbB*-L-^K4HBCCGXG`iPOO;G3#t* zNYJJ#!$dSO_^M9|0nmDB(>4Tg7axu4FZFWh^R)uRp<;VEnBXsR!%+|Diev;KX1nx4 zpqS=)fv}A75->Gi_KaaqreWx+_z_i8A?BXy=q0j=Ya~8t^JD``evy9lEj}j?(UK6f4Srmtt%x28wzrZ_o=|AXx-+ubPWJ6d z|H~j3y|%2Ls@iOh9z@*yfG5Rman;P~5c}1Ks@;e$mf7U<17WLDcj}{;22xfC6_+~LALs_^Wd%Be@yG8gR2d<6;u*W z{jI2{e>2Lxt|uBy+j^W5`Q~n*)uhjAt>5~cZ%+oQ?yrwYZj!%no3oRsJLONGIwWw> z_+r~{@^faN;Me8_Ucbb?0Q=Xs#9q^{axT0ZZ^c+2nf0V?n{Jo%m%ZuF|6Xx~@4_S& 
zZkcgPdrHdK-Z#B%@@wy=1L~NyR-qSG^qpaj#{FJ$m0=)c5Iz`-D(=MXgy|C-w{qB! z`%dt+by~lfYo1Zx`Pq0>W;uKM{q3CR&+H3h$(<>M?=?rBQyv@MWa_g%;VqQbKP4({ zK2tRcgw0L5FgKV%n)ytmVsCtEPu*Z@{sp+C0>kaO--*(=$!*W5C@JZ{55^afvV5L_ zsYPSpvM)`qZ>f*^ck{~NBrYoudS{$%+5E_c>*_@B%U&R29yn)jSKja*2^K1qZXbtJS* zy5A!XG!Z7vHybplym`q^oeaJujL8|i83`7GZf%1+bWeaD*7bkZ6Q9pgFUZl2a~wbJ zJll&9fkGWZhwH~4Ev98K4O$d@2JpX!(;oHIFI~RyS!B&!?4}c`;^4|!KV+_^wX}+^ z&-f1`bI~6d)po+EV>zS=qV6Sn{Vi&CSgo(?*2ss{M{h`8o!K*eiJTVLcpP_w&2g=i zxm4P0o4WS&{Pr1gkFBr|{h^_dR(jbp_=|A~wpCU&K!QWF2HJkE^S|3x1OoPcj#BEM z8!_4=&*ruij)x!iDA%Xv^uS?`rXiu%IXG48e#4PHLU)z!PMSX=Lfl|B+7(NeCDf6i zDQ(*z4B@)_zFmpACvI7kJhdKIpl?drAX>W8;Xb#Em$nfLw!XDdQ(^wwK1YKo%k%DW zPMCKf$T8VZAY=dhlug;1JNnmnJ=4^)qaX$%Bs6i55garSI}9c70D(c~TdoIV%WqExqPF9DxEkfS518)1w8mA@Xf^1!dRh<3Rc-ZtCk>F!e2)g8_LKvC-4YL z{yUu$%Y?opPoIFN7Cj&n{w95ZM@Y^`2)I-8kF4fT4=%DZr{j4!SIH zxgCbnV4(iRmz!b3zx4i|BrACD{(YDz`T0Y8{EO)|(Y(N|P!z*%{@b~?Id~23Fx{FW zfLaq*F)AlPj`2J;@5UFa-XyxYWyx$RvLIea3e$SBuzvl4*+Ij|&U$PBx2YyL!Siu4 zFRG(Txc?i&tG0NCmu(_MV+-{thE-Qei<0^~bwawW4;=95t5CZ(cW<+34VZLlhZ~nO zjt}b&RK?f_V^qEl$rmSUqGhQ()i+YZWa9LmFa5fBKDl@kW4>>;`p|`rzKRK4M!DIV z+u`cW^yXg4v)6p=rFBfhqf&|z{%aeI?II6m!h*Bl^8=gW!fTS$Ef3-Ly@fZUMcs*} zG?$yRI)udzlCKi_xzCsK2E zGwy*@#M(0#Q*5CExuDCn+b6vKoKBp9d`;KJX7>Kcd+AC$EGh+)2t}r3G?y`K`RbkA zjtPD(-%ZTgI~D9;%ZQDSh%%YT7k{w}W(~=0S?#D(vf;EurAxZZTF0Qn~SP5Y*^C#9SDOW4uRTZ&&1QHH+RrS&}Wj_scmZ#z@E<(EE=3a4IukBJ>c zL|`katun-Dqy{NX2{(urz&NWEY_YGECI4b$cA!`y0 zJlRqRtP&+iW!pjNIN% zgqHV{u~U@BGZK|6ZG*+YR`6kN+=h7StQ6KG8aK%Hmi)^Me89T-&}!D3 zabi#?OLhF>!?!G z{42N2M6mJP@)wF5V}th2J-%_=n<2mEeohz@B6+*)ofAS3BbVK79eZ1w5RuYfC{6@m z9EsgRY_1#m?qFm7kIKa6yX!&pcnK7YcP zpa8~aj%58krr%SqJ^6X=NWJ+<(C*A{9$XiPStU9(;*?_rR;3Az-P=`cZ-L z?+?e%B+kb@z%CsR#J8|gl&o{ZcTZ9++6xRGG*U(Q8JFTR*r0wR(CetC#SN`~lUcnU z%&)4*+Q^wn>q^U*WSyv^@8BTG{{ovk4ggpsxWJCRJDSLrQNb%{K;+p|RluS27Y#nwV8%dxN3zGgeUcgB^4>pbhE z0PlcoeM00laE?hwi7reFEcC)kpDF82aU@NE#C2KwWU|We7U7%urUaL11XaK>?M%mQ zNQj(IbxcrN1i^OTfa4m%g9nT`Z=rbHF&?;u1avrX$TEKGCu7G!Dd9KkU7H1|+pg+H 
zI74lj!{m!hSZ?)DacY6*B5tuD16+?3yVaxFY04>+W2`jT&FWtq<6Htqe(PIw20^Lx zfYU>8ik1bV)Ucsj(!bB z6$hisLFXq(hr)v3=)Zh$4#%SrI5@nNF3i~tHG6-Z+Z{|afXCGpgzN)&d09dbERwLM z#VEir>!yL1051bp@?jVcCW8OYQ89G{Cs6w@8=E!zez0U8Vu>l_ zD+yPU-_vojU-464%(~dZnW-~1_cxuX?R)!oWW0^+BVA}$Jz{RS7-1nO()jsU*lv>P zNV1A4e~)nI&paQ$!*BMOq!BUQL@-9ehdzFaFV8-W>iF>47|XEjU`;nowCmCt0uYico})YB$6SsuMmFh8{me#sGHT|;e0QJtykUKq0a>lvB6 zT;*hV^zq?NNIiLPTVjjQ3xRNOUF}Xe6=N3BJj6><;vTxSbTcA&y)0&VDtN$UV{X#o zx{3+WoJ_4I(@`dfu}UXM937s!EkRvka&U*kja(lK&2=NtmxKDgI%YYRhx&W}INXUy zA~h~NXIai-yW<|z7x*AT=!V)m+d0=-JcDaBy4t5venl4}`~!7*1Fv^(r4U@)r)Lm} zYVXhYq#SgfS#|>tA0n+);@X;YyfIx$D~X7GCe=lSfn9!D{5KaS{MW;uRLl`ztE?cf z*;}xZ0KHDui8fZZTX~uC$HK0GYW}tZh>4#2u6Pl06+44FQH(*OcJ0F_dLNMA@7E4{ z^flJm|6(9a)Rmq8dQ6&i7&ssZb@kiP5#e&I)OI1}%pG1;S*_UIJv`vh3WThZylp+x)V;Q_4qCqg#55uk8g@ zo|qju)DDT^MW!R|Sq#8211wDUhJobJd>$O>U%`B)rSUrk@PB?IRDE5;<>hRnTIeboA{@ZEot-rgecXr<1XvDl~DLvmf zsM)2+ezYYccOYy3E3@fZBDsC`!&L0;YyRO&tFg+gl&Rrt0mXTJU)XpI4K|k?Ge7kz&EliSeR-JDom~3pfv5)g=eacbHWqSo+w|`JS1pHIH1zTEadezUTJRS~)E~Dte z;|i^ep+0LH>x-_u4+Lno!%DQg-^^5qM#Yz(1WSoa1frmgdNWKJDMwhdYg%8k6M_O` zemy75k@6|9x1DCXVOS3?qk#J&_Mg2}2?2ja2hd~9GDNfE*Z_*Bu@8p!k3_Z-ss{NG zCR?&~_w<4DhdgY$DFwUXIl z;4v7(C8tV&1%`HM@;%h6-sY$;07Z;9m@K1*iPW1+ex=s*rO1kYJ0s$4&l*e}SQq$S zut_q3o$HPjq^8i?ChluJyEXW58+f99x4AEr3v@ zPj@QXD^LYR7B6siQN6Xz_a|0J0dQ`RV@Zu;#w>4#{*PDZn5E)7dTr`M0s0*-BRPC#!{PNjzZ!>Of+!R zytlGls#MJBKwGtIU%_Yp=&q1l-+ldJEH43CVXg!Ag04^)G%Oz4(6##$E7#k zXAC)K{t5^L7E^%U`Y5t@mPbSG-NWYTnhQKxIou6RmC;*q_ABO*?|y4U zBIQcYk6MVDmOg{5t+)T2_08WtRgb1?BrVRIbYE}1!$&T>dYW@Kb8~J{m*Y-;?*-K( zzixg+!l%0xk=zjzFK6vD#WB>+hq`w}h*qz<%&f+n6-kt)=i^#HaM|Z!dnx8k@ID4& z?4#^~+H=qdzkMuW`bu`2aAzU}w_zMrQm4K6ZWTcWUd`KLx{Z?cNnmsf$@e1-IqxR^~nbZZvi;-h?nmm~)A|m0+j<(j~DO z;HPM+>4GeEAvILsmK%)5+9Nc(Bi$(-^yKofltYaF(&8xL63gtQx5GiiZO=3d}1>1QehS}c!JOnOb$z;;5#{k>N-ks7@U7v*=}WZxAtQ#q zG~^Rb#$Y|= z!IO?~#`ummtwO)vH7~s>O22?>J0eCj{yd$W#dBdXl2=hJQ4Rf!!DwSO{ECw=ymu<~ zTQ~bNmP;(%G*)IFfD9ap0^ki7QI zEVq8&&xNScpY9PB2fMRJONUun-D&1DiroZekCtSfvi_)2{u)zL{FuNTT1IRfaoH1W 
z@cF}!g7vbBE>G1Bm7Sh`%rD<~G;RRlQTCq?B$WiOyqpyp(1kXZd}?kJa)6=+ib7ZB z`G$(7Dan*2rvCS}Bqm|z=xAmfS{!1Qw-%(xlO9o0K@@@mYLe7|*Kt6fL8gD@FDD@N9#_N0hQ}dDLt~^B)Ib+spMUv_O!s(F zermbFw*xTz1IC5&I4xu1Nd%1Di>3frZl&ll-Fqx4k$N(7WGnaRw{8*aq-7P6V1M<; zhTS^miDV+l0|iy|)5qp~K^ku(%FDodA24*ktoX1y{8BUEiYRBijYAlJ_)n6a2cSBi z;BUS8H)I3M0r%5wxUAye7i-*3?m%liW9HFMMs>__<~&2dKu@xUq$0WnCu9P~d)U;w zExkT&?t*$cj=^~VEtmwNLEgy_T(LB78<*n3Y&6J4Vw3J<(7dF^zgxz#>Vji{bQ;h| zyLUST;3nLJ)QHlnmJmTX}f~{RN*W9?)0hPW(o{< zSJvtA*x?Hj6_r8si^HqE9pSfg0bS6HDF=x=@m3pw@U;%Voyn8E@du0#SDTsPbzm57 zKuGN?bwm@b03b@TUmuFr4=QX5A)gs*M-CB7Vl4i!au|B$9z7^mfN=IZq6_aYpLPEe zAVfEp7^d0o;AcOwb-Fx*t7aNZJV#^}cKUiSiFflp!j=bNl;dCvbz1Lq>F0#WPnE0t zU8GbwEIf#&_IM z*7>|K=*W6I@(%0mUw^!V)l)Qc(Mu{9XND2=b5@8MMlS_Y3pOw(TmUoyu~|-1(Z7PLzR!Cmu1VwFXu||#3^E%+ z-dSqq-r|j;0!J<(-jULUJ~9Ml`G3Azo;Dg_E|x~@#WXR5EKhfVr;0?qWlv^{T2hiH z4_=XaM*HK$W%9(pU%u>*u&1oQQ^8aMA=)8)Zx8&lDKZ2oc4l}7#l72cgAAd-qpnQyoZA|qZ4j$ z31E!yn4Pz&x^Mqd?7Nn=AX7H{7PWT(H5~OnB;^%m#I>r`;thxoYdOJb@*|EQUtX6Y z;LiRiNA#LdA8eXvCWe~)2*Qv`eaTJK_d6z&u=Gzr^NIizgqNKABU3DRGZ$)??2`^q zWk$3M3->>#M!?eUxA?uJcUQ>&#befjj?-Kv9|61poZ9@h{d_#d&EH+Syb{8g5F`n+En z#yFeo0b367Bo^*yo(@>5UCfyP*#Z>~&DCSyln5Nbz6649hi5^-bEepz_9s|qeO2D& z`J92ccL<>$vFAENTH8`M{VvOL)P$R3*;Lgn(5_6{tjj8@WU2YP_JZUEc#|Ge~EJ^sx_& zp@}yQ?ZHM)MxBAxM<-VCA!ZFb%I(BPtDE?VjoFRROZvcnCCT#J;5Rc92iU7|AV?-+ z+~15b#Kgb6lSYu@#uSH%Z*X5)V=y_NeDE35caDb!(dyd$n}p=ZXI@_&44Jf4$%_G! 
zT!gQlju40E_zg zN0OSR_N1C*IWzsRwOZpSd6>?fqmrv8?BbUbjiP6ZG3A&+tj&?5E!}gmMBg2}kA1K`)kIeCavtgRB^-6Uoto zJ34!J{345S9b@#pfu`Yac1CBdInGe7SW)J$0T~qPjSZe^ntOXRtT~%2sZ+0*qOPtE zc2`vwau1ar?Hr>t_+@eq`<)FYyMZ#Inv$ysW>Re#qw?5D%O#e9D6!Pyo<{$HtAzGL zOwI}(KoEGXy}nWhtNomNgx2WeAN#1KI$=2w3ow0sD{~Ur%rK3Pe4rFUtbdl7g8TKF zVv&O|#{$fPE>f+P2K!&9+4z4>vj;%A{ePBDHn4MJ@*FEjj*a*fAS-l_TioJ9%a}np z7aI`s;B(7zFeyspnA}4X60rHH{Q6vw3SMYAsRT$;tR~(3gYahKq5*xFWjYp26;#I_ z0Q|BkDH8prs{B3FViufq; zbeuWp!kR5c5odne+r<0b*;Gr^shEqd2B)VJRn;dEA|z=~o)%lnm*i@BeJBkUx6Lao zrz>lGawjMIqkFW5!s^U56MdQT<-@;peB|xfqA~J~3$GNGgU7^tX-z{`gZ~6>a2eAG zygZ*&8+Z||93EH#ZF0N@z&bsP%awRuFB+n#88tyy5%>~G4q;Rio#3V@9?`a9SIMH_ z!r`AmHlC`_fr@P%ZTixk*V9jW^BNP(w}S&o1YU^Ii6=xkEpAXZxJzhIp+1-Gsu{Oz z-VHiA+?}~xOe2xD71I0uabBiBBfd9yt-#~pNM;zXJ%IlB(>@@e21PAkMztYFn{N08 z8I(Lv45HWf-|Y=Do}C^M2WT!{&|NHw#;yIhrnp#?3_F|8iIg3Q0e3QT_4APqovNU? z-OzKwk+uVL2Fap2C5PZ`@TX zZ2r5-k)fd#Wtf$IuJ>1M(EGp)8ABFI+Q|0f;bc9+A+Y-meYg_9ZYj?lCcR8mPjWqm z?2)Xz>h(eitFXF*KANj+mgqV5R>S7(VnxiBS{tU-T9^lv!MSDP<7qks@O-XFNlM%IShcTw6|cZqGPm=oh8TB4-%&`4bv1 zp-=bvfTKb3Y8%(!Z!l^OwtUS(Yv(A0$8w{!_qV`5Xd}>NaBR<+kGA-?U}!Cx*WU!I z;6&HmO_;WFNTIXn%)LoyFrsXlq&Df7D*$&c#Dy=d@ZI5#yr#tvYEobMGu)5Kg)+;! 
zlO`ohZm-PkSi09YZ#>tASGH2BEO(x!XIrcGq7f4X@L>zm&5p#XjuHPHdx2RVKws^= zVl-jKWDdMXX=ddCyb=s&*3=ql3|+IHx^u9}U$^0}IB%@t@huRNd*Zh{Lq6yDFFWZk zyMyYjb$(b#n-I%*>%phX41+40L<6rIPvU0FL!%f`t-qz|pOR zI#V(Rz!ucyE}RKyyS?75jLYI0P?0iQXE8!pohxoZ(}ENxGu1+&vZt)wvFAI;=?0nftr58|myKTeyH+hqjVCZg1h5t3-%v_n2^N<$R&2MlxZO!C!vm@uX4n+d; zF0Lg^MO6K&yubg;=0_1q1wwU#H#pC`ekArj0N9AdCJe4e0!zy|Fl$(Wo15P$+IUj& z@pJT2H`AN!*q&;fp6zUy-u_2vRl`YKeTM58nPlJ@&XnuXraU=HS)q9JmeO z#KF+_f(mJ{(0h1z?z*7R$0Vd`oDwW&)~SuTD)wE_U#-w`1Cuv1Iu)jkhF9{}+iD|> zna8WTg<>R*Jc!MKIFNI51cwia#g!h*oZ!vQSt+z!qp|{25%ZdNmLV<<}m2R(P-N`h}I$r1hpS`P~ zeJ2C4GU{(WYml&r+r^HWGFc{>!HS%rfPJ))P#OF(#_V*jGHwT{K-!AlsZ`W@_6$)K zDjK<+C>f5O{Ueb)G+Rc?gD+B}{jR=T(dSpCfC*oeR9{pK;(V#G#U2h#TVp1ozB_cL zAs3IFgHH`EeQ!8VH_{f96rBp=vUm!)0Ca)worZNZh^Z!sQHy)llaxbTni#XC6tn`x zX>n;d@rhijaO`UHu2s>4?R|W*8d2!h8o9sUR=@9C z_pO3{6AxmA@@C>Ly&qBB6T;7O9PAg|gfj@9W$2S-9(WYVt(!i*snhtVJRSPdQ4g6KZi=$+oHi)&^WvX8U-N#+N(N-@3u zT7k9E5qm7BGn{YE5#zo^(Pw)xnTlVZszPmsWRQC_Uh!@35NcC$&^&cI1rQnrml*o{7Zi$k0dKCf26+F5&4|J+P<+PVh9 z>LP?+xSMu2BZ*RJQ4D^&074e<5S@Nk z->Uh(RG`%T&$FY9!#i=@*86-VwuCQJjOvqqt+06bq)cPFVl4m6xX%i*ao$iTl6tgX z>Rr1u7*kfIXO5twl|2Egqu7{u1^|x90W4Jt;2PYrk7)#wMAx7EH+t-*N?T7iS|sEX zBiPtPDhF=zkf7b`u3Ej`Oq`y-Swyz=)+$orvLnfJhZ~9T|KT*l?R&j=oOhs>E{uf1 zthX=UAz(l77f<4@G-eL;q=S4`UMd}lE8a{tx`$0-w@XI;;-N)66FxD&B1cn)aKRHm zrKMPZZn>Uuhb(M*(d2KSxbC@t4`b_V(-zyeUw6--^B3bFW&8h7R}T{p;xtUvMpgK4 zb}cgJ&#dfHaMIQ&ZV8=zdLk2>@CzO&==J0L^Wnzfe6uMPY8AFD1$s1z?ORe8E$I5` z=F+XBYQ*lL_UU}1P&=3NcTd@!s=ORKssqq`+dO38xE;cIK+DdLVK zuaP@Iu^8@C?^yjj!-e|ANcI1PL0|2kFGQ`8-Gd~q9{Gim`OZn;n^}&?^sAr-3_r1E zE2f|&1wj$RsrS)Cc_!`ZzaiS6!Jtf|!_q_bv6Y7mDw5mAAM+&YRnLc|SGK?!*hoz* zqhbKAy+=ZLqFkyhwlp#zVZCEVPz-YNz(oL;DgBcFal(Q?or(;_imwk=(c}ykUR-hK zazAmb?}2K~x5S9@{*HW*bI1%z#o*Cs8Se%rZl&di`YAO{BP6f*_^B8Wud#rRnhcI> zP?Ph=3RQ-a{JlvdTK%fh)!1{G2M`5Q?!^^>WQvI}&yjq{*`zSVjrbJQ6B6^AXs zkc3LV!4H^7^-puv{#gCn@WRLc(T+(<0)hddTa?eg^#=4bgA%XC5aj`2=ytmt0?(x{ z>YqJ#qft%E2ksSq8mAC%jw!5Ixa6XlO7DkT_scz0LOr6#I6AtgDoX+6>r8G3Zt>05 
z<+R{mj(<2Z>8V4VJ^`fxz`@_1S)p&v@eLMA-gRsDgZqt|3v_D+ZH9K;lG@wIw7Z|>w&|# z6K8&V8{vC@6s#;iM3gJIfB=9oi)C%`QdCZ8S4&RNt8Y$ed7z8_8Y3bzRp8KeAz*jn zZ_p=NA`jW66n`%c6BcO}7@b8`;H(yYKycy2 zgS-KN3>C&vQnWuMYb5HBQ(CA9TWYF*UR89RXlD&LUV4#~=}9GiyqHiryhZxWM+jUnm;|$f;*9RJ`VD3C+XSi(5)Uckg5MK2wTmyS zpE!;Ak5<$T+Y->V(PKm|Cj9JpH)^#c-hFHs^U`mV>gl2)lkI2L7u;ZDH1?@>jLD1R zQy430W`O)6vnV~MeOMA&lytD{sJ`x4Zf;9W&MRB9>ems@Zh#b13(sQ+w5H_ohbXza z7$1EpC|1hq7c(p1sm`b^yOoPMT>hW#KC<>14V|} zfv1E;0c@WPwWV`j^Va>WD;sk^D%s`Gn%D?7eTgV!ULOk;O1@wDRC^Q8YwIJ0%Szg3 zR(puC+Ul}BH3rqlnEMF>#>sElewe0lVy@E{vO1P?ca8xxyh(yEH zYbZVe3uaY>6lR4w6%ANbim|Kq+2kfw(q+BRBE6><`bq!^AI4Z(ms`!c7^{E&L1$eu z{T$%;%{~}(j{^$0PNQ|x1A*6Xi~sev zjzTzq3>@59(%x#MULs@{opaGUZ-ML)pLmjsJEoh%a{2H{(`u`6ko%hx(n@`kx+*P`%do$u!0Ps1Q6D)tA$8>|5(` z1x=X|&@oevI}u1bSA~9k^&|Y(Dd_p^hg?n+74kFo9Y_PlQJ>`()A@^u|K&g$iHIg@ z_>Jz9W-ovr`bJYyaiA`Bs(yhl*Q?ShoV+b|g&HOo8G_uvHz~^Apqz&+5+jG*&P2?I zijEuIoPIP{)Ac#cZ|YDTn+iD5il0Or_+SVY6Tr2lO{%}8m)icEXlg7s@jHE=m7(8+MWUUtIROwiMpo5m|>dq z$W#uJ4TIlxoRFHIXvK#GF$ORPEWU57Z)xtxy3;Pmj>`|dKxOCJFvUI1cu6U#WJ=7y z_N)9EyScI!63t`nAE`y(I{@DRemR}$&gf_2ETUMT;ex6}lmaVPI{XXX2r9TPB%9RB zRUuZN=_)qy{ZL$XGBajb-nS@64UoyM0$Q8WH)MjO$O}FusOnz;K}h%Oxr%@NtrBU- z_lo_k{|ClNey%MJzgUV4kFd=5 zB3AAHy1Qijr`ZnfF7-f}VL6+MUICop?7!*OD%~BTHupf{U_#tyEN8`K9w%?8Cq8S8_ zzmp-7ts~RXCvyVJw0>hAE>I`&Yp^O}v$<<1FrfF$=vnMGA3-AdOq1Tfw_3E9MYmyc zy%R$jy@T})wy!+hC(_q(-$&YC*%7scHA+i{6_^EuD`w9jiD;&AXa=FVoDEWcDH1}? 
zxFF@Tk-xJhNEhs=!;&MRx%$BO^4EY);`#v#ml1K)loaq0x|P88SJS;E7+$$0=DFi& zLQYu|KFey-e;&~B@6Y^1@N~dx-P4WWGB8oDaamw*ePRUKo=d%5Q?m4B#tXR4Z+CIx zz8gv53kGHT8)I(!%XVg~`@vN!!#50{E7N{-kUGwTnB1NU9ez|($DVpCBylHD{m*JW zar@^-*grb0uorvDqki__aCdb>U%GcF zQ4+h|VsO;{Go7IFyTmp7Y|tVFUByM*sO*U)i6E?oi8T)cf2?BH5VGtp*OwSzC=Ze; zMVJ+Xa2CsS8yG$*r=n~c8YcfQq;#4P%sRqnF0L9BS6t-n!M-Sd5s}n1PeRwIMK;JZ z6~J!QqayQlfr(UmiVFwXLSwQ2fCN>ITQUDVQqM#|-b~G}A7dY_w>8dk(5h%3ELM26@kaK=^Tgbo`|cFM>fW z;-g|5i8a39Wg!^Y2hr}cRx@>1cZ3((7#m%rqq%(PI#DgzRL*UOSk7%1Sk6%4MQ^3* zh#9YG&zb7gJ9=(G5CN(m;eTG2kuHX}i9{s4QY;(e#Tq{udHH(pFCz;G=|{jcAvHms z0HpSpo``ty_1+#0;R(uW>DvnmN!cn{=M`;h(^i^8y&G&_$zw8~);*|c(1q%!;~ihW z_)UzF(69@}KnzvACoz948krg9c6cCGMkAcUPm zI;Jrsgv7}#vF|iXJ7IjSHakZ?m*&Ec&<8PmeFR92pe*uM_~(q^T)wGKw24vDA0btj?h#$%$46y)f%{!F~MIfm~UKiWT!4c>J@aJilIVD~X9hRjyY;Zc>`Vg}1Y3pk6j z>~{Z`4ER~eW2P)5a{>H7zb7{G&+I`k+WRU9Ug?Ak1yTx!^+4<0BbRk!RFzqav;=B4U=`l;^&32XIr0YPCE9q#plw6`5%mCd^4PA}$pQJ}1;H(#bShnc%&E^aQn# z|J^SLL)Dg67&atZqV%5&=doJ+diJh9Nq#Leff-R|q1{9#17UR~g_&&d>j{oP_wzk@ zTmhed?;8Tp1;)fmY-4`H9LbYqzAAZ`e1XGU(ck`AMU3Xg5ZqI@HHRT!yE9{Z2`wt$ zCa-!)U=DmROAklLbWJ>98Gl{D%&^6s9^IepPk-@$LtmUgKdJT>&yOxA_mSd|e>}kcwcF*#D6R<7?Z_jPb-Z(yj{{UYQrLF@Kq%<5p|^2>!>WaB}NsgAz+_9 zU{5B3t8V|~E@R&qEv@BH)7FyGwDFwgE9~v4o@$7cE$$`+T@Jj=)&_uraAjTo`+H%W zjlC~@=A3Cbi%Vs1>X?;>{5QBUjW=awT%9dJ<*0is4ZhBW(^#gd8=zzj7XJ%o!xBIw z)<~nid*UvL$Dp!%#Va}<=(>Wbh}yemfpQiuGHG2LW#d;>{uLSZ7gsZ}#Q~mHmyp)A z$#u^4#%KM8ZC({NRbuv&c5PE7zc1Ioz#ACJUIl|E2K4YF>j+Yaj9vk6Wgye1UEcEV z$GJ%r4bqnsuAxiq@YXZaxXdqWcY z!P3^uQRjTq?t9s{4Vy%#EQx)T^idso!Q62%R(vnUSQgH{9nKst%KjBQ2{XS);aNdl zoa9dkiFD)*Vv4e;%SV;zTU`}InX9A|Mgd#TKRJg9!M1sef95?zf+5@sXnb{@-+5Z& znT{ZgB<1O;Z+XyLQvGW~#QY2VZq%EeNg2H_WTyn}SR&HLcn#YjQGQg7yFn>_gudHF zV!J;GbMnCHyd-J-+DD4THGr%ZkhmJAD+|z`az_1=B31&y44U5$C=3$}v7+j*AY>?1 zbn1Jcfnd^$mxDRcAjxmR8-~5K4UtV)@e@@4nsXB1?s!Sah`TB%1?JPgc7GtVBJxnw zwUq=NX7xVdqb@nx^{LXlci2*clMr~=Tmr20rtb=vUNbM>e&g!TIx<^0UYGFO+T*+Q zx)jvN;{L}XgbxqN`3!Ez&c^osA1w=Jjco1?;=Y4@@hj=o;ubTRVQ=4j^<7<@IHQE5 
zrUzrq+x(}BJo3nXd3(~Qi{YJX9U^h^ctppB?F)}0Uxkjb*L~&)J9|dT{t?cO7@f1zP1bYn^WHRgs}0q8qG8n3wq2)$ z7gt3Ay`JFvow4n=np(BD=GJq11ZN15&Q6q0 zq*_T_jNOwsEYrKuQ=(YHoMa8*>2_nq+vnUL-1R@bUL!x<+w=8X+Na{R-3&nZX5;8A zDfuUiHPuhOD)&)-$+h;P}Y`M0Q;i z+toCDc_kf7U#B<;ZH#zX6r-2vF25G^N2yQUpzeigQ~~~S#?xtBs0|_6w6`@L78h?L0khbA&A1-uN1TgVtlmdc5+3ILiF`X?W6m zxfN9W(O(L{v3;A$6Jd4H626zCSzmcO0uR|X> zSyk5|?znioa|a+Ns3nii=sul(SD?FgAr9fZq_rE-b&&XA#$NH2$|~nN=&oyUL2XiX zAur2`L#wY-6Y+`7-Pm25?^`BdX{Q|&x=HU6fz(N$qU&g@f-L{mx8gndM+ePnbTRqA zU1_D+txcTMJCoZF0)&3Hj!l0Y4Xihvr!}&=o}Nk3yLRYEbXa%#KH44;Ieg>$b>H_u z{~Da$`cw-yG7%l;fA&LpuxsHC z#IE6+zlvpsW%OyjLy}lRkh*|>QS-Ow1*D2=YVFEnMfXnEfiC}oy=$-}zP{_-_up74 z`6)RKiY-BskpzorURD^Fiu*dl=9N-6nard))DFPYm|VIJ zAgabPon+OcfUcey)bm1@J8Nqi-UiZ%hDv++hSBh`E<{jI4GIAiOZ~J&j~YG0;z+6m zwO(P}XQ(Q%aIi!fRIY>wDnuK}bX)GnM$=$vhTVSaieaWFBcxJLjvrzo(!+1Haq0*) z$A$kKPq=-H8lV3s`2li_AK0z@Gvr}c0&#$93FvI1lw}4jXq-(xQWtGoriL5mx^3yG zx5x&J$9!H4uleyS0@O$k67(-{GyZ^SE6_%&?w8u~qkQ$#V<$Nr81k{X;36Opp5AT8Q}d-6}z;&2h|$TjqiXa>z_z5Ya>@ z<}38+2VS>R9}qW^J~8LJcQGC$W!Hr~?gN47KW-_Y823xvZ)|II)`4Fw({IwB-;jA& zPOkkdTG=SNVFO9Z2}jI4or1~H)eDfm_gtQj|DAWzDd*>HJ{$WZX z@mRg&B#>ccqEd0;^BK;r00x)1`b$1nnA*P84FqsRI4b9Df4t=Ga+5jHavF6hL?nKU zUnCxEV^ZhzzuQ`Z>~6vgSOmYVfZ1wu8$h_^H3^n--I~`mq=4WgsYeN_I!>N2ru^3* z6$#10&a+wb65M@zzohiBXx$E8lsQ1dy;G8z5FW7qA%q|4gW)dk#VA<$)XZ^9HD@{f zuYYz-vCBn)76q$y(aIO$D|=(2h53@zFK5U z(_$>zP5gBASaz&wg4cM~ACsW9QUUoU#(vC=-;n5S3<|@J+;cQD`;c2%XRNgEvGHqB ztl?n0+}T|JCVmfb+7l)+I~?{hhea{_*Yy;pf7P195+0nSJ3(q_i;NRluBbYdat4Mx z^;W-xsE&!I0)V^%2C85nmvWE62j4L66D60Ti`*co)98y+ywf;1(e8qcy}13|qgGS% zuLk!=po2?q&^YnyG~c2t(Jy}qT`HU=e8kn5RUM1Y4i=ItS2O9(p94>gpajDN*`6ql z2Dz=S>HPg@Z^0YOf7h~~(l_RMN2%jDDn2FwOGApryHC+uF-Bn2OCM(GO1h%UI|)^L zP>}$32n|j}g_P}(SGbMPra{BUgp0BDzOB*33D=R*n)I6RZz)~r#Xi8#Zmk)8kO32~p;baU=ENa}Avpl9n_`(rOGr#P=+lmD-QQ<)cqaw=p zumD7A=#lDl z8E$6Jh}v#`Vd8y>)iTr#OBN_%d;j4d59U?|N-twAmSl+P;92ITKo6UkQQX8Y=F{Je ziyk+%JWwZ?R9RS5*ZW!bCXo`ml9OjmfonMIgR@p7neg5hBQFc)9}0S?%Ib8^+B`HC 
zosHcrcHOYw{Hm=7a0|vopMI^2=h9N0hu*P^Ovt*Q8>15{=uw!_G>(?e%s38DA*m51 zmsFE%(41rOP&l3Stg_~ zOE1r->;eH(%ibLE$;z)?pCz#5T~6KI?S3cy1VJ1+8?md=s^~;t^5aOeV|-BTeE0c= zCg4Lq{d40;=^DbU(EmL1tBq4m!|27={J_;|uYG(j_i5~pr>E1crQ$m8EB=n8O%(O* zUGr$(J~LSGZ|C$>pZ8n;_YHgHW<~=4l&#W-*zKO2@Xk&`i^NkI99+yO5~YU-%(SH) zB3(nn!n%K2DSYv;@Om8bnj~H|-MhJHv-3$|hC!9t%{H<;Lr0_v)-uy)|NIHcifWIG zi~HIV1+#9Iw}x&VUFt``ikk+M9AiCQ{N|HY*y9?mCoii;7R#1A93E8Bx?E61HVEU% z8a;^6T4<+tarqRhpM~^2s;#Q1(C?T}`zvbZs~&qOa;o`k{E+q0?W-U@j3>~sjdz&h zc-+q;qFfpONm#8J@7Juu2$)VPkWsTiRnU^42~bfom-!p`u)g8^&P?c;=dY9Q3PUM1 zRfL2<8AWZ<&ukUjVT$}MX!Jw&3e{wgE#<3-? zYHRyz!+vdMGr%yJBDWQE+3BsWo|JUUZk%|7hL7`?*ubpQo9n}3j7Zds9Wv&Bd2@7p z$&}{`Bd-$`0?(+*ZH`z1nucqW_kU-1FhP+$Fup3?Iu5+d+)0S-5119&d6MY&2Ly*V z0GGrSv0J^@uDygWIBL%8WU+dngP3Cs{rOS$Q4ugmHQ>6MpQ+?#6E8WWMl*xRPXwke z2Z5^2FZk_=&J3%6!^~wx3fxGJT2Hx5-yaXARevk}$uTL6dS07k^Bz*;mr+!X2QxW( zLss1_iBAk>3>ghSspisSK^8qVwr9g&N@jF3e(yBBCW-mcA+V?{M)U@!vj6#EC<#?% zwsh!+pE03&eP>nqQIBJ4;%qb)b?=8{E$i{z_gF=(w8Udqd)_EJU;V;t*p`mi?k<+< z+h3hp+`En)!r2nhRC@B|AoESnismvu$|dVA83)avM!#yf1^go;L0NCU!MC8GPd>yx z43bFt?gord%aPZI`PYB9%y_+e4eNGVs7)(+yYheV!bO%S%7aJ;+xS@R;t@8jdAy~N zL~Y9Rr^{4W7Xh*)5*zr=3xRkSi6#`=E>}m1X){OjV$LPrWuf9Pm67d=7pEsn3x1*$ z*R6Rt(PSGYwnFQ0IY&J$8`#l)8=K|(G_a*`fQyHaK5O!hgsL|tWF!3tg_pMCvC$5@ zxPL_Vu(NI^W}0`uss0g{TYYkh4GC(vt2>_%+1dK2euA{AULs`OJZke|%hz zc9w3vJ=AJ;qIuJWjMsKpe@5-EfZNk!ds=B1?VuMV$9XIE=GkIdJ+`L*B&h&#|yI$>Ol z;)>qCiJ=N5d5lO*E{%x;`^r{nmwp)(_@jJvj3^J|hg4NvFgen2YW9E8?AHdm!6$^d zdml8v4Mw3h&V{rG;A%SS=ORUcDpt*`f|0|jGI|fW@C%RX;zI>8*1TZK{gxCV+8iiX zu?&4&192N8u427P}B#I24jkQTjkE@N_#yxjv|_j4L}P@J=E+UcafwP zN92=^`QdoIYh$zCXi$1d^c|n#m0T^Kpfpi4pbLX{q)mM`raXxNP3)8o?gyi>W3DJI zsjw+kenoTTb(on!KQZfV~R%q<$iddc@qTc#V>4 zzG#{FuuPIAzv&q;@LV?ZAkF+R6t4O`0%xm*V9`}9K_YQt|M4n&vQ7>Y^BGNYa?So@ zF6{RPBS~2o1RuplKJVg558lt7sLA|WqNJ(blk$c_Qe&{Y;494d;{~Q!`3+&77IN9t ziXd}P^QF#b>Y1~|m*I?37no`Y`LluQN>S3MQi~;PwFTeISds;@mp#rsB}uP-k9%>C z^XEX37em1b3$TZrt77~Pk{bgXO8%K)-K+zCNy(8>8yihtH>7$&jo9(?>Mx7j1piQ1 
zJ*hrg$LCC>ej9ik{$4+HMxQ#i^z@hL*}S*4NblVQm(R2P#oJ~ zBzjFr*}`6&`Cb1%FYvNh=TH;P*^l@4!IS4u6#QT|;JKSHrk?BHKbvYJ^%`e5PY2Nt z)UpDeF0$XNOkLsgTi~F}TTA^}_uSgX?ja{DJO7IOC;+1oHfPoUgKz9f6Ot+ynvO*Jmov17VnQnHZRZ^{PG%Tj9Is59S1n)9hHR`Ycv>plEBBs=*YwG0=CUv0Cwl zq+!(Cu$)_)rY;D&wqP`vWme5y3{gF~{+ekmsn!x#OWa`b9r&SVcMwmuCY4R)LAptU zbQQs4`<2M)>#P}N3pdX{+&9%Vq^3zT+DHAzPS*UC`jSi{ZyiucNB8QXCEvdc|EP(elL?@+Enir%eZ zCyzc;y0fCfHF+bWDw+A~s<6I-3POeXV>e`S$Z>uB5yy2xGnQg7v4sgj!kJS1h(11w@G9TrJ$Hv8mu!GrE<)K zNMfGytRQQ!7dvr+TEiR`$dWUA-s=~;a+!Amd1IAdo0!`32^1!)MT{KhiJm{ZAq4Rz zT7K}k(uO{l#ZNjMsTLMCn+YZJ^?z1>fse_~1H8y|yK1FTOZLGxI@L`sP%|ui5T=C* z=<@50*_4~ZN3ZYgl0bFQ&dhSKoQCR$g@rJ~-}6MM%&;P|I)v9RDvpjp9GxG-QSzM1 zpFub;o;iip_WiR#y$_h6iT;)5M≪3^q7+UXz?;voo?)gb9P%px@@IeEyPB^}I7o zX+&F9ck?mLQM2Uhr(Blr^KAT+AxORCmw@jMp@qN5^@dp+=*3aUpk@+MYB=P_g)=jX zlI!hF4I%Hzs;RRRbH(wdbkB3UUe3-i;T!Pt?+Wr#y*@gX z)r!>no6OW5La1tTs`F+|w;*-#JKKGY8@y>(Dea>@OEK?baof;3((RQeFEvN~ z-cHlwm7RB)DfIw75<<0Bi;Yp3W>Q>D_T$pNJ*5!iqU)1#$BiRqAt8*Y5a~qb!A2%Q zSKNDZajd<0LH&MBA`jSmT?_(WI$lNq+4vx!${3VM!yJ#JtiqqN%?G?u>Np^xOE9Eo zwfNm9z5FNQLH_{vdHd%1bdgc?yJ`X(BLdl3RF{^RwV>oXf-vu+Hr^^S_34rkP8)E9 z#YWmp+Ru#F=?t~u{1L~Mc79UmXu@H4-JFo!wHW33J)}B!Bo6C)$!}4rxn%Ud_;ennwP2rerE=S_RU^2PTkZ)#X>sW8E1{8v>zw<=X zHX7p?5XDDQaWbnyYOJk)5SnHs=|(3Nq0ryh4W7WPS^;qWTsVkE`nL^qOCD%YK=oR; zI_@YxiJ62S@(9-;>_D#P;ra`4l!6S(wZ}Y69_GTY$L7Tu^|aBkEvd*zD0BTEuQ%Fg zYN!=oR!pUw2w}iKjXs*|Q}Ha8xkgotUJt4fJ=%Jic!W@-LnqVJv(&hwnxvw6OA0JEKch1`Hu-D?Nb-f>j0+2GnWVTdW({VQ zGzLERAg4V}m-zn7hG5Q%Tqc;j(^%QNK5X*YXBf}1WLS_Op zvO|A_+r^{ZVG=~gcMHtPc_FB~I_os9baQgSFHNwyp+bjZ3?Q~C=d!pQx(hUsC%E`&i+O_)=#|8Ea z4zTi|fR2tv=|8Pj=G)36cTCZ|hY3-#MHcH)OP;zidctJnI{R#Vgd)4eE1ju|O@3z# z+-4u6IJ=7mPdqkOWW}$soBWoa?B7Y_k?PKp&|LQuNXp1m4pzlAde1)F6tsPp<*PAP z6-TT2?{QoRx3(ebWwE-+BlGod^uv@F7MF2V?$=*}#jjz7Hib+Aj|eqA`6K*N4}Ovk zU-=USo-p)kJ_zke+&ZyXO9&VB-Xs?1IeDG>?$tu3m$gk1(<2E&8Zm=@KU55Ua_x?AB@g?@p_eRYR5*{+*zn5@^;JnRWew@ zTh|wCkWkZNO$f4DDX+7K=ea2j5*T%j<`Twzsf$wH7_JMoqfXty#AtQTrzbe-GKaIs 
z71{&6N-orz#*wOq0N&4!|MW^P$KmSFV1NIo;GaxX+y)=t&v-Z_C{+ZcQWxP4O2IgL zT%Mcnze1fJUK4@ClgC#Z6j5}#_?R;|M&ztw}@)iPrqXy$&v2T zX=aoT&VJO3-tLrC`))$y@u1L(BlfKxiE{5aC9Wm8j~Mc2<+nd_x`{W=%d5*?RYuC8 zOMSJtmY12sJ(h4Sd6*Em`J^RNxSUYYfLFRNu|a%im00^&S)_;N za8c@s%auT=;=xLAJuXiW_mA!)zpQd?<2;3VxS722h^m$}c8`?w+}yK6mf@qtyvI4g zF591C+dYFKOgvrYsjysi=J^f}l7n1TUi>d-sKs?u^$BiRddiyso8eAMRUMerMHUK|hU3fO{tW#TZjI#!unu{Yi)mHznV`Cc)BcY|+TjK!YJ&{;q!H!CX8 zfVa-}6PlJdoj&s;xJXS@ap?>{Sm$*Eqr>=FOF)?z1-qD0pQqx$K1vN)07SJHfGV|| zuc7UxKwDjxCNT8{*Z|HI6)2DyngZ%C^S*w^9{0aBkqmf!YCK@3VWXt_h`nZq9!G$ zQG2uuGndJE+vJT+KM%y9nd%6w1%0!)O8w^gt6Z6TY&;W_PpfKA9B?^;iw(vfpay>W zq;3C-HeC08=dsLZ)4K|*%JBX3x5qEU#kB+yvlA{NAk+15wAUvgh4a%WmmuSIxU|6 zS!=+vEgEfJpqPcpeB@o5POVX8u9dmjLO)f{)wTe72LtzW+@?|K;hTgviRS`nss@qA z-QUA33^^3K>)=RYSQlF4|4|PPnFa%FEM}zi=Ai+l^n)Pas7Ah z>_FJuTtbGjz)YjvhLvICWGwmremfXgt z5Mkiw*e)KTv~x@^tN|T@DnaueiXtPZ?^}*AY$va40@WPn{`Pgwu!D40oVc%Pp9P0? zk9n;*r)I7;?){v9llyQi4($(G{%qV+TR|Y|OtGN=>Hd<=BAafc zfBfXSYEsv1YxeHL_MwlMZ+v7fvnx(?{8U^vyFzd96;+H+}_wf^z5P`yGjbx5I`=LO}X zz{Xg1Hw-7S9;Za`q%*;5c8`J)6JBd%X^F<`vO5m(81czc}2b7tAtd>&bT zJ1vv{03*+dEOGbRKVbzXw67A(zFsJas*A>FY$=47A2iO*e~clMbH%Og4AsRT#6k9d zL5_vVjsbl@s8a@X6e{yaDB=1$C5@{Sd0NOzRZL;Tvyz7M-Y#cW&|x8W*AoD0Rx{Rj zg@9;;TV2q}(VzQ+T==-nDzJkbOhTdf-G!DEii=KdSP}}5%2VNxEGK|i#1!S^kanIi0DHGa zcJa=P==JMqn}d}&nOGN&xp@D*PG|wx2#O6X{fuVcuSH(*4Ln8P zTY`dNXsB7V@$oE1aAmvBc1g^k)Gf9u+>=?NeA+Tb>om+V zJ{L#a0_UUkw{Jj$j|+&I8BM~8D^chv!V#w;h>9L(L=~(Oi;NeM zgzwnxjHrdE^_l*JGMNJIsV(+m_+a%K2}Bw)`8uwmDdabMP{9bMA*X%!$F^Kx(S@z+ zkqs287=q2VF!3flRr`sm69`J_jlr5#jKC@!5`G4;a9Yq|$iY-oazv~(;DDB$Zyl^cIu&ZABw^VU>X-KL0R90?;oMxuAu>q3-Et^##NV807YZGdOH zM(AWNT6fd2G5OnYe}jiCYvLLVY>DN7hu~2#lYq>$zqaNiuhYg7ZiLPN5|u4LErfC}HGU@N^}{hPV`w=_RM08I7M@=-0LM13Wl@<*E- zXffo&kdQLtv75JKBx|_RCn~B?q$?#`d^bgjp|-$W2?X;ew&LY%`}oKBhHFT;i?!(3 zd-8v7%;<^KEwQ1!RTKY#RiXUfE*Dns%<6mjFs&|ZkGNf6{8`hG6}?}UZ;bn( zssbI7b6(KO%`*qH3u6l_QZ!ubhz}<}5!<4_>=-QLQ>GE^oGL<(NMp!yjr!_m<``cU z{l!n~)y;HB|CjX))1fH72A@}?(HZ6pfl-?_o_M^pR>pgSBY(=L>2x9Hgt`(=m47H+ 
z5LJ8P>PKz1uf7(cIsf(>XUQiq2=yXmB~P9%-t8CzdG?pO$j1P}%L>;+j65Hqc(1?xJ^Ld9__Rn=t5IAqZ)ImNKA%&iLFM*iCpMZp9ev^30M;B zjjdlWnNm=q$YikO(1mISopkqDvA?H$L^|&fue5JNSLOKBowFw&Uqz{K=jQJxg}%^+ zi-Yry5NJ;Iqm7(8>3=_RJGcP?RCp zb!ALXsIu9oIpg_pZm~DbnYLxSOglE9xSHd2-vx@)rfXusM*&hgTCBd^BVF?9MjXM6 zjM9Z__dtuh{=HXD$Bzz&4GjcCzW7}cMuVnHs$>!KCak|1XzxCJ-Quq z%&15bYbsvit-$=6S$AR*Qp)!qGB_aKH0=)z5)ZBHeMipSf69XB>mu?5?Ke5u{=bmu zfb@){fBhya4hW1@p9RvWwKpPXBGjs_(;{~~TlnK1?p4Yp+!r^%z_b_43N`8#4_;Vc z8CY%PloOZi6=oKrwYRrEEB66tTn){set4?c^o*)W{BJQ^yB!_tMc_UHyz(1Jyyvyf z1D*2!&0r354n3^?>MU*-dpthXLT7VuNfJRd?5inwD8Lp`1M@b8g_!z{#(qpD!5i&&8jPqd@99qTj4x+ChlRjQVOgTR)=1H^@n|Bu40CB6b=9) zcW!`2?FL|`i(+7Q=znJxx#i+hmXnbMR`U;#!oc8D`os;bRyZth05+pT4yxX5F0gf+ z(+>YiIxWBD*^w))=FU&~1<`70{Bv+b+EkMaltM)O&FN;&11F;j(1>AO|Kcv6K(GI3 z#AK?-3w&YlX$lmYCu`Ug`IpYgwl2CIM|x5A-~}HIZXh-XV*&S({JA}Icm4RA#90b9 zqN?{)kzWL+A!Z(ebdgPImd3jgb+GmW6wYP$FH3Jjp!4&MH|HDn-b$;nW;>t5eK!$i zyTbyo22(0L>h>@C9IPt)U3V*9iHw>xeO!;Uom`E*mx>SUa9|KBMkozpAPfy zVn%9ujKzq8f2nvy*O!ojAEWgqV|33uwe0lpXML3YH zWz~(oC4AgU>c2bD@|qmDBr`%s8$h6?^jL6TWq_L9oJGN^2vTpD%078O<1QjX>Azk7 zV>?6o$k9Yh_y_Y4?|V?%UKaZel)DcOT6brj-S zIQp4APyH-i7hmFOA2ME-c=^+Bfys1sTDZq&w57X~z8vO^%SG#l0q}{cXkw>95 z>u@KK82fKP7q2j%M6{LP-FnFHjz~c}`Lg#X-@=e|rz`JFe{S|&fCT1{(~cpQ=C_)d zxcI=@srR`B78~1JvflSAwA!}(*k~&ZCWk4{v@%S%uPBj-E3&wml&u>ZSFID{RlZ#*V#!MGod zT6|H@vDgCOM(D`c;6WI+;7*Vn1?g63I2w0qpE11ljW z!9O<_N6bAYtas=~XIlClju?H`MMMDMwES^vp&Dhl9g4EPXGbEpQ*=O;6$y^b`<0Jq zX18$}A5J|1Pvy=F9%0fXI2?99;-+o0lu>_#q+~m^#7=q3*{Ux7fuR0K*zdws4DUu-2+ur@p$6ueNB8M zGt27D56a$3qXN<2GG2%?qf$(Lnwfkr_Di+1K=Sa*+cH1y%ERY*a*Q}5O`ScJ_|pf* zi?b-v?-zgZqZ*F*o zUS8I9`c86vc=@PDuye2Zozwl*sF1slK)sQENXD<1#z%YUH#cuEKs2@y!gnR66Ao`F zOE}RSHH`^6>1Ph2lp^)IOy^mBa~Mq{=Zd38qd)dN+GVWYelr^wRMDd8lza!fUGUiR z@pi67&Og4)ae5PK*(;p>??T=fpKIORKJf3=;suQDce0@BM~YgS(F64%M!8s+p@LH{ ziM>5ZUrK}E=N+NCy0_$V36WUYDj+s2xxI=8dpod7Kh zrWUR%rWiIz?u^qa)`yMJrzUsFhN#!3GO^aZmJ`6%Om)!dYof(DqUz{;g^}hJ-CJ+a zzP^7cD)e!iUc&H-qxjV>p7_whSWOwe%>K|Y9NJV3*zWWBN57Jd@`;UZfVjR{qY(pZ 
zD#gCIwOk0IDpwW)upvzBh&p#Kjha%qMd(jBhw(2~U5Ng;+rBI?XmbPD%inc%`S-H1 zN)08>Bvjj=u#djz!AlE2YK~spNLUndIi}3#x+fx~yZ}ifvSf%^_VklIoJ4e~ij#~n zM`yT+f5Iz>tG}9bB7GO387kly4s_3ptLBfxaGRsSOdML58PCs$W*!R0n@+@p1j$))8Nw;6DvN0f;=WDs&KT?P z&b&yRrF@p+DYG`?%810UvmQ=|-x9B!kdPM6U{#*IGe^o@DELf-)YN411Ii(l(odKm zZcu0K%nP9j?@d}4Zqq)!!=Trk=K5`?&t4tB+*mkLxVgNM^S&M7+Lg2+ zWeZmRB9kWd$DRkP>=x2-sP<}O?G(Gg{gRe*xgpvM|C?hA;}&xYKyVqg z+a+3OlUvK09EB25b?Kh|2S6d}R@wB2oN#DKPfi=KV(MegP%yln3Bz-HkX#{x+MMBFw>3uhlAo`C2x<2iwtB&*GD$V$&cU-|$KC6W9SMI2Mm+JA2 z9&2gneE$;5k3OKNG~>#%X1sKz+alp8MurUl>Z>h1JHmgI_9V!%P84kNxC5)*SAhIA z6$kN^P~5H%)p;hT6DD#A>{keCs(9D2Mx$Opcx?7wg^)jiy}N>5Yvd7==e@Y}(uw@99C&?D(l&9A=w=`6U@DF2DW@*v;UBl$dk zrQ3L2oJ7#KdrjL7C3T3oqp^!z=q)vRKXlXNtqFbUkZMrilaTu?g?|>YuIc4Ia>(H& z?fX`T00KhF2{IU1?l|DRDGPSStk=@Sa%ME^A2o+C!%r>T3%{=v4|98 z-~ETpZ4Otumc*+AROEBRG3JQK4P(nDwC7US8Wy{Jz|TA=IVU}5gfUakl{3P=Js?(t z1V?3o*glJ&J{}AtC=X?FT;O!6FI4hD_izKd-1*qt-bja|HWoHR&_~{i$pCt2f@Yk` zi(3p!v{n=ADMGX$d*j0Qs4_ADYo=HPyR`)mRe~?#`R$i*wn6q9&5f!5M5&HaoX#FP zjo+NHBvX%~#}`Lp|c@hZTEoYcO|z z82UMhrg8f?3J)(D5r^aP516g{-PV>0S{`ylpNbk-L*&Wp;eL<&prAk|Fv5Q1dr57i z5%Hy4>NCNRtF2ASmLd|%M@>wl^svsAl9P4aGaE6l@&~$Q&u>gj2hV>kEq+zmDKObP zF>&=>4kYiZWYnE9W5fDCp?EANQi0kmd;N;+2F>2p)_?Z^rHSj+Sb6BgswCK)bSstg ztvuVn!7agw%$q_{WCLdyb<_LYFN(0{L#%{|WPAH8? 
z_aWY$Y>W>mHBR)X4N79hhO_a6=X?Au0lc}kPSprDgQ2b;Wv?;dwgKk7XQY6*yDQL@ ziKrIVp^2$GA@~(DVN4EXl*G;K}5nAM^TBxCGrL#71* zRrDJ;zkQY=mw9Qq2fZ#W|5IE96H$d>k0x?eDZLUdp>$g{OaBm)loI4Elc8eKTX8@t z#t31*j$T zVX0xgiQ3WrT|dYUDe+{w?j27Te-24EjY9yI+T?NY$a>`ykJ!7&V0c;S7XxLBv>OjAvtIrL{u+WOBPsvk>3WRY6pt-P^{ZBn zNo6@7#uYB`;l0+e4O1`Oc)s3oNUIOiN4&cb|7GZX3Y%u)V-Nn-ZSQ?{Ny{IWh-$Ib zM$fABju+t(38S=AOO8@licgq(ScHSaNFjj22N1=8fQ>kA8d`fWLZgOo=@TcVYZ{Uf zqRRF*Ljx0w3_FeUdIJnpI{`b@m*WEXV;Ia;FlKv6l`g-jp)#uq7llP}o z>AKqULpfCt-b26XBeRj|iC>QU{qL=l1gePT!h?_ zN7Gr*a|kHq&gAFkuQ{2r`QH5Hw+-g>GXYrCOFC4(jP=SZGfX?2BT>*XDbboH=X2HP z4C^q#=T{c}9Z{7W{hBk{uClhYgcm=~0SuCv{Jp0Ilg*azT^8n_zOy z371&eQBn9_lDj(>W)0|lK#~y-jySw8%rEJkBg|8-dxjYeukB%Nmj)ZX;<55PJ{R&X zx&Dg!nbL_L_w^ora5Yaz*b_njllPZkR<%mI0W%(Wq<{ku%l_KV(MrdE^w@ODOCDtu(54tyF{bB1u^MK z{>(pZYVC=R#ofO)o;ZH7%kyK|B!8Y~hc9BZY`ig(_Dc#skr?DaSm}BMw6MK_HCzNW zy6~sXvXD3QO~CeOcWA@^_|a{SD!q!m7eg)@b&mNfMy6Ih`RDY<;u<`#+~0$47k=R% zJ8ldkFOzw_=A%t@M)J6so^`dWUz>P?vz1+KZ2N?&9FddBZ;y}sjR25ku`Nokz98|G zrSNxSv6v&xyCBLmYamYO(jur8B8&VkDYpv*dflZIggl9nxwV~i>no0K^i4(aCR!(P znNtY*`+^q;7r?@D+Ils=3xFINi-Y@w9ore+m){Y7hUXl4ksWtE5LWkK9)Tdt+U~ z*z@kw?9M;+rfg7?oxcnqv_AsLYuruMpOml+3QW`+Ik_9q^)?>&J}9>;KK>kSs}~IG zT70B_Ts}9Na=CdKrSw0X`r1%>GFJp&U@O0&KG<&JEB^ZLS<3CyFeYm(7iLh+Ss=S& zh^Z4a@J)?vw%)94u0Dk*|JQO0aq>BV0K3;zJ-A7McC!E*nJ^-Xm*r+`#G-7M6A>97 zV^M0Gvbb(jK5bu}Yw>FjUY(;-9}O!wgqgC z?AP0}Kte>svcGXQQH2nNQd(4RHX^3l-4Jz1Kcd_M8Rk{r*8|Vk{Pl4ER9Nji@^UKM zjPKeya^lKw%q7>!y)G!wia?g=`ISmWz%#H7g-H6A5yG7<#|X5=7S7kY*BcFEurF)d1rMlS${8cY0w0vpMOPNhnII zF+)J9wJSOI{|I!Q{_ioZ#Au%Irf(2fu?s6?5>>yqy$DnpAt#SNzzkGEzu~6}i`I4Q zU%pd|Nx8c~mD;Ib|1|b_0(w)g46Hs7b!Z*MSFeG%5P(?6POdMKL@D>f#GtwW0}h^o z1ueGwxJzIGd|$aKL@EC(I9p$GS0^;sfI+c~=qC&}xVx#7TWGA-YKbV_<3~W9JveeVF!<94yuJ^*0MK~cbYJuXB?<5qeTTWW<^vwzDwTIYxYg$Wky`a|B})3%omb8DO&-@Ri{!$e|c9E zK%b{;g9Yc4Tn@;MchwpCyEXnzTlt?ZkOLbob~LRiUDF!(;@^Gi1Z}c?4sY3cB#UY+ z!n*itfxw>viwGm)*bjLJL;L~dYCf#sA%%o^rtN7>1f^on#Hr1*Ko^zUC>(j$F4y}O zA39E>)~s83HV=yNh;-$(mXD}avJBPEFW2qL$X 
zdm(BJOak8IHr_*HeA({qIFRr4?jbfqict2*g+ZaMLDo<&!sZLN}4ddsS*fPft#)qn-ToN+sMNUXF-Zl84p?iRA zTiV$+|4sjL^R-&MBR`twv5)aOh1}hQqxY!qlmhkTDqa1GkI8}JC>!ks&g(^(2f;D_ z5eOOEc2$2slZVB~k#XhD8=qGES8q3S(i_!ZJ)1rJ5Q9bvhdNW^eJbShFH)5if$3TdL^FQWe<~Y=S^bMMSjyO9R2Lfoe45DT4aqu z7Yt=~+|D>Iw{=xdBF&ia2rKx-lgsH)k&E?=6Pt4B-u#ppo_t@oNxnY%tmU=3i8bea zpM*jJ&!~nO+Zt^Wo>sF3x8NQJQT0WnjIr^QZl)u7aa?tmdS4|m^dRy}moamTNN)C{ zX48){k0`u@bg-Klr+oQPkW2?8kwT$sOo$|CaE4pG{!11(>phwe(D(6P*)umCWr9NoCAY7ld6TOlVZwPcVX{Kkk@VLTjSf6 zvpF{t35Nn|8QUvlF`s<#Q=@l^L%sle9z{eyabR8<*J1CB1)?LSqvCT;;psbZ?)ok6 zaP#~6!(n?9k{Yie@Dx}v_fUQ6w=q@YbzHMNGdXNMBLvaCXUyf62#%wc?{7D_b82A) zj`shb=otTQdqk32TzXJ0&@5nL4!vV&Nd>pM4FOD_DV= z1ih{$|G-k+$_s07F2t~JA_1@_zVFrq%@$iAy;_hTBYGJSr6U9L`q7y?KjidQWE0Mq=uLhE-1Dnt!vG}xFCtgBSqu!w!(1}z zvuTtZg139m$O1|~*rZqfv11-7iK_-`Acou}!N#Y&UkiH03JgBR1JW6Hnjb}RWj!Ka z<=DZ7fUG+%o@t^|2vsS;J%P)->V0pV?prBeN?o$-nqu9~I7CoD9wxIf;Kh8SOGv*J zW(Qc1QA*e#+8QGpKy-ESr7px|+!IvY0Ahs$Ti4dSeA(C-KSyrj24azZ8)Lh`6lIOo6%Dqo12By*r=64$CL(2ja^B9*Z$l`H$j zw>HK2{AC?lclNP5*t7`v@F0iKob*72IvAuAg<7^uRF2+RWT*@9h$#JUD|MC*(g{nWd{T}I9Et2GbWSDkOyS!Yov z@z3IPkYlV_^eTc~wyQy3ZEoTt`=TlNLO(~x0xo(?T8gBuZ|K9|2>Ol5qKPX_i>$s~ z_p{p;?MQmh?l=Nz-AHTxJ$~Af_SJCe=|_kWDWs7N&MwY?O?8zjJE}3HF7D0d?cdkM zp$@H{cqvu1ehGnB#RzPEQ{ggD;KZjbFy6S63_TJEEPWODKg7ARQ7ds%!eVg`Q(cR! 
zHrwkhf^6K*^A>{`*+AVfj@3Bp=gZEra6~f_Ab*6d*&@-b%B#E2$c$84dsm)OEwz5n zP`=NHT3Ui;99M(uANr)=OekSz{N-d(ibPee7XqaW4dShSEN34J0!V${#GKeZ|YdNcwVa6R+0VrHVLDL~l z10oIY`xl!JR~b;{pl_>;LQX0|ll<85a#G8#whiSeYYF`RD1BM^L9JY-JO(Bv)Mz@% z&S=Rr=`!7L<0DBd!4;XcHBsvHE#{7#E)<~zD(t)J9Fr4CqWapUJ}>h3KCm82`bsO0 zq2P2@>!h?fUYB#Hdiwf-!whrdzs3HzN3Sb~199s8RrXVih9 zKdciy;qTAn~2X@e|*-LFvetRLMVF^B%-@lSW1H+}e0{ zDbFQ-8My;jtoMvBE$FutU%DNNMfDvqcMRl^4d25DC<6<@CEC(6S|X3GV#>{+CK8SidnYI@<4TuTLazqN8F`8H}w ztzdVT657@T*bLvfMam6bSLjdoip$dieIcvTpQEmk|dbvr%3!Gl}>ny>$@y5vDxL>Wj#wa+0xC-jSM5zs~#CyPM z_z^9t$Sg+tpD@7cmml)w8QLr+tS^dvwxOb%5OPu?>3%uns!7qRDT& zWbyBT10cwo^?> z{BPuDO-^`_^5vIv12`A}@sPS8F;E3|mXHpZ2Y9((;81mU6k;lK=;i3uMA!LV0fT5o z;@|vLl2_&RZ9XM^dFV#yJXTpI5(tIc!ZInFkAH$G-x;MTW@jaGjZIV4a9mxSY!ES9bbkPx5`)vR4M`foO_hy&Rj9DtAX2?sDd z*`KRHS(zFI9oZBW-Yo_Ktn-GZK1F_EzuuBtU0v4p$7*d23pRWE5>{I($9=u;@t@Ct z_>Rcg(@$G0`3;|40QDJO@1EM?0$3WzyL6JZ6gyRFM_>|za!(~Zt znuxkcO#lzAvVKZ;NWGR4eEXO~7N}S&WO7dmqyp!|Td?S1gSO9=RL8PEwk-9u3Miwd z=F05^k5$>=*sazlav!)XwWK|nu7%aTQ_t}zB`Ch2K{nY{w#S%fL)os4SpT?BQrZRB zkXk=2immTYzuC}Psh4iE1KRVx3?ZTsZm0S)ZlVe(FNL|PE8#OZ1{t*7CT6=?yfsq&uG`( zEl?>9C#ziMfsYDDOXBrrRnh5^a#= z_b}VQnjE<9zxGM36QgcYA_oTKfvTnwr)g0v>fHIG4MQIKIScjH-O}m#*wL?PU#_2@ zT<1v7F|v4jb!Xaso)e%(10b)lk{Q75{-)oI`Z zmwuZ+KEhFc_D1fHjR=Y77QG3q>W9#3z4x!bOYL762j-^+6nx5HGrT!o!Cg$$x6hY( ztOv0uGhEAEcJB+7^yEQ(!v@GTVX>|pqCg4-20S%JE(0BG{*gMGt8J0N(0k|XV%`O; zoYwA4_1U0}=~2#ez(-4of9@r)e38xkP_O1$s+40TX-TmHb(z55AqABTp@ z`#j(IrDwJlq9XhRCGqRk1|Ae{X0Q+&KgqV*} zfu&@5bHkl854eGrJ@*C!j{-Fc#5A&hHGgfJj>MQo{%68!W*? 
zBcH=!eo(XhdhI{f=ul^uh2F@^S+qFkO1XS66(70)YABmB!0KEpim11OHHjD(-ZKCE{C zt@#w2enz4=!HZxw6SrU+d zJu7Y^1DkMmiDsGe?Cn~}!MCj^55G)_X5f1eCL-nw0B$_L1zb?q7@par`6CQHAD0j|u+i^;WVh zpDCi+a9{kRTi*~jo)ZS=Ln|TgV>aFlOYnj4j+wZesU>B;Pc7{g?SrxL1oq4P2V(~@ z5JE6xLRcT!?gvoM5tN6$JZlf?7!#rmk_XIMnEn&BxFbLO{G7OPa4`LiciCk!3vdEs zeHkMz^Sf3q?HZ<+p3S03b~Zoi#XD9~Pw)FYQ#v=~7QhOg2)W|3MMbuFF})B7@j+At z#7xU#z%4;#8g)hgZmak2V#4*qw1no_SkDF9p3H>g-&e^VqUqRv16A$lRNnKOhjYF> za_dX}=KtaL_+>7LUV@aQ8cNS{N*)Zvj4LT>K}31{tvwfL9ZMy~>Ch`a`=ux{$EJT{ zi8}w7J-5oG=5%>V_+IE0s>{`M-xxsLn4-3W<)7OU-+T8ilACKO}G&n zSM5#_uGgoU>YS%KV2fe^s(w$^HP1y}jen0NE)+P8Cq@WA4CYZpmasyW0Tmh$d+5fb2*+VIPx@0| z8$D5ld2wj1JcFBQrh|_))Pyp7sWI3jabcYrw>v43XP=T8Z@{-J zTD>NB02@2A#-&M-ue`wPYoN&3=qtPbJ_jEtrk%sLtW{xMc>A6EgM%M$l;2pL9ox3j zyv8EG$z8C=N8tjSzc6$?!jHd6*Po+3>Dn*ZrKm1?_?AwTOzb7O9y#r0wXLo zCe!PI$!_B!nec&h3^SkKewQMM*8$A3wS*?3+9_+dJ5igiz!G2ETJ)JYHj3$JQiI80 z3)=u<1rL*j7;=^ohw9ZX1zb&EzehKit(RqiHjcYfe9EU8bgvK2a%4`7&pQ(4C^7w% zh&V!GXje6D%%ZE)K=g7Tz_5<6FBB?Neteo-!@?n?rfg8O`?^F=mrA-8QdzzK{#k^|~!=zGQ=ks8i5L)Y`S zGX)*)L)BNMUM@6sWcYq3LX%1Agf_L2Ay*@>eKVm%;RqW1mCe`|ri1 zzQW?@UU+G}iH$}%9u+gzs*LXz^? zHk*!ar{{Y|R4i1*$Rt4g=~$ewTK9LMnz4Z>UJ&V?x2C;j9oGfBY?f*PEA0GSijgS~ z9DKE9ru#j|syws55`RrUI;v66C^n+^l3fmh3&dpgOmt-1@wJKq~r zt6v5~#5)a3pA!QbPKPo9yIY_c3*^Q{Dg#xi7{I?etH<)5@@?^zPXodiw0|W$ao`a~ zcu##G?0R>hCWu=}YEAXGp#A^Wl;AF>%p z1iCcxbroEx1n_q%mWWox7Y2XUe6Ko883{>i!1yR~QE)OM2;l7Ct};BIjEVDfD`awn zBW8x>GU3E0oIb7_oK^dkaJyG^LvSlDeYw9g8Z0`Y>Gu2+RfYuB$+By|jlLT}2b$at zB=qE>6>j=w`JP9$X^xm8O1L!KL{f`~r9bo#{mzw;yWIp3Jd@qV-0t_+m8`EQ>m|C^ zK-@$tTm3+3iNqJgU#5<}PvoUOjD$lIb}t~}=pA1xFQ_r>y88TWqurCE79g$* zE{GhKE*Gp(wj;UH+0wHk*_Nsxz0!3L)n%b?CWq^=P%?U7rgCj_#9cM|xf;=A47A^E zYeNQyvW7b7co~R-Mei@pArx@WBdB(sNT=rzMvkV-TTjWr%aplL=C_f=91$6oHSUM^ z;P4mhfoQBWRVLRb2}mSJ*Tr{Um@K@VYdGLP7Js7v)x(PoJB(1Kp8E`1C*(6eTEWc! 
zWsr439co#-`e-x?#IaEQEV6l{q(VCS7Q%8O>5bD)BOcn_|7&C5^t{JN$oY_qTH^Nm z8wLs2-w?b9Ul6)Yvass~wQr97XVEKWU(rWqJUU6Gd%y0_h8=1B(J@yLDvE*U>~M;K zE_5UNf>AW_MT(C>Rf?YylVm&e=h0uv`KoQ=TH5~tN8wj?xR1;NrZD=TQc`fpBWtTsO3!iEOO1oU+X$hiYv9H zc09dQGQGv6Zv4y9CN@u3bM?04#z58e9^`5W4d*!%{%aHpv;991<64?9w>6he5`{qk zW+!Muo$Uc9n10yeNeY41l(pTH4oSi42KD4>Y~EA`gZ%-&$F$Y|Mc7+##o4vl)*-mN zTW~8ZxQD_LTmlpl+zD>M-8~@$g1fs13sz`wcM0z9@ZCJ!ecrdf(>)Hq0E!x8?|ofs z&FOzVQ{T#~HznO)7T zoGj?TtYIAgwI@lC&p+HrjmXIZe`>2Gox@BevWNA1occgD1sZTi=&wM(Lr(`btsy1L$N?qYTgB_ zNNE0OmCfs51>|mEmci z!<$;HsJdQwMlV#`(vH{3F7O5Wt2q&=`l89{r8)!3+~oJoMoe;FYa!j^K@D7am(mm+ zqi&YLsvv{8hM>;$yIO0QV4KXKPj8&bw2@9_SIW?%gcmxyh0yXv54)X-x8L|@G1P4@ z=;az{{WwJ0cLb@p1dNxEUo1&v9b+R%0FVBg_xC?wte1|Nfx)35C|^gDL^LvHxIqI}MtpeqtrwuuZr!qi$#B zKF&{ebNAZNbT>qzJJ%40xpEHH8Fc7Vnq@miZ41qXmmu=#K(=s!MU~f-1dao)hq8^ zDLlnHgRxNkovp{Vg)Hg4z6 z#-L_o z(#_NU(eCFmAPmS1T%$bJX5h*YwKjjI-bS z05UOO^rx0DYPn{XC}!sD%g|Z)m2B~by!9rJ7E3oZgF28AAidZRK4?nO+Ko8%L5&DAk3f4 z6B@4_t)W(!DTURsTGII*rS5HOzA#!$mnQFPm=J~MS6Ar2G86a-LZGED6Zs|CEPIP_ zT!!agXJ6wkxAXQ#D;CqCV=+*PkJQXuwWW@T_~h!&0ok&Ig45;79}neS2>Ic9a;|gp zzFbJ+81l$5FSUCrXdwjVJ19>c7vNM|{L%w?{Ze9Sio@5Hq+Y4L5(WI7z^uN58w5bkzJ}#dTQQv?I}7~*9vLrL^x@5d&L$8Se9V*D z>R%!F;kIvUJ3`?}uMjg%0U_FT`Wq|6_V8LC9NmY4j=DXNGx?1`dV#3KR*rK#Mh|%U z^qbqL@s3r}cX9-w_BDGJF%pU(wWhI3fQY(+4eZ5VTT^-voKhg6#(qJr;D>lXI|c&N z>i{hy4aW2ltihj&^s34W5;)Z3wUa_#6g4yNXirT_>WvS)s+8uy>5V96CxEeE9nayC zWBzJ}k^jE8Yq~(JwAI*fbB>VUsMTOsdL+2~qI74RTx0llmjBYPPELvHvsSZAN6cMK z)+4T2Dg4g%7g;0b@vqhP_Z}$H3Oa~+=59$h-M_P}UcdCnzsXe4Fz^r-DZP!mphYKS zU+zrz?yYja#{N9#Nv3n1POE6Jkd8a6c)oB&(Q~yVQ{^W~=i@$#)ro>#8h0iHJkH;;f`sxZx1YY%EfMMa1pqC?2wjl5kSja6g}NUc*;i;qpG-YJKvl;D=??8?Q?G2;=nRpU|p*IHoOA~5k#;nO0M^c$r$eUB;VW(z43B?85VT@O3|b{r;&9a=tt%_+r_=9?#KbJZ@Zr1P ztT#mzZ9k4g}Kv*pB9>afJ%UfyB26@ax2 zv_HNK@HvSaqgSP2$ z*tp9A;90o{KeT+!KW|WDFmw2sh|OT4H67SyL&Ja9&S8Nw)M1-eLz2Z!TlIH#GdN@qJ{ zuJ{mqMp{;A!j^#4;Ak86Md`joZ#G+k;veNUJvePwq6k-PXIOmA;psVr`N#Ez7cQ6~wOCyRjnff%#E4g-?7TXjZ{3+i82!~zILb{x!9&yI 
z$h9~4b)s}YmI(p{sUyRbkfRCDXQS0+?az|XFCSu$X0nqsD7}MBesm5`iL`~?%Cb4O zEF3p_LsoKug{+03cbK(JfdV_CoCNP2;L>A@^#QW^3rS$`XE5j|Ux6rkk%CC9>&MWp za#^^&Ao@&+12X6EjM(AF?JTa#u{^O$E_>0=`#;j(Gqb?;%o}qs9(v@fkQ_edI=7HAPYCgyf++bJnLcuo^3^=9=_)1*-Ilee|7xK8n(eP;|1$KUjGagkOC&vgi5h4H+UzdwSK`^>&TmQKG z8{V$emu0)I@|?mYTke4`Zb52g7C~2O!4-T@%szGw{aO3T?=u2XXO7lVSY@zCo+;$5 zyMg$_D=Jld`2hg9Mh`4$8;(A(Gv|%*D2yb0Z7N^#395>0BU~5+YlzObSp(RO!4^i7 zI)7Vyb>_sMKY$lKP#&XVArs^gdZ)rlrY?xtbIL{7<2H;M+*UJ^+8&WqM49CrtypU# z!)WE=k0HXfH@%Co^b1X4=1ATqM9y11%SWK91kB+)P#4r0n?BJ(+SR7WpWo8(dd7Ac zyB~7;jZmTjjD@$!zYAdDzUxA%6R}zwF@h!32&(lI`vg625r!;4zorCLRwNZdpei*i z;>P2-8ki);aM4W+(G3jI?F{5HaL!gzMgzSoYvM-L#p*?sc+F;m;z?sQG1+a*R3)6i z25fc78zp-CP7KKN1uNdjGtMhWJyf%PNl48&>65~#)({_aB{>6_{9$=tE6+?yrA7~{ z8vUXG%G};9j5Ccob)p%w242_O=>nWIv%wcgyDwIO4}B2a@KYd_ zj|C=}idy+nHQevZtXneluEMb7RQP)v7p3W6WUcnJM*m9jj1EQvj)Ot^->vKXjfs@# zKm5ifd*9FAj@QIozkzrHk2W%p@W)DLDTsSFFkmF8E?q?UJs!PT`c!#lR$sO`GNuQw{V_Z$Ut&2e z)!=yYE&@v@w1}njlSg3?$iwF(SMoR*~8T3u?y-3uWx8X8jKU4g!# zzC0Vtm0Wpha;@5t!258>)074`8ik@UMrgbQk-WuF9y!fu`HWYppbxwjlh*tE!*9D! 
zak3@_`P2*|3X7C)oib=5kCKxmrzgz&hP^{dQw!$%|K!;?#y>`V8J@@A*A(c>7mV!7 z$300^dYx|g$F2Q$i7xF$G^E}NQQ{j%FB;7Ttk8{!a@D4>0k>0(6{i3Lu~vXn!K59BwdTbaQG<$ zFaPg$_@=&1g#?-18HXLHm6r=Mg46X(cO~oH&UMXe30}EyrC>VqqM1|I)M`LN#h~Xv zLcsO82ATJf=X?5j`nt-z%c5M9pHTm{WJKamAtv87LjdBHzanWhO%w0h<9NORQrb=b zv%OY<=vnnV|MoM#7(t1mq(O)O9Z^jeFMGcnYerFQ>+0=|J`JjtBh!a5SKs)TANT9H z;L?ULK;x0Vj>PqHgTQO7SPv7ki&#%@Ue`-`xkIpv;XWtC^;lSM0N9vk7vl-C*!oNO zG5T4&%bQ5g1FPllFG+Zm4TXN$`T3`Yll&N!mmK`oGdsNg)ufkUD$_yU={kxQE`b-C zP&-gk=M7Ajph*(q@o&;WO={vLw1m)uN3%_Xg00nH3Y0mm#3N^z4oShi)f5*&argmA zR5j;8pr_c=mB$uBeA26s&QgrhC0G^qO1_X-{iqR*=O|VojJ4l(1@q!6v_8}nK#+bALWzV zahz8413%RK$nk8qEX1D0yn-=gG*)UKl;8V3#6@IWz}^*|a@|wqaC;GnUO1Y`2Wp-F z&6z1;#d93GBVKdTVZLH~{bbB<{}+w-vryP6mzYc-Z{Vt?&})jRU!CK8c&4e^;1wRE zEzzvX3uC}wQU$bj#T21ds1ny$jL0nQSB1FTSQ1d*Fr~KHFjmhP!xcMUgOLh`%Hvh0 z1FPbR$#`&Nvdu$I}MEvEV z8MzNQ5V*u{Fq$z_p%v{JUCR>YC#Y4|tCoVHzD5(}E1X{Qjvp~8FiIk@Ian6ZFXyAB zA8wR5+{XmvwHpZzjkY?=Y<-DtT!Y8kNq@Seg@vQGb^Z;u9<{p&EhHC23Gw-qwY@}~ znChp_Bl{KbCjjs}f|@VCWVu;mLlo+Wve^aEQlnY_nJg>*oBQM~)js}P0^j2nz!y=p zUH4YwkbibEJk(|h2Ef+%S=Am(e$Rk!C`5uZqT0w%jf7hk=9%@7!siF}XYWc@M zjObC|sfBiF+sQE_LbTD$!D3ShakOOweA7T2Yeim=<@$q|Iluk=cED&oI|qK9(570+ z1l~u~j3B|Zf=q>#+pvsEGM2WvNVyq6JT5%hE7u^iP1%GTq*LAWzdAL;6Jn~_`4IRk{a%PhzQ2;0Ps*QP-* z!wS-EZ$36naLWN~G=)0@k@6LH+F^!47pC20utBrqISdp)Ysjt8{O3&rDC-X@+R#EE z#o6NNFL9FjA`A9@$j?e)^%@$y@8C6@NkTb|#~|Tmjhq9qJH(%tdB?&4TG6L?3H=us zs@m~3F;mES|MCsQBYSrqBvq9=h5TzuX!K@BsOB>k$SQ=Y^cjvjk_@Jc*#-zur~;Ky zXEbX3NC5lfOos7?x{lrih|Eq+0lx->`#B&?%DAY11IX9VQX_kamQRpEFLSDb(=~Yp z)$S<7?zZt~>xtQr0Yz%&f~4%=*^y||U$Dl$5t7#7_Pa6LpR^`LL3S?K;OSX)<)G@g z9vlsS;QADXf69!#F9BA2LD`C`biAE@yCaR8whcT5U+xg0e#l?rHi7Ah7k!t1FeoFu zlJM;`zklmBg#XL=1(CMWAS`_sKy!B9Z9r>cdM8aSr$P(_?>ghT_^?FJoEQhugNz04 z%5)rrbqAyU<=(x%(^eq0VB{101oEWBeH{sih^5&$>p2s2{-4#4cB!AILp+cT7iKoo8+% z@_Af2MF-&*7zkV|_d@`d?gzELJ%x=ORHuEmRJEcG$4@< z>Wl-fc&HF?CrUt;1ILBsf)`Z@K3W26mK`JTS_|Q}BZ7m4C8#5sm!1nj^3Q^im_Vo% ztKeI;(SoATo&|$osZ4lAv8qYJ-C+r;hrr5J{h|ND>1-+_%HP2_u3sUP3W*?{V@`Jb 
zT{;4bk~Dt!>J?)5JvXCMlC~~0Z`texrQXi3Kh@QqQ|a~ntJL*o!g*urmsbVH-my!X ziL3iufmJu})JPigu*M;b8$kH^4|lA>`IqMx#nAs0Agl;Ag|&Ia52<(6>wN}GtEeqH zUAO<@eI6W>C+%Mq!L-Za94WilDB*59&JnOum zEnjvL?Q;LW$bK+o7iT8=P%5n9)-0TvA_y|ZZCH#0tH;KY50j0a|7%O}Rpch=vSVPx&9 zHT1pQ2p+q`_Fo`T`zT4J=v`&YV#W^f*L$SzsL*Dh(gAKG9YIj@^?9=VBi0~(=y+1V z-BevU;*V;ZG44jOkSM*N5ly?_YdJtuVO}op>UPswF=~}C<5=Wk=UWy z1t~Y`DR=#MYu>flqVt>mbr-U`M@-Pej&;x_5$v@rd8YB)D-=^!$X#tuV{*pEFWJ8J zAFDo#wUn(0L(W1B@ zh%NnMiCbuv4wkGCH=(5#$WRNuHuSr`P|}xL|8zA}qQ?3Vg(gJ{BHq_I1*~0!`gUES zR?lzK^cBU{@Df0K3@O8ulp7m=KLQXoaxt4Hd^Ph5$%n9>FgZwo0Hs$nZ0N}9YP{IV z+S%pKvM(_G7-Ixn}K zN;!*js)RCB1nv7zOv&!d;H3H+>(YC7vEc`oPqlw$BKwDjv3nC!QX>B*I6wI8QEPbB zpawE{hYUNM7ge}G+8>c((wc%^C^PE;E{ro?j8TJEKn(ttKw`9Qg)C_N3rQ_0e1*rO z!QUBp>8DQ}y8tR5zYM4P<6Dg>(V&E{0J`9xZHAnTlUyqfA8>cs(iU}=~1^RA6$_9E|6a+v^Ft6W!a!;7tn7}8r=7(M6{YSPX zLJE-&IroRSy3My(e8;rw;zlS-n0bG5Ji)VbA$Qp4tJ6kN?|Z6C`94kpM1pXLJ6PlL zOVT7sSUJs)wVjL^Nzz4v>yDKbT}m-e@|Djb5%^*hzxOgQ*Gi(bXO)Q%&u?#~+8%EG zAZ;=8z$f5FCg>whD_G?PJ?__U^uX*g_krK`fHhPuCj?n{cQ)VJ*Y5D38gEy0J>_ig zy7?z;^}L-&ENSR!-$Y744)YW6er{(p;;g`~BD&-m3altW`33dx`DFP1I*%=oV54tW zZhg6bS8Y*D$%%O&V1)N~^XaREvpu8<6cJjg5@~J{H=-vo~;}3o7zEbJ9)g*4| ze&gwD%8kJtQT5lTWD%oJawlUyMP9hRvx&Ys4wRZZzw90_F}vmOUSpt#b~I|IzUNS5 zFa^4SxGT@dfRASA+x}P8y!*#IQcRAYqM9wOE=yE^_XND)77ZMa$-DYT0k+>c!Y-J^ zb-su76w(1M0c-%_*8?2eTF|{$Qs{RSIhUVswy0QHCA{1$6LHPibGlBNlt<64(xdVS zs=1{=;+WLyQhS%Q)O3)5Y;XLj!j&^BE9`X!|Fsk|klrPO~>Y za~i_3iC;;k=vahEFMEApq;{fyNR?6)OO~cr;JkGCK}M}{KxC(T?_9w79_N##@IUv~ zjKE+J;xn@SMGE5@*9Tf72a?2AtV*b=NC=U_K6LbKtn!=`RcL;s?tr#-rxGjn$UH2! 
zxHeaww!ma#aVm62Y~MObgXDE@#KxMQp|Fh*!Z1uuVJ!9}y~uuHFetGcGC&Ply6cFj zGy~|CCYPa2p27+fHG~Qa9_W72BX1hjN&I~RMSJsVFyoRR(270?yqEJ0@$O~o2yH7s z7xw5{E}%T{GBP~;&?pB^{0@kX&m=)|1-6y;dnf?Wy9l*ZfM<{uXDi*?Q1bqx6)(by zodB9i&Bb?S)k3xmUq7ENQ7Ow_`=l?@9c^dI7ZkPfv>K)brgAs{-)qDK6E|F@=)G4$ zNN+3`{D*L@_gg;`>FCUms92KUD-1(7{(@5Z-C&(WAF!k6qY_L&+gBXU7Kp}sBLUEI zAod{33V@=^C;HYU**r8>@ydHAp+Cfqx^CQ8jP?SKIi1!TlB!m%S}M6F#EG!4W6H=K^u3{JrbuqSZ7M@Q9KU>jDeyK^0Mx@%uF z*Xry0AniCUBA7qe7=Iz8xcrE%!#>opE#qkfrSOf1n>KHK{mA?NEIHp#{p8Ej;s164 z*tS8^dX}RKW;$xR*Z-9AylK9kfma=IPRzpCj<=a=VXL3eR-49WvtXIC>Qk}t>HYwK z-f`5{so>PsJ=%#ceEFa2dVhl{JMunrfONHfThVhknave5DF8;wa5ZD1iCRt@8r5Rm zypJHK0%?wd%haEy2eVjttX3Vr>qW^P7N|W=$aH@(BWv|D74bVI-aWb#)*lFmb@%og z3H&f0Ne}mZOj)yZ0pJ{Dm=vPWd>ceyn;>b$g-3rCompGXLJQ)$PULX1hN0;-qjrhx z#h>(fv&F9MU$f0tqMdb>A?BNf8G_p0k{F*JuG4U2yq(^4b5!uD(f;%oq&N6_?!EUl zu`p`el%Ry6HX=$ON#zwfK6rYH^t#)1arl^F*tloDjQ`7DrN3Lk_=8O+f;PQSIv!To z6}$P+7;pNn8V|k%SEQgA@-*LLWyoy0jHtJ`-wT{!kd5!jzu8GggSpowHJbVbos8J_XG$dQjR0X>t$67BfcSkmpX zAeo3e#qKWgy8if5^)_SM(hbX#iAn@6pJU#}#t-Z|a>5@vZWKm6zsSRg8ei#M-euS7 zFh+R^{VoRcK@yKKCj82k8*A~aHRL-HXB#(=j+RHBW3#=M^%~8D6$9=i9 zx0+=9cffI{h&|X;dgF>|RZET@84*v7JeX~y)CjPD3Vx1N7R|zLnKnl}{b9L?`-?z1Tvk(nI-%AR5&;tR!Hr?+QS^fAJbvkFVJEaG*4Bu8#hG9M2 z1;Cxs+iWt$ds9yoGyXImR7z+HDg#fWQYZfOB90upK1SF$6B)DV!yyxJL7w9tHW4&` z$fkW>cE6y-M-^MS$p#t*#8f81v0VlFw!N(~<8}p6rMjgnr$QtyB(ZOo$#?W-!bR=k zl@pJpGNQ$Il23>M)hY{I3lKQ$zTE1>nb#!sk(l+nR%ttwr1~hpM+|fQTLq}1gke?F zw_M}0t$cY?&f=@cZ^M$3&#v1XtAhFwtRbS+r}|&XL|T)&LEOYM0DCX@f}scAtI-&v zWla4LRY47+fndzey$rX=W9C1d&pVQxo$Q(Vk?G`sq6!{4(jBOJrfY^PSuvIPuuWw7*Umgk)|Z$gGy64O1fSrCOVgDTQy=vpW?*R^wY6gL0I97xBN^vU*1GRLQ<1`EZia%Qw)l9R-Q znr?ouFX6x*HZ3B5R+&_Kec-**1hwWU1+s`D)TI@V_~}AKyY~V zPHRO(ez>CtMUN++M zkEGjp42ctm+9%oLXy#FUjdNK?aU6HD0D<)L)=zc^A-Cv{A$m>t8D+B}O5u4d4FRl6=L>{Vkz_KE_-|Yatf$~zYlnxMxyW{i`=?p)@~43)z<7WK zI?V(Y(OY^daG%i^?IBd)P_573^}?3RHN?IPDa$nfT^T1uRR4}RFp*JVP?oedf7bwv zQ+sh)gf3zwM`yB2%8d)wu*{A!G)_oxfCA*Wtao@fBm%*ha z`iy@A0prs%P6qiQz?mcGuyC>jQ+72ZN_X;-e}Ef*=k%08Se-t>^_NT7^XVgCnGhY6 
z9-eSE`77Gl3V>oZo-__xE+{m}V>@d_Y>8?=_$OuHhZT6lN$<`0!ULW?xepQb_mO~M z`@bo+C&1Is+V2g4<^GfA%$DzM@(n{U2q+26Gk%e;Al2-s!n-`z{puazx!5C6@Ql~< z7zNbg|KBGpEkX6`0K=nkG`xu}CEu~MmLN}D(UZY%3IHf*8Hyn_{b-Jtf5= z{&JeOYul{%r&BWvl}4BJUiFS58WnRMAv!ub^n1+IZoul-DZA%3 z^^+AGrkVldoSdGqUqa&|u3&=0}}L_t??O1f!ucEI!cP8qJTM zvcdPJRaSr8nooOXgeo6OSZ@v@6>bc>e8|MSk*_I)v0vje$S<|s$JPHB7D3f&zDoNY z)}PVYB#Q~OMQv$=N^dEBfluaFgt-#kSj0l3%XNIxRwwoRvc8hx;r!u^<0ZkttT)=W zIumH`EOmHL|A%cvZK;PLP1eRD0>bj4pj-18fdDGg>;1aXlV9L$&|+p`$ZKri%Pl4ZL17f0i$j@;*5F zOttNBy!&T+@A5C8P9%-V2f)1rsA32dh*|W<`@gvb&F0|vLS7F5_GQ4v)eX>3zs7xM z2TF#fz~cCkstvuiv3ber%Lb`(DOtqcf2%_J^wNtdyel1F;H3vd98MIWUk0R+7RKDZ|?||3X zOiR>e@#Mv4Dic;nJu)keS!dbW8_4udD9Itc!B8sCjqL{2gTqrkR+gh<2C|{$=|I_u zH7A*=9xwoy`KwM%p!O+RyIVEOJq;O9ioToakh8z}OJ_2E44>H9=u1#~lln*himQdB z7jS6b0^>ZD8-CEIRs}MeACpm^9~uvlNL)!UMe2Yfn{RNB)81D2wF33FQTIXJjpHK> z)#B-;V$ONzcBpBCeyslQ+-qO(951fHM^+3#DT>-O5F*M2GKtndqXFm#vCezjFo#{$ zMC~T-M+2W|hn_Y1)T-mY$JQT{0O7Z{6YYzv$QtZnkSIZNIf+iyNC-%f6Aaxe1j{A> z$Le4qJaAD-yos3rkzPN=kYMLnl3GcWyZ~P1Bz$myb37`KJ|Zl{mL@+I&Q2|5-k_)$ zA-ci?ai>yOgANTtK_9>=#l38xNEjNlWQ^rq1j<i&g}LLF5%>9A!yd$?kB`J$zqv$>Of+ zx}%T{#s=H`wwS%6`#o*W|G6ckbKVI+RhBLSM>H$N^pQiQ8|k~p@tVXLEQmXLXmT%B zQd3XIfajZ1Z;RxW_mEpmiA{Rr%ABZo<+JR4z4IHyRd7CQgj^6}q$}`Q;6rtL`Yzg;*qWr`} zW)kwu<({=UPx8!3?L0J2i%H-!ipRpjl0#ubciq`Gh{t!eI7Mg>5o0rCdY`xq*Ksjm z3tTx|_g>-l#(*hm>yUwkt8fr>KSd85b{cs42tSa=Cc#vmz-GVC3;&#@aPXSjb+|(*Wz)m>XU_2r=PGK#i2&Z=iU0Ebg?brp{`j9!z)1XEx73{TP{79T2=(z`B5GbkY}$w9Efm zApjv$Yk_Kqgo~Wg>xyPLy*VP`FSnKc*M@Y}6&+V{@#`n9HT??0DB*ytN}CW>Me$0u z1?)N<^zI=k{k7>dL?cfhZseiAF>*#L|0qw|i4d)|<{O-~ zOwCqqAmQ+g=S*%I8olLJ%aEXF3&us&f;zJS?Lyq>IZCfUV38jI$NuMQTAIYnIXz$^ ziSf@7liVN0wjdTpJEd9a9iIYx14H<^qb{4LMY!dPPwwy{i0na9O>ERugGtK#wIKq3 zP>HPv#TUokLw`N*AY)L7gpRb$hY)G6Fml77FT_ekhFMz7o8kywq(qL)9ClwW#0E90 zSDI{#_A=~WWlHZ5CEd7+TZZyhGR?rM>o8>6WHf~l-HGniQX=N~QE;3XQ?dMv3j&o0 zQ^^G~tAL^Zk%&h4@AZCJX!bEakClTA-Bk-MaHIosYtwHG#sGsE2%bwCNDU5IV}_`= zX4zXN`b}5KRVKCt^4CQiw}LM1*T=pkI3GM5l2P}T1-b1b1G`XDP{~~dK{W&l#KQzJ 
zK1F!Tu)pE?B6>T0Sdc&88Ym%Ek_ZS|I3sa0MD3q-s#@=djnWZ?*QWW-x*v-tCthz=?|}avTHhTOUdBPUDYv?%CCh zrZF?VDXp88m`TcG54j)6U13ai7F%&RQId?+_unHg=xClI;^(1&i3AvXaXhyDnEbw% zY>yaycr)?Wrpd=6at)l0%Y5K;BX@uz_+LHz@r-R^k_x3gAcqL)YDn zx;wwKi+I}aa++b=$cz|*gz=dH1D+x?AZ-ECL?jyNLF#c9*_dO5zoLyZ7iuXxCR#u! zf{W5=NA?zD*E}z4W!(~BweTXH=_w2<^_Uc$N{yMa{EMSI`6Y3)6WK83IwQbze@&VD zfPK8mIR)e4{$-<#qDAF|)eaZ|OBViBvJ5a(fboMyGrM60UCePeP1(ZEQzGx)* zh0U4*XCB)T-EL7)-+Q^9L!I!Xt9}B0?i8dN<_!aVOPqtjn7}l#!)+llfO`|{+%x+H zhaynQ=2#$Vo&rIx-H-!=2&14lN2RIb%j|bb7`2Qw4oo6+r{{S45?-<#hH`>uWo7!u z`fcA6CN3IOU)^7!R}SY{CkM!pjuHlkUE`V{(2|kC8VOyVNt0yfcoPFC&4QG{10idE zu?cf%aU<)3;x%l>7dRQ#-&Hj;ryvGYfeW1nEolUbMehLQiFSW~IR+wrH@|pK+EQk8 z@xO5+_IDRNe@E>eK_rM7kxhdQokiM|jE7x@e`n)&UKtDhthYgg6+)gtY2|R~h;4M> zQ+R+yn|r{t8FiX6x313yPdrV^cqRz|0w>qt@5XLP&Ca1!I_x__&NBP9Tn-l(_$DHM zq^vaPtDS1>**WoX3Ce>%PUxb9~P$oGl^jhozoLY=eVZ$6%1>5A}4F2H~3`9b% z{#9z2MEv6-8(Y|w+{r!c0 zVX2*AC8AOjg`fSk8Ztcaqv`@ZDRGp&YqWa2gE40YA@36WF(^CAhw;KfTPb6Hs=)hF z2M3^Lw~4D+MOuMzs!4!M;%FR)#cP@_GY&q?V7n#Ee{pOEpR+4dzbB_e{4KqIKhzkI z(%*Zs{aD8DY>W@&)yPYfE_ysewTd3}; zjs(B!KEs3Krdp|(5OPV(bvI)BR9EVwDW_>HAN2U_4H>6v0Xj~7vcb)?Dp8fXzo8B- zKYZl&a|uo&{r^&;-@gSO#5VVB7@~?46Z^CU>#6Pkm0|-9WbKEFpro%CI;qAJi%JU~ z3bkqX0Fm?0NcVm&JI_z=77U$Qp-&^$n-?cL!)?k|y4Y${f!^H%qgh~F>>!N&j!0=QgJ8Hx<7?ntjOD}v_jMI?% z6Kpy2Dvvcc2vp!_a6#M+o>rK8mQ2+;XnZlLBss6H;mXFX@89h8h|zZA!Qye5fv_?_ zZn!&*l1NczxQUDMgZYz6*#}DJ7^1+u!M2XP!I{&8B3jcW-y66TR|@gPo$ED7_+7Q_ z>rbHsg_GO_IR)&!<&O-(SUtvf=MOGkw@Yl@3?S(V*RD04X2Js?g{Jg{aT62a?;-CWEen`w1g*iYpm(^^|fORf|4)O`U;+Yx$l;Mp5^jqtz3t zo24c3&Dnig7;zI(bQkWb#KPQ-OHj0(%$J$nK!igXs;_t2Cu0pakKOw2>>rnNHtq~I z>1$(>&_ZQJlm|G85RPnAnxupYeC}x2bem}EH=O#Yr&D9wB)a3I3JG~pDC+KiSAW)o zsE7I4Ofe??*oWz5ZXIperP#0XW=#k;pk97;8Z+)ZJ8wEOm|#?N;28Yxt&3F!+T z2`z0ILK;2w8+*nfX35T4;X#}+Nj!dXAfr~oEL;XI-G@!t1F$<~lt4%g2y6nR(|@D% z^x#wng$&8eNrWKZH**2kb9krmv1i-}B$~@|513zS6$Zc^znVI$E-rF;9%)i4KT9jgSj5 z>1@okon8r|Jra4wK1f~fISap+hekJtH~MVH7izsS&I&zV0g>gInpldr9-+sj(;fBe z+Ae)^8|8o0Rz 
zR?FAz8qo2U5cZB4h)dad7(nh9#MBOYlz9ffO(RY0y!n-zYaZco#2{vgq_woWJOj+^ zqI*J1cq5%s6vR{^!uPJ{RNOm-09X!ONOWJ?$8+TAeEM3>tyaYy3^H9RPpWx9Jbf(FV$*Mq zwi1yRR|%gl@5M>X2aKzHMO*tH&QbC;+!(EJ>UIPQcyU!;%}Zk;OQq>a+Rd}0L?Dcf zS{5f(8)Yq&iUAF~{KvIXS4PEgr`bX9G%_fi8h%J^GMqYWVfUXbQ(Oi0-^(X2;wKeAEi6`ebKD=GQ!J*6sh@sWN^NX6CXiHW)k2(?NyhIc}6q4lpX^r z&a)b+aQluq2D@T7iETLsn|yfFZ4!lc9*Ji_EpKmPTnq-fDJ>ypz)fZy+vSH`Qu#HV zs`I`L@%*d{SS6GE$n%B`$#$RResEkRdpo}4PRoS|sMoTO(>#@OwD(m;Ps4&f%yn>_ zM+An;2oDM0b)-&I>*fGzitjz8dX@IO=xVVRM0aRl%Mt9tMMq$ThQw7O8b|DD7ONNoBqXf|)i%ORe?Mmk2i%WULo2ymA5n%^G+VIP#j11OC zVYY_T47MhTw=mM+06@t4B5{8Cr!jtKVcr8#i4DZJJ576*ifqIBEg?6Y$6dUch*7D( zMv0oc#*0XY3VE|QSw4AoW`XUzo-XxrN6+<`$=%Ep2^^gq?e`W^^(C$2(mdf zSj>x|5K!ZnvP>-*?1B%QoO3pgWVik7_g|}BFLT%z+?PvGHqH=5jZJ78D|dRHFjFFX zv)r{BEFeR94>N0#{Kum>kb|=v7|{~eUW?>#0N9-@^I4cXX=fia2^DA~>jk>RN25eq zV`8FuV5k20W@jJURJ*HEo|GER5Q#8B9(8)da2 zvC&sG*E7EXNvrVy1$)$jWo(d*EZPb+ojv*@ZO4R!`fGwg&3Ti&XUzX?pCcuezUId$hcn2>6)}I;S*?sSN1h&@o zdVMF%^>s)qvS~8{HB}OR7TPh*iuDU&YDe!A z42ncCdhxjPD>T-_@%%;qO~jiN@dN>{(10BKbhtQTgna20e?|=|oT|%jV%mwBwZ_0198isdi70 zK+NV*q2=A5&FM55$n7PD^C2H@qG*;g*<`)V8$4Imu*6%)*xMR<`!WK5n4Fc~Ytrkr z;a15p1VY8W`Nl^Mgy?&s7+^B;5vMwz-EeMKSe5(f`yH-1TbA5`eA0bgXt|R$XJSS0}u7trJamcSSg!Qh?eTTIH*7&&Z>G9Dv_lib02_tLjt%P`6{`RG#@N3@r zhFvjT)Ko!(WzFG9=VO?K10fHJe&_bp`$?X?KvPh; zQfh0=A>vavw6&npbj9-Xl5alN9yHyUN+wWclfyAGyg@k|#( z(T&H1`1iOeDy2Th?2XIMoG8NzI0iB^MH+;6WJ9+k`ZoMQ#=g2eHg{>Aj`;eNitK}# z8_$my_-JXZ7*j%u!QO#cfF>3J8MO`H5+Yolap&<0CsESeKiflQmhUjybj238eUoRfLB}`Pt~aX|-6rElfV%Qa3|*u3eM0dH z>X!ebGL04W-gQcd2RrAdoOJOaNf8fd(Tp$sp6VwmM`|?fZixcq7wK>w%^p90EoJaG zLc3Kh7??f44+2yXVe#@S?>#wAlfsWMjf=P<-r82z}5`1-w^%NeXNI43zGH zA(-Pu>%JI??Pb2fLFuu>Q73+GKK=MP^w?tI8Wy5K1Z(=6BMEpBnb#QtXFzEWN?-&;x;sQt zK!#2!DXAfb?p7qEQ@TOA8>G{rL0YzsGp|GM`R-xy-%*?a$T@8wIf_>T&o z!gF7e(e;MvT7%eMC~A<9l4=M)9k=iNWl>&02^{3`c{6_Owey$ljmucl(5V=?pZS71 zG&Dvs=7i9iN`f}Zrb6uT?zqwE$T7X(ya{?l;(L3E^5d5wu8bn*OuNkl>PH#!IqD>q z9(BRbl^^>NzHQ;qDu_+&s}puxKUjKKd4b4dv_BoCCpt0E0}m#Tz?PZ 
zY8Q$=C+Aw5fe2r_SQW>-hhA?U4(^GNZ|lLbua0G4mm$nW0x>zPtr(7>@LHnnHEH3( zl@km~j6}!R`>v_}-pL)Yj2-u~*PvC?^`6p+njUp)d&orUUiv8IM9NeSpM`W@o*UeO zyQD>?@Cd2R%42u-cEYZ?YvVh1IVZ8L3Yw_f(8&bL#Al+JOy`q}d`s2VGsWC3Eo)Jp zoboT|D!9U2ab_iZkE6B0Ny^#oTmhkQk9mvk_X!yIZZy29y4HM?K%|P|;Kn?aJxTO7 zZ?K#0?pZC@7exorn!z(;uAQ;0?$l-C`0GHdAY*OQ1-nR5zR-lyzbAGp{K0E_l6Sbd zsq2=2CX&}|FOs;D=J^zg$e8BT)3_poa*`ce#Ws0_Ea=OUk0QeX8YrZU2LR^1TUGbg0k^ z=|SyuuL1(pT7S7$%wi**-DF*ro^2s(FjrqgIq9V4=Rj?AYNq}<9$Iut%t@$?x{ZEL zXf>^9Ic?7A9!|XnYvcFDJmI!ER9OaLa}*=LJ9EKD!n2-LdYKN5>rNe!kXU=%Hb0W7 zGnTb=h52xM1Ht9}JhgV&IPruW?C9K|!d=Uy*rKo&=}memS!o&jQe^YB_3=P0yT&w; zT-aa9C82}YJ~fBblX%IsW70b3N9sg1dKyo~jphiRzarWBBF?{?yd~&#o2e*o8+wxa zclM|;)t)GzX#G;eU!SR16U{2ALt3OQkAg#KBe_3s1VLSTSPLTP?&B|W2aIJTYYl`u z@QSv{qm}~R6dxj+i6qBKJW(;{ZRZ&ch&x%m9_FChUz6SK*38CL zt_N&YW?L?C7u^%DlfB#dz(EWb{yi~!i1uooGE-`Y47md#v@3uRFY|qnxkoH!N$V~) zdXlsJg5vO{sY}2Wc@KC9%zspN?il|iHec6DoEPv3y=S=Yo-2BGWD0Sr{Eq9EL9=_X zBfTmqHGp3`KO}Qm<-k2NE^}AsOyM%m(|UWf18NEy`RzhpWwE7i{hb#r>4)3|-%I~? zQ>0G~mzwz;NtNj+V7b=MKYC-$zXL_#gB!tMHVY*t0g+@FSNgX0kD8|Xx6(Qz!xrEl zS)h9@2oUcx!eE}rZ0_kng&MbrCOvQaaw<$bTKhn}E4C7UbFoKab5N}Vvt7&whu(29 z?DF`i(>u#2rt+G;FDDPYdmjr6i#0vJf*Ei}v%X!ot!BT)^l9_(B@85-Kx*#79%XSrXfI1iM(yvv(0rna?E-vX6W6nBbvmu zcU;gyCLWfWf!@{T{ym;_p&2Ekmc?v6@1Xat_v!3Wt>ZC5yV5sYb(w`-T4$LCx~vo2 z87rgy#{GHs=8?K)&4zFV@OU|*?Q+E4Sbn)!(W zC%o?6#`Rhq(;WeY;G4^3{El##r{}>@5CwBc*@&-#!NZ&)Qv*+AeB%4+@YzV~4102* zX85x_Vf7OH50=)emORyclQJ*+gd6>pCef-hzTo`H2ZaMs*4yEbZ@~O?$cWS^H|Lt! 
zokD-*rP_ON?AW*C>i09`$cA$r+v+OuY^VZuV?96zHhPe_v5XEW;V87gCDW>%C&h$F z&_5wxgNa7>78L>F+ef%RoBsea`na(R>$~h1vTH z__H8Sob*^egBQVNnbRC(HE^6@{V7@}<@6#&$WuxDC5@I4HYaH{4NnpW&>3r1uI|Ka zcIgqWt40Ji6H_VE+HHzIaxREx{<38I4GU6F11RdvW5WDypeVs{1zVFRxYVc}6JIbe zC{dp#)-kXOw@xNcD#YaT^>63tqxio}pWIASJ0bqErHfyaKZ?6i6+ViGNj!;%;5Yci zFu3UCs4l&upk79O-$uzjpY`0X$L7RVkTwZzI>&(=tL-vf#4y*}{YLLwi_ z>O-|XuI%vZ(Lc7A@i~&Fu*DB+f8VF}Qzy5oEWx&zz zzW?bh$yk<{3M{L)&&8_B{ed5l+>p$c*-YI0kfvY2MO2yTQsK*w6Dn7^%uB7^%cV>B zWg^65P)AlJozBH4Zml}WW{{1#z?NIxR1>t@oir&J=G=uqb|kDD+IrJZcCTbE0ow+1 z8Z}B@$9I@ar56&+d^>sr)DN@gZ(^C?{0b9qe5Tjk;(-6l^F-+WaUe@j;e3*E|L(r8 z_9ks5DDJ0kL>DoglwxgH(fS7z{0?z$l#3tyktVqLcNP9d+=%4Po(6oaIbh6#rL?Xo zUDXhDu^IQY1@bRlF!Za$y-X-u`z`B0U)AJKN?TneR;W!F6@EAOUM5`-&x1+!NFzi zl||e~qdcCG$+Uvla(G?Cq+@qV28YhLAGqaFUveUmeUAO~MtR z3OQ?pK*6_?c7dm4pa*96?(N2V&kF6{CiuZVw8OyOfL&ftz6{g_6xbSDFnyA768uF<2}~IN%VeG^eMxnA`wNoTvpeXrBjas z!ejukW9^|J!%wWHE!kUSC{x1oy2ywX;P(L`Ha;IMd9pH;qHBfX@m9=L%_{ZLzDe3# zJdTB52QP5dS`$5|wva5a$4)H32D?p}mS$nvwc#vaC@%tA*@wj`PM+$}ooUr?ssoIAE&jj>Yf1FUcu8z3>F z-?({*OMo6Fs_ecw6G!3W3LX}r+t+hdRXJOm1!ukV7cebLmYL_lf@hQDld1OfsFn3x z^B%eEaA7dmEiK1^%1r5Ock>7nJkgJgPg50<*Ux5GdZ4?fZoA){MYJA^ov?@eo1DeV zMkCkU^sAwRO9wxtZ>l8S^y#5~p_TXS3EIdw|CX5>O=%YLaXG>%Gof$&)x*u1#D+g1PgV za)k5B>3KqY^Dzi@Baa7py`C%JHOiTev`_d}t;Jy)OUXqzfGU~OxK z+OLR;@i093 zAtwSEK7bj=0$ZD0_`Z@nRYY#f3zok`$Q7+*Qyg8S&Nod9tChrz-U{QBXp$Kl+b0;Q z&;AWy86vLQ2=O@Wsg^hq8SN0A)3%e49lGhWPWEIC6vhkvvQ4Er-0Tim>BW(C6f~(u z`S?Wehh5jo5ieZ%FC}6&l&r>XFk5%XH_Fog4hW-^juZq z0b)o?QQwZ6^V3lz#Nufi?XHk78&!cSSp;OUL)?IUo>uS-`&Cw_m9cNwm%5lOHa3gI z_5B8UP*dXf>d%Zzg7w3YZXytwvMwHBX^lmhL1Xh+56o<*X=r)7|JY0 zLrTuccsugqUVGmchhJNor2B|O83n(tD(}qDl3EyzhUK1aWISI!5yJs(5gaE_{|p$| zsTd8tBI`-&s)KKvW2NV2c5?Q8R}SP-J2SK;M& z5B5sX-yz^tr=;qBLURHNemx9`xI4p07BIFWJ5Sazvfae_B7|N2w8F0JT}3vUM|(p( z8a9`1T|kgX)MA%(J+~v?D|)d|%gRskr@@7j!ly{L*kz3EB3L>1D?6fQ$Q8UbqHS(myaF3eBPDEiuRPH(qF-f0VRT320GcuM=nEMso{eC?iM;ebC@G zd+qM)NpR1lXDI4tbnnrBJ7T0@QfmZ^@h+l4HpsH+`FOx=CG|Vs-_Kzt{B;3MC*_OO 
zLC3nG%jmw{P1vq@R^t=*pXsP2mLhJJY`BBBduYDl>#HMyr95XEzE*5W4a^@e;g7f0 zDJh}1C=N{33lBWCK=JPI==-Mz#YPY#-!yGFLY0msko1A7HEDcJ-UFx&=9G+|| zf=b0{3`S2|$7lb3m_2u!m6yAp7r(237lj|NS-h@50AH6I{=EXz6U!F;m8-8wdhy?! z(-JI5!VXucW1AL5keNyKh)#2-^={DLsYEea<<611WoxzZngt+Lq@LH;aNR*W82r>c z^pcXlg;y@JFCCbQ(FaUT#_TdTHm9`Oe~<)lO@?~N`3;C zRh{611iy^#{%eT=S`V&J;|Hw;TUN6!#_`R8EK&A$3$O^LT~B5X)y>nV~Kent%Z)p=2y zs=5ulQ%?Zbr8-ma(Cavp;d|z|ZiVXQXz(W~TJ);uFDT=zk z##AKpm0(JgK4C{Zz}QcM4Q|44FtoEOeHYAtVV@c1uEfagvR*5Ab#QEUcN^7-T?q^S zS!YC@iN}}2pu1EvJ5ENFR4*l(1nviW8Z;$W;=rEO;ug03;$sOq-Hqe#P%z?euBXQ z)lQ@(pwlyQ=AIdoZ+NV&-uT^cx>>yngw`(7ANB6G=)7pPQQP(TI+16=uhV{(1D!~t ze{p{w^^j6)jjP8T9Xc$6$tV5Bw56P2ZZXo-M&}cGZRFakKV?9X1RF}Bkv1^E1?q)v zGYg$h?y?T&cR@)K9Z2Px1JllZ6LaI_|8}z9sn?BJ8o~^>R?VblsOcG zqYhu)DgVzwo!(2+Qn$IB3fy;^l%+mAz<7429APjs1ewP)%J!SR9xmN{NwJ70<8%SF zz=L79BvflHhQ}Eh;p6?Lf<)EwTC$*mZBEwepAs!qjRF1od2HDi6+=TS<8Aa-K&<`6- z*U`5oKUQt+N|CA-)-B4W4wNZUF9WN}MqPjzRj(WPcDmOL{?_{4OXeA=l z+>Zu=f2-vv{c^hbxy#H1R<4!SO(pAY@qGPU?^1r?*J8DQ?49kC2F=qs3EtC%Nahcr zALc(n7B(UA+Pdr(vu{&gq7-uL;r+Pg7QJ%kRs7C`(j2Da(D~RiAgf6h1@*>z3_fmu zLZx?Dj(Y8qCHE=I`}Jj4a`j=UjNtvIqx1q%H(ks%F#;zhrR{CqXqFV41zRJc9C#dA z0$zybJ0Pl1y1PWN*X-{ep!*vR({J%9(%S(Enm(f|^lm@1>`TQh`YtpK{4}4Et95(# zaT_=RTjKPwkF2rlA@Cc*kF!cN$EF|lTeAFOfRs~Ql7DZBS+mftR}O;rE-XeY7KX>& zHk|1*d`@G;-0G_(6})&17S6&+GJ{k|+B-Q#(=KT4KCmhP;fqzuEW&^&c@QP6CGEyn zS4a)!&dVfey|7U6rYoVlmibbZE(dOj-U52zkFh{W@zv??fn9MfUK>jeA1F)dPQ17< zmIr3LQpm!7beLL3HE?N@8PyMTHf&Seyjjmf@ilejBI1AEp5w|z^7dNJRK~`W!l&n( z<#8(GnxP$UYf()0kG#q98{bbiCgczD4(Anf(}SmC;{-!svN zC=*)iQo@0ZoT({inmX&^2nBVc9Fe0$-dz$o8VJm z$~bvgOIsUVt0^qzV?s~s)#Rvtqo#K?MOk&~&7r0bur<@QT+?^%yB%+|%Z;0ETv$@@ z*%0=t8iszqmWUe2T=Z9S|J$Ecs5Wu3r)QkKS>zZN@3uRkmhyQ%0i%2t*;aTo{&rVRSsmgqxFh>q#CpB-~FZ}qFzq-1W6%pKn^tv;|b)bP-Db zYvl&3{YHHi7H?UP4QZ`}Cx?_>O?(FMK8p)#aq2sKc0iOX&(&H(*x>v*v7J=3RRsI> z2#NGzaDSu)m=+r{Q`s*;L^mUU*Vqm zuJ@@YYlN%gF*f2&Qm(^+=x&g?dJtlb9{`=_-vt(b1kDa{&%X_2HXy87dH}6cQiQF2 z6jB_^@Gs2tBbHdCU)y|~6UK&Yj{zgSTSwp$NGD1|7+H0VY0ec`dT<|4KacLjAsyQgDy;QG5js06?{#XZ 
zi@2b^IgCpHY5XdPpRI_4zSuaYz3c6PDa6qZM2C#$!r;2hs3TmM2;tuVR(q&V{X3)# zv7B?J5?m~W{|B!LU9I&-g63|^BVEWF&+!vHbp3sDp8LVdN$D!`=CLnM*58G=_xC2h zZqFBLU|hg4K&00j7dp&}d1U(i#K8!K8XKY#CD)ud%4r$VTejU6{HXwIEV#mmvwzg* z`6`{})XyPP;Im@of)Y8JdL0t1vsN2^YgK=CQAtoY)|F?95p?btJsB}iN&8B z+E^GyI1de2%!MMPNr!Y-qCQ%1B6Zc?bfui;9Dq?{zdjg&xyAzcxtQ{N&Rw`1 zMhqRkN>o=>UEXU!Ovnl^nyTX25%=?e3*n~K=Vf{gs9}DhvE~C$B*kiK9cP3HH6IdM z?{^9&$zQnZXfTu)8C>an#AWwxg-mXL`5rxQC)=XLsr(salDlf@Qi_BTM-{Sdv%Humb%PA7lDHef(ho@Hx!3%=QDNlYW`5nK0k!+SwB zi;=8a_qlmj?CRnu@7#KAe!TXhNAIj-jO=F)#$Yg6nQnix87K)^0Z3%2oDiokac|;w zf0cWDZ@1y-xpTI@(=y5Qsts>a->osX(yW& z844oncOZ^<80=Qw981mudCO$gG}3bxUSfS?Gj>EZp|3pkMN^IqNEi-Ki$2@Xxg=H5 zMCigQc?b`92ustW`TjySgc>isC}!`e-~L-8`%X&U;EB8K(gmHAs9aOm%Xf8bzq|>V zeo#v>?8c%*JDxB-+0c``M$3`eyL_?GU4%!Er(aIEanmWk8UNJ}Khy7)@&10~SkU_j zXDmbFADOj*(`9o)4!LfEQWcKfa|-%>@msy=csYr{-f=!!vPti7q9W%??^!KWH94^I ze*N*6Cd%9)YGMKP63oREi#?b*U*H`B^90&^!xjn~x!O~>2>oW|dsgSGd$|x7^5UnO zxbRW~h{+cJ|h^m8$6oYm0^f53<*)g_gj8w0%s_T+*{V zW0p_Sk?j8Uz7had!^C8$VVx;_IuDT>wAIC_XQq_Ta;1Ruo^1)!qGj>!woPXjTv9!K z0)HMbPfF{|Kv2ffQ=?WsN@M&_@^@fhv%^l(ar~6~$lk1R^Eis!!QkLjjW zk1G(_RK0DFmmTyZuYDGqVJxVu&`wQ zC#Ygu%*}17{>0H&{Au*(cfZ<)1HiF$S?k<(2fP$f#GlpN`<~1({OC7{V!TG$40ij0 zau}cDpTAejp9V5Qx{(gIQr1!q!kWu0={sg%XONm4Q(8@{t8SAxcYD>m)%^? 
z$lGp6F!h2mNPesv3D{o{G)4w3fKAs+#?l%YzfyGqp&+R?Kq+HpiWfy}Miyn*y3*4^ z3?H~6jnMXc++wnz6%QTwP_B@X)WjXqXWo@tgTm%JVRn&^PPp(AIn&orBM3f$I?%1j zLD_%8v@1g_7nm54M5@F=m?>4Qj?UUyY08p2rC{>=edYS@Yi4Jl!1F)?uvDvJLiYaw z*`If=|DiFHr-2`@?mxR*}dwjKA^Bp!?emwAZyO+-WyLVse#AM zFQGEn&KsP}Iuc+hmd+dOf+7m8dvBdgO%JAN?~39+ObA=j)MrIftwT=Q%hj38ey0+G zE%;uMjOeH~+v+fVmC<5=NdKe~Ik5RFR>Wj+Dnpk(Hp^qfTc#3~W2c1zGq(*w1xsV- zsGfK+SXBEnYgR)ge3w^MjiC)xFG9c(f5_Hrjzgwfhqd?{bd&~butip)vwJbFwP~)a%|#gr)A>xs1&>Xq`(&@tnn21g}Z?g3pUcVyX3;JGX?lvc(Y(n(~h|rhsLzscUiEY?{Te> zXhXy~fKsb@`(b_2xiK3kAv4rH&#}}`lX7yL!G`}}4(Ch{!F0)#8rQ&QtlCtcDKbVe zKW{(Du{-FQpK~tXYfei|mYlB>NM%v$?Dfv-D?Rp6-Dy&bz69uX?AzV)Yj+d)y|&_k z7&c!EPC)85h5aUItL@VvlUX8`?%2frI!dA0cL*KcQ1DM*y2t_1a1ZgD=f=kp><9}} zppexbn2>83$|kl`#ItrEu?a`Ks7GD4g3G+=JSjywgre#F9^PZP##Q=~)7ChISsi|2 zWjL4$2BQPZIuRe9>4ooHj{V{J$Mx7dov87C4X<%s+m+KxhA&_1oku<_yqgcXGee3V zKqMM2Z5;M|6laA08TTitA^G2=^_$G!7!5zg0P=z3c9^myd^QxoINaan7 zKMD)m=C<06er-an1MaOE39^6%#nUBZM^b^9%9Ml4u!w3@oz6}ajMujC-Hpb=aO_>S z^9;3)yZ2QVPc9H~Codkxa67JTW`^^{}DX_3o3?e%)u|{g>eO z`m8IE)-ApvyICSQOn+5jgxO3?r=OI#Iy|Qoqa~d0-g5)>pXj3DC2&J`wfqE`3k3sM zdGj$QJ<{>S`bkZcuEAn@Ft~E9t57udU zglOKNZu+BZ>~!gvGZtA}yGbQbm0K<8x=!=3ozcC1?ALu>D!>r-=zE1V6g2!5O+X(_ z)PAoDAEP|FwdwDo26pt&R^kc`L9%cgw@hpBw-Vx-R~o0 zL_ZI{ZFLQh10%{Gj#}tXdJ`%c89YgF=3YW6Kx?KE0j#DMssXcHX^3MyLlZi%8v^9F z07A4pRGn?U+p2tTCD-jB8I-B6Ph@)XHlRyHyAzZ&rYxmAVw?4@=)q-8YUWrFCo0n* zZuRkgdWFad(G39`U-G9;wiR~?P@{E#8Qk|UVvL1nkX*&W_kG!{XKXN7MvauNAuXqO zL#xi)8;}X@oh6P9(3pm|=wdCAN9G(G5g8ZTe8BZ>)JkE{Dt+JkQX>VuIYA10XQ%VO zIBF_5b~+_zuH~e^dE9dJ`dL$DQ{LjoJZSY_QXw0!mKbsTM?6-M^nYn* zXktj&9T`JWj^W^0lz&%N2q4=4ymcXE7+*eG)?q`yCe`r;iV(2H>1WUD@XFYjBLt}!fY*sH z!L&R9OZkuldZWV+@*eua#$ zpJ&`Tt#_j`XfHS1llE?_1(Z9}X+0`U%xVCieHPRzjl<ZcQ~+%^|-F0=0hHT|9NGIH0LNaYYP z5!}=};8n>AO<#{X9jraZir8KQ0%*oPBsR{uI(iWJ z?xrhwj=AqAMWJ;RIBp{h(*08nbZ8Cz50fw4upEp}Ky$6Nr26i@QI%Iu{)`f|;Rb5I@rQZG0WdGYJTDr{j_J?FMN|8X38jRq56{5 z!Z0&_%Rix}yZC24J;TbtuK2atqxnJg;{#W()fP1(EtFaO@vqb8IXndT6bhL^eC9G_ 
zMBE)bj9;1TMknTq{a@aiP+F%&lyz^IbW0ZShkYihUJ@q3D5>Z2FLC2UjOj+w#~QLL zI)4X5f?YN}IkxlD1;N_#m`lx9%!r;>x%65WdO2KOl@S&1{ECFHWpuAlKHa!Bw2>D8 zYKj8^j3GC`)aUOv#sp|=Ru~5+@~__ola5Kg?5fWW&$;ZYC782wr)=9%7ekj4v+7J0 zBAm?&o#cBNS|Wp4;zlRA!#$W8t<6abZ10TW8tQU53QjA$tv4OzjR2wTb=YHAq|HrS zaNnJ<;YDn zQ#8iKifgU=HflrQA=gYHBK3(Oe{(?1WL#i%vyboCd@u!lvh0w%$o@W#mbqs%39FtN zmFyUNgZ8IO`rNu@Uzk)?2>zUcdqlSv?m#20V*VjsMo`(+8UPSrGu z>=-*d(mvf?u-*II3%VaYlBtQF3%^DCCELc*RFrcZNfn9#P0d4Lv3uUy`o`pPG;k>S z2)T$$8R~dJaq~j_)=Xk7T_Wzi)!4@Ts_I6j92cm{2;cghGdG+lf@#-&Xu`9Ai0%2S zSEa>bY8{^%?{*~V;lz-(9aG}bFRbI~3}aZu`%z-(6IK&G3xPE`r*FeDV>O6=?m~^b zxk!(EDT|I0kX8}5ibHA9-sBzr*T4y-;42<0q52Rqy#r}8JAFGje&d%O?RIq!VK9BV z7;#*+Yu|_nx;*Y2gFzch`4lyl~#4*}W5WtK3i)>e*Sj zqIDrkI^0#+Un3o6yG<@B@67cjk4J-eH+G7TgfHDvTIG9s;?Z&Idw`xMa%4Vi2s|>l z45LU+T1R*}Yi0AlrD#Ce%Khx&t`&I=i{hj94OQGX=_If=_QCH}*6`yqs!`sy?7w8y8VZV&Dp(-ZzfcmEgJ&5jMhCS$CUV*e%en)}ux?9oAq&s2D8VBo~WTppRQm>%Cc#@ora3o<4WW8enf* zTHBwyNXny3V6VU0>Gx?*G1_Ylt7>Zi=aFN#?*yGy3K^b5rP}Y&enk+9`;-^-}U!EaGclkDqW*oG$F(B}(laj2EMJw@0 zG4yXPo9JwGKb&W57qO25PKyW$uvHX6pDuY!^I(wSn%NQma!zc>5P%CvZ&9Pm9KMTA zuOX3mJPWhlIj-F~y@{fp1ev0=Qd{I{-1#htA2QHXDcc)l8sE`TxgAE?K02$16-ueu zV<1JX<%2m!Aha%o`G03&PHaOkVoFVGOLGRi79=f#)uz8Sy zD>XMdhfZ4vJ%F=AgbOc^?FYh#M$~H|Y83ZhYDt53?y`uFZ@%_sv?vRnx@}VZnkunsX!6KN&oh zcxF^nnV7(ps(GkBtpc+@@gkQ-QX&hFGPgCF`&S}9fzVm8kJ2cLHz0WP}wZ|z{FX^a%#p{@+j~+$TWRk1-ii!}B zXEk#a1O|koIlkMGUF5?(Ti0k2DM7V-QKn)X-F)!6%-cgG=F}hRCm0h|P}TrI0e$B5 zvgMo?5bLh)>aUS~ms`?vP(XNcQZM@XugBlpBi1tLqo4IL*kY1~dLai0mzJ{r1r46^ z9^=v8tQ&gS45OTTbWDSG=w852FHK*M*&EY>LnQFQcJ=_lgY%s;r0lGNvBKAm>8t*C z%JJ6HaP1Ey)zrt=glC2-(WC9~a z={~HHP0~Ajk=63k%F|<}ki@BbqsU7_)DnJT88q4>I0t_|}nQW6P%2?~F zLcA#yG9_qR8N@v(UDIN;6Wvs>x85K_xgIQ_(1;4Kj3NTr1k)T(953nfVXYH~gl z>}Vq1im5Nj`RP&2nUiHiFdcf~($dh4I|4!bp)@0;acHX>g-j0DZm1nj4(p76>`+eiA35uw(85cO%* zTtCdftW_h6UV%(_dgO__Oa?r1c6hJULVdRMp?`}7s-U-}g9k8y3_l`Tdeop-bn-9q~GyN3E;bs<`g!>Fq$0-vG&4mx9(gk4_DQR3|$7?w0c0n^6b(oUA706bz-Ayb<%PVvzJQ{tJ3S-`YCf_w{|_J 
zuzJt>lb-5;8AFs&Y3*9h9;Vu*EOgE|%AEtBac9AC_ijLG>GaO#m-r((-Q5F;@Dz>Z z2^CI#HT8^_u$AiIRDXE2=M=@r?%M-QAf;XFAkVbMq7!Z#J2Kub3u-zY1daH$r3*eV z8ZGf*^>Gz*`_P^?WAzjz8Tj2G@R-rcwAVAQMK%(8#(%G(LM)0~|EHq8UXh~$&Ng1J z$o530od_;Lteo(<&W=fRX-j;-j$55gfj(S9jUS8aG-nU=GfOUx_isv^UahvI-Tp6R zp)e~b@`iOjF0Op{eEh`gN6q+J$0;o7AHn{#`d{v}N|nL4#L`KCEe!*L08qLbEihSC zaIhtLEsm;m5>Qp7uTa$1(pjm>^EPy}U%Vy$a=!SDE}(${T@H`Jk!<~r7=LK>6G`h> zgI(PjN<%-e)?esp=G-o!36~*W5QvjqeM`O&&tkxNWC; zik6$7vBSIp0lVDWBnBqBA5|#2w#?WJBNUk^DkfkpIKR^yx9oPrKS9Nf#ld_o7w6Jy zs(`rc>4|62>{lRt1c(r3F2#JKvzF%AL4J42=s7+i$XMGy`SV>-D`U=CM4&hrbsN4{ zH8bkpq$+%|UR-oUkDmB}gqLBcXtjy{Bc~5p2YfpBQP_*)2I1dpTXk6g1jb|Rys-I@&haN zPnO#Pz>IJCok}nm2obY*MJ_$(xIxwa(1i!AG&ni9pEV501I8lqxDUE^s^>ZD4u4!9 zcMZ(tDEFgi;VXvdE23TCVw-$2cxeIFuQdWuMiqdQs+XkqmU^1=jQ((w#Ckm|1wKlv zstc9td*`~XXS8+i+%O;DMkqom5y+hp{R^q$Xi zp>ZAEMM8iaVFes48yD1csGb6!NU@1-=jHF8`iN07e~c<29EX%Z*T-@EsgG6U+Wc-l ztST99YLhgb*gs|%!ioB3@_RT(6FTkLk37>UT8txM&3x0nv}E&z>%nWTmw33e2VfSsY1(LHf@|=+WOo9F z)x51DGg60%aVVOy{dI*~;8oyZjfDCved{rOPsEj-d9P{ z5Iycue)FDi1dtqE@j7BLL~U=ClGXrpy&YM}#VhnYNWa#S_>)|j@4*|NMz>0We`b~~ z-lTi+ywueJ$6aB^{PF&^mMl&&BP=!kCw)4uNdVS8OvSnF2ryMv0saH}2%r7+cgG8&YLMKk- zwBv0^nW>XGA%`zX384dK-ukLJ*3aV>gF!g#L_v4@*k6AN+Q>}IOW#kt!+9S-s>3f({a)t z3j4?3L)3vD4jvsFVX|vy@=LJ{M5ff`J1;{3G<}QZ11iNx28!buCI^Op}W}CY|!@n1;g2-|8$+LY&qX0iYIrV3RAB&^+@sAT>EhpZkw60R($xpyq%*Cu2_O5wby{MjI0aKQUobX0p@} zNj0$MlQBIp4rd9eWUxiZ_m2M#uKP{>h3}m9kE(tc{P4$=8>fGN3SNEYz#5Z;wNSg{3b(zt(5 zxRaHsue~zj2fF62fVUC}oLjBVo|2wAVnUwgvmX2oTL1aw&Yrmb3r6q*S$x+W#EK67 zVp+Uqoe~H_4K*Pv$|Z@2w6#+w*9~lrgh7p66meW12?3A*13!!Nq&y*7T*k`uSV~S0 zaeh4gPdHZ>yN{gMBqr*^n7#SfZDxuYM-24(yu?cNSDeY_hxc9#lq9bLSqHSi z5<;T4juiZkAws7m0FQI}l1dbttL+L2j|QQHp<@7~T^xzQ`HAcGwxulh-rh9?)jLr2 zuhwgYo5a>w9(+jQxpuCLpvOs!xtjKnbnBq7ZW@OdPt3lPxy-gGdiO=0QaJD9f%LcN zW_-Jqh3JIBv2cFX``cv2do0K4sD3is@py|18e#mFcD4hnfum!m>xa@{lP$V$D2dhB zOTpg~*)&VbP_WJ|hd%_FJ%q zFntsP%8qJiiXo~1dGa4d6c4bW)2vd8{4=i+)^HLAuzv-V$U5Jnw?L#LoaRDbtLt8c 
zb^bquy=7FDVYscWba$s~2_mp)5NT-;l!iq&5+W@~N_VM%(%l`>-3^k`A>A!~9^Ct! zz0cU^8{aQ~F^2Lwp1bC}X2zvKCENGV>tuInG*0c0G!tqwASXg^~%7Lx^^B)2;agj>0GD2J{oiuF$9%d@*1Whbp8uf zhP)DQhQxPi;hQI=MFY(E$fa=xH?VBt36djCaFCBCndcOt%T4En-$=W~TY)A%pFvP8 zuP&So2WPd9X0oYWOjkd`T-=31x`cgR{8sUc%F5S3Q%g!N{ka**TI+1wamr8@b#BQC4ziBd~D$htmCgP1OC3wU@^dLqnY*^Rpq%;BN~RN^Ai`!JhR$$}iGjKq*IT zow4Yq%^Xi^uA9+OM#kkvFFaR=yGV1Fb(&Z`!eb!1wHMjc(GR+l&>fLh6|05r%*)|) zxr^RI8x;iOb_Ar#UEjaS&Z|Ri>dVpZ7P>HP-luVV=o~C__ zoA(b^V(1mV@A6ps_w^U?;t8ij;Oj{CZ0DhAvbd$`ZoeFGx8)Je=J0U)qv2k5ff|3m zAX}!zZw-7kYkk2*p^?pPB17 znk!X9AMPzuRHE3^SA+P0kvJF*1SOmtlF>kT&lFRZP2i00%2Os@PtEIafKFn-=LC$oSY&E*Roqzxd-#^GL!IJd z#QSw7W=S8UC0CWhM?&K&$xSdNH5NUyBeO9= zti}Jfn=`Ho3o(4cJT$Ns)Y7+~TLti?O6X={5vusmd9dspo1$}zbAHO}Dz(dO~8Jv=F-@-yKUAV{SAv$19ee|D@}KfkfpWo+$U! zao)#9b>66uuP?5=o+<~z_-2R+2k-HfT2{#pq_D>F!qRR+Pg3=)(Q!Q`abxi6x3iPH zq)z0psN{{lezVukh5A2_o0syPp~41g*N;9LisomK0#Bv^Zw~e==@ud~bgq6lOdDAe zA7&~E0MH-^8pTQN96dOVA4QA*XFw65pa)1+0t@L$W|&FLD^oi(&2ecyh@3u?iIe2c z_s8%mGs1{_@c(rK-otHK$CW+*_sVk17~foPqWNv=4ltO%TqDm1V|esrb-zg z2#Spv zGB4tJyzB$;nTWWBX&>)uwt#zUe?@X=8Yi>ZT;! 
z(-n@_ozsk`dW(%$a+j|{p>%;NJ>+5B12Sju6w{e8{vu{D0ZuHHiFhVJ=o0u z4o&}uB7P_TYZ<(YDJ~2QC^B6X$PV@Rr60+H5B~~`)SAFHw|&uMOC(bUFhKM`|y>`Ny333R{*tv7YdcNe%nYF`^3?g%2Bt#oHtEnuBC8uP7- z)X}gtU6L={yR&`V=Y#$#JXh3C{BX8beVTAibDLc3LM^`Uijap$n(EMs4aal`O^1UT#Dw`&T$S7PH5s3t1j1VvcvZxs-eekM~f6PnoqX z#58||sbMt!{JD);&Hu@^Brfu8?HIemXunHuG{M;*{`>nYw4WXx@E!&IeSr`*^P802 zza+!I4z9lcPE9<4iSXsekxSfCv{c300_dxg2 z;(g`4slNJO8uQNh@Z2d61Xb$mR})oi`MOXeJ~{YXsJi2ipfwUMEGCk2C<-Kzuw0l8 zs1a&DJ6G?C%G`0!*dWaj5aSnUt-$lUTT|9UF#HQ)B*pL!``9uj*^U)%5CB6kEeS*Y zJ+?e){cO(OET&hJ5%w{+5RHBlmPy$&#fo>h`ojxGoNNDcULVBou!Xk; z#-MAai5p^*V>t8}koltGBO2mRpvjFqNl!^4NauYo>VZ~F+~DKdnt*QRI%_CfAmT1| z)yhzbqz7pjW4{rp~)3dhJ4jPL~ zy_y6vk`4O<{7X6YG)u1(_hzw4+IADftw%U9bv!>g!Wy&pq)NES{BDStJDwHsJf_e_ zsMI^1Va7o@K$>BTj*#^kplyc`7p9mo;E>H2b$4 zGOntVfCir%!gpWitZpsHFH)m;;BI1n8fVBcBz|QzK!0qpOYTU`W$`16l^<0XR+U3% z^oer@4Y{nCx#~B$7WTxIIa$#U#{<6ZwRie}*XCjpElHwj61|o}@;_YWm%IQ36Yc20 z;-RyprGD0US?yOyX?IJd`cQ6HRL>{`axE9{|4`h0J(vr3IVjGGWKQ~8V9d0#NL2p) z-ion*;`_sZ46P;U6WQImX?dygTYOcGxIBA6-|w?4UJ6r z?6*%{8v)T{aa$~WYzJF9YX)->hajk_#qHDO{Rwo-#p1(^^Vb2xT?@XiF@B}w3@rhx zce<{;zH0Ty&ojfUCjMP*R3r>F57IN)&d4{Vf=*VhWy^kKlg21GUI=yXXG^Z<9wv!Ii7n936t=^Vlxy~Gl~KQs_l=k*3Wjh27^1a%O%bI9U0X1xCWEHY)=XnVcZc^ zNDfJ`+(M;XsqSftHE8A=w()~sw6%z4t9cu|O9euKw(A3Y*P|^8Vm&iq-qj*l_h!h7 zTU&FW2usn!x@h(zho}D!=Enz~lunis^fxhJbM~N1VPF{t-X)YuU^BzKTl& z;2W2|It)CTj>p|=8_;tjoC$vEOU zo|87H_;>WG=8GGLMQf1ox`*OF4p`R+}wEhVIn)YwiHccniw zPVhK?_;_vLqeYPUM-^v`Jn=*b>0)UGr+W-?tBX6@4i`SYos7NfL)P2%3Yqimu05Bn zz2U>Qgp?KJt0EH)%lZ1*qoH-Svf%UOCQY1~s|isEO4;|btzyLQcZU>}gYkZ;5#o5$ zcI-DZ*nSF^st@YI0-oDq0Yi&XX#=a@q|3U25U+7xm&>H#vOvlFTj{)Jak1=~hE;9% zO6wG|?)y!xlj+G37mYEOT2GHLoU*&d;U;{sIL=9N?#Tm-LXprH*&=_TT3KQKHlccl zbwx608ZbRwRuke_3{5?7bF$th1J;*M4fmHc5I4J7T#>V9Guh$3b=;7GS3166`Auc3 ztZvW&AAge=l)Lh2Q{>W4u)!X&aY{ebYQTs@PIGq4!)^7Jjoo18>&najQqlc58hBPy z#pb7-;7$sby!icm#03pu59V-Jbn^ZpIUDbg$;(Wv_QYqW6u9?u^`#z*7sP0qFg?CdFe&%PJn&r-mCIgB z!pu-W!EGzLhWXP}EXN9H{2tl^RR12$GGj%Sk1$nTsihy)BAr`YarzY%j?~?3x`D2@ 
z`_fh8d5ZJ|7Gp{*4k-DigG^7|SSK9PUMFFz{gVItXFl+T&*~7YVrI3Om|^d%7d((e z8{rbGiY)WGN9zhGGiq`1c z$W~8Ou&fmECr){*KF4j@yw|eH*n?SKA`wCwmq=$YAE!wk;}tw)vxFY&iqwz zCP}(sKqH006gd{e^$y^>gnrZOlw4sCtM@p&(UZ%>`AphMcm9&Gg?OXJeo&FqK zsF`0(m*JWpEVkKF6h)b^QtGk^Cs&oB{O2J&+#;DfBYYt5ewka9 z_%=r7xrK%nbrthA5%f*!s4|NT;D->XF^yNIFktFB!WgW}qUh;ZAn#3IS@rmJeMbiO zX~A$lMM~FK*4rr9X*^k@@LVCp@R%7zD`T*gkYJ>#E0!SDdRCc~HtLvFHsX{%9Q@`% z0pW!L3hP64z0CQayXRubZQF^rk~ay;$dR{NS2e$?3b)->$-0|c!|~kyx)=N!KEt!i zcMYf3>Az@E6VUK2_?h1}`1gO0w5>6Jc}tdKYVbE6?A>4PMV{Z6=A0m|%A4Yb>sLe( zlVOX0{_@Yp9nga2Ls$VdT$xG?>J(F#;fAGED&$2G7X+yS{hE`%{w?<@H6Jc2&i(T{ z#I@fiq(^;}wCNu2y=pW+rJ!w=JTa*Xrw*C`T$5%6=3m&DDvPwY;2yj1`NimM+Oauu zOdU@f#XG~En(M=3O4{WOeT5*8$h%Wa;hA#6;Sx{E6B0rH1@Vn1&)leH=&__!yPjj! zdqC6$8*x%T++uWlsYKX)Q+Y>R6-xWl?W`zr^xYr#ap2mX)1))`G%?&pT|v{{bl}-R zUXS?p`Ht)FRd!i^O?A(49H zMfC%5Uo_{#*C2sWx83%dcfDGLu4^9eZ_F8*<{ZeRBX7MI-zIhiH-EcWKK*?HuiM_^ zgcKO2eTf3%UCaHb@;*Oi{{sB-^_B3!1KEf1Hn{fxP}HXAOw^tjf5OBs03@}9QYR|^ z&pmeL-(L_1Ba#~JhwvUU-U=TG{3ZOke}|K@I~SR>NYyn<>k-`QEaX3ud&K@T`=e$P zQ(VZJR+l}RW`==pyQaOGzZVB?n8{QE^Fb>;{xFHrryoeg?tOy8Y}&Q+{9IrgOl zrxs@{&srcgZ0b;Lm{{==#lG-O`=E?4EY&zF=t0o6ctQn~Wlw2b<2@Ntet+gUaA&2X z1@30n8PSbWAZ3H{gf6e`Rqj^+ef>r~?utXc=8)_AGq;+9BOtt)KpF5l#Wul;$K|?h zs-Nf$T>dFs`V&x$=?nY6mi6_+!+58PuG%U@ALmc(dv~E5b9@+j4NE&G6)w{|oZF$r zWZYe)UR*J*TM&E(%$P>B;a|J@s*DR$JbI0Vg0KZANf$dkB8%Hsz_$5@xCByk@Se;( zLe<|y^?O(+@ba0QITDnIjIcUknF;>&sWH2bxCu!DJea5BAL>u-4vEk;HAAA*{DBQ^ zS%Qp~&W8)i@szX}&&enK6yb9`eC`4Y8<^RC=Dkzb}cJ zimBsaYThEPeYaS$!s8rSO@iH*9l0;- zqdI%lMkTOVm>D%tRESPLIzmMO&k)?jXDviB@M;p3OZ`3*V(*BQ_DUW&Ix)1g+rh?cj-P^TzTAPzD`l+%Ej(6_2ccUx}0l%h(J-?5JbDZf)C#@bNAeA#LI~=#?RM)VqDEJyR@k7r!?F?oji3X8-H|SGzrt^QWCl7O` zWUQB#&LG>rvG6hqB+=1}2ru1l?gKea=l8*KIIz8LlX{6g#BdiP%Ozt+1Y13ZFdN*FPpuFVu--Yvwaw%>P!g!MbuVMrs?deRBS87tjhKH5M0qnlcW==hZJ2 z$#X^C2)OWU1_*2j2O~;*;%{{N?=&kUk?y_hy9^~QTP-*^ys@G+7>k{M`SFHsC{f6( zSKOW9p5LFXiT^Vnu3+da6tzW10I}{+)y0oaoH`lh&7kAo7rm0BrEaBL8A)Q9_MS*l zCFpYU+CTR?J>>MdZu|9G1rBVw?|h{@o4*WgsfbVscw6h!bkyB@L_`YT$9LJ{Fx=fx 
z+I*N{{ZfFSO)^$1`N~w)uTT@WG03_e=X2oY2{_v>hg8XI@>LN)3LdSa2&EnaM)jjw z@q>0kw#GnXM_5jn^^G}c*RVSVc6xy5S{y=*GlApT$q1tx^x2RHMauhobw2VO``E6Y zFOPE~1y9ElTsC{p{w`J6ydLXk)k(FgzkZ^ZzDMiXge!8RbKbI)F>Lc{rxLw%VUltt zKyrkaRow*UL(h^R)0GczVH(a7i%AN-kT-R(it&R%jMTn`nZ}h#q^oxJ z!u67*L>JA-ET73s+~+A65#PqXzxu-rF|^KFY#!cpzN%ou84+~A+gY4tJVy_szy%nb zB(^tQOvGiZ(PiJ7s7k-BiGK$;vsY82&&jHhN%hPhwnoe zs)6%+htTq#P>-Yp7Y*OMABSWp!hkn?`I7f%>UF7qE};;HlaW-0XxmG_kJLCMrG+KhG$&D~XuViSgrzjb(zH>kebX-9C=pyh!#@kR5+y z%yio-{l}5B-?w{84To32LiJZ~e2Wv!sSZc#cGD2;!=eJ6{*MIJ#4!3)YZ!~ROEUPZ zk{mNx5uF2p>Y-NlOM$C@zg&T4gu1fd0K%+-Uv7SHgS40UmhYI|;xp(q0B`3RNek&z=0CQnJa* z_ko?pW&h2M@;>d5Eo(E)J>qav75OTyCrzW7aKLV_Tok`u^b?O?PkWM@5Ihb(Q4q#8 zg{j$ihCO|IUn6;GAF`h?X}#cIa}=5?TTh)YAqhTV`IqkY=2Qg8&WJ*(nSv4$Y(oMT z9pXd`Rp%us>yri2=gCA)d^ua(IUtF=R3-fF_`Zuf*Qo2M+YF(L^A3&O{aXIbn~RPT zk4j2H^@iYui^j0T-)v8e*H3zqZ=o0m+M&yG3mvOA*6pMEccrv;pDvH%jTJi^pi9`@;&~7`T$?i`81}{F#-~nO6E^icJAvgc9;WXLx6$MKd?=_4Slohuy?^ zjP1lwOiiqnvrx0m)JdbmnPz=5pk{Zgc7zj@lL;T>CvJ!=OC`rWSe5SXQ~w)k#1vw6 z*M3+Mhh%K<4M+_i_Y)C3B`A}BtZVLpblS?n9^?cw!A~1y4`A8}^Yb4;m1vvSmTf*R zN(o4ZRFpbgcxp_vZs&wbN06NMl#WA*ka{x~;kr}&1^W#8G~_t$#BP3MRAWvKMq>8joC z-8%cvy(=93eLf#T28HFD+LOwCfrj(RZkO{l=d$&)!M`g7zR zR2)pr>#BgAoKB$)lHzj~?Bp|y*l>srk?g#tipwCY; z3hreKa|#|j#4eVOn@+Z0-(MK+40((%EY`F@r?*a4DE6j3WCXu^9rg?ZT{LR*y=11^ zn_6e$X9MmC{VAU?7w`Wv18cH#!_7Janf(R<($DAF4jIiq21e0r7URx6P!6mwUsvAG zFQ>5EjD`mFCeZz1R#T)nb*c|VZN5CCN7uY#WPi~kt$zOr@R+ryy$%`N2&UA-zMqqA z12M;|5OZG@F|pYT>5UK0Mt{eN;YYhD4fU6D?rw+Dknc%3xPv9l_x>KJk;|BMo&%90 zDJ@%Eq?_);%i49xnCgY%3$}I7`lk%9H3IF94=Pp~Y7b)bs@q+)X*U5K#V%U6XS<*W z4yE4NHi4DIdiX!w=T87CCwlIW&UvG4yvC2d^~rp6O_-5xco;Z)^#LZF(kEvcY~y-- zw9u5Go1=dA`!e36L$3Rb#guAWwpJd8`>!^}+u|ip>{`U4LrmYxS!?{8Bx8PA5IM4- zV$N&~b|hVtpfG67-_xC4PP>jyHAWV9|NS%!u^DgQ{Kkuzrethj-G3Z2b{W%Vq3N62 zSVSvGdExEmYxK8@)@L^jlOxizo>uD;XaCcwQ<8M3=*<7mTTRi*MSlUq0*?Sk+G zJ0oK|KKvbJz$`V<1m?EOiRw=f3Q26y(cEhen_g!4@xZLh($N%$NuM%z2swssBOotL ziT50549ft?P$sq~p0m&m293)$^-;3O-%|mXKef&rZd?2&SZ*@Aw0Il=5q9&FRnP~e 
z`1&g}&7{z@pojSi0Ut~+KqkCK4@0Gw(64dYL#+<-RFoV!uC@HQ!bx(^laNj~FLCC5zNK7SOR^qkFPjn!LwG~-+XrU50f z16NT1y8%R(KDf>EQ!kx|>cs-87eJA$M*$=?u3tc6!C47N3}2q5^|3 z?@=;L-{_w)ZRIgg%b2!2&BV|29ZaUwm}-e8Sv*o0eyXNM=ml8V)EUdSyFug3sIWJ| zFW^m}P(t*{SJ3kJg4HZ_3tLoZ#U(6Ho6XXYUt#hb#C0xaJ&M3o5gpd(-#la_PG)47-{FQvWp)dF6h_*jdi9Py>{ z>YP{B>`*@D;46@J?=M5*1BN*oRW73xWk~37+)xDq$gcH+iH*g~WS&%+wawW7{6FTI zP*^!|(1@`7KYX_3K?NU+WAl@%xr$AQ-W97)|650rm2emDbe?@v)(O9~oNdx;d#zS- zBC<85<`%P@AMVvRc=#XN$dX}EwWTBdy;^nR4;NA%arFOGv84Y^6$@~0+t1p_@X(E) z;1gjR%M?P*kU!ExznS`|RKWX;euU7oc*s!gQKB-3YC0C?;O6K{w&Z-^{N=XbO{(uk zJNt33`(AiDf)7(8U30_87FFN8hb05Yi5DV63Huw^I<_(ah?I837d12&lH4-eL^VEzs9o}D|4SKF2ThR z-K>AwyaMn_t2-XjrCaTL-#v%F_U!Wz2iH2MZY=$VA9q**4;t@Zvjb80n=RJ4i=NP) z^gSN!KWTN&NUE`{rIW*A%Oh^3mV<78O=fA_8}_W^Gd|8RU`eZ9kzvs|(8da#@&A0k z=u16*kTZKRO4iiomeAbN?MMRxg+kW~7Nkm)F4Eqt*#3Pn;Qfpa*-}iOOBYRlA>4jx zB9}7Ei7O39&i{oV_Rp*Nkx~c!%=1L~TcPmk7uMZ50_Ur~3e8;+fXmOgbDiSZT`VMB z>Tx0Z{wdrsWH`kq0N;0n!$UCOjaEqY;%o%2g&?kQ9nPirzT~SDvD}80)Z++gnFL`n zmun-zqc$nK4+jm+*v2^}uL8sDp!fx@@Mlz}iII ziJJLviyy}`nh-D9mVqKuL9+eS!aS-=o+S&S#AeDVW^AMJhdJjXD`}Zd5B%H&<{y2S zUiOJl6yI2pcNvFUKD#OSN|rD$S>}v7mr&%f;ixhGxD%24mU{zRbSasz{#rxhjDzc> z!SoEL?~-_l_*3aLy&IQtg~ZRUr*4D&Sxz@Eqchi1g&Wto3$;cwmKMEd%;#w*)o&$r zV?&&3gVy$xJM3-qqOuglnlI@~t2V_?q;23Ib|m{7@uv+RSxV!G&u=d0Qm$bYIRu>N zN%Q0wPV91`c^2hTC(|Ynz%%N7yR7-ny$vNte zyY0WuCF~X@7MPLkh<^lzC1o3M91~aq=Ab*FcShG>zDt*bSjDS=Wg&ilbQloA0(S_^ zm=7$B(N0`dtBe?I@!ENeYDbg7s_z`1Ggx$Xv-oJ-|)DRirZ3}@| ztPLH%5IBzaV&@7Edzp7*M+ z8UF4wQO|@O%_JiE_Im5K*&9T#`Kpx2Cq@7x4L4Q*d=ne};|jVbHDV(N3d+I|LF|u7 z4rp&kX8{?qF;Ocg$rOiJ@vo7OQY=9bqqH>PD!(t5ooz8=xlhlh?>nkVsx1Z6rkB+q1@St>cyhuE$GFN#c8NS3h? 
zF{2zcF3)n_GJmwpvo9Uxs1r2XsxAbU5Oa`i@pc$sfgQuddwz}HA-o-Z?TyM{iEnAm zZ!JfC2`bf=nz3eDbv;Hu8db#LQAOZBGHuWk(Qwtb*Nw&us!$?Pga($$( zrO|9HtcWWgk6!4|r%kzVw@n);U3vn7Old7JaJ z`oPhI=&e-FA44?rZ|F{s!k@w(uWQMUAa%C1i(tgJ!2ga9ze=cS9lxM6A$cpR*E>(f zf9ENm*o5`fOeo~YnXSF0>|*TuzF5L^E1FeuV9T?4rFv_KQF(Woc@NdMuo_fkUo+^> zSvdFB9OYejc`9y(rx<}@UYvh+G-6bu2s*Ow#uKj|9fDU&2pBx=5wlv@&hcVWa#R05f(X zt@5AYp6n%J)TDthvXA$Q*%m-6Whwj%n<_>frrk#uMg(YE3sIB1PeBUjVXvGL=ea0R zcthmX5Bcj0pDaUGAFT_~g3QrP;ixXIT|GJ?dC`-ashAN|bsADJ5VuQe1d3R{&)r;fZF?5wmC9=`^Z1LvrnOh!XRxw8;4cf{UCK)yeFl8}(>^m6 z$_P%Y!iv0G;{9*A_dU?JyBe2%=aSWBHv*2&pv8lYAGq*9+#%?~!!SW`k_q1-Ny&A~ zbR}9m3b)2Y|5JOmoDY$l+m3;#X=}CR77Eb;Yy9~AqHExyB}_Wf@I%@GcH8*sq|(g= zWzNd%l{Q1d`jC#xDAfn+0YHE8y^D@gW7f;_&DgByPNB>h4s@2jdmlzHn|RTf{7+ZQ zr|4I>5ElYQn^5{f=7&G~lY@wV>$kERF=7$rLQzF0e(eAFA`P)_^M%)~>+ViFpn2VT z%-oU56Zdt7LW3_(N^p&OnQ5PF6n8Hra3M3*#VOKHtCW2S{x%E#k+Io9@CmeHM2vSv zQ_dnYl+4U-&G*QYR3O5%4Oneq27C-ggQcPHH3DE%=#LJHaeJ^!;1?WvN_H^0whZ}V zbmEG~RO3xx?t`r4g3`kYg;J;Ci@(?7Cwc0xYixSe3$^e~zkegWJ2b5$6jF)7%0t}P}!59lq4S~4kg_Xb`>DgC#teT!^WD%T}jFw0E4)? 
zr1_u|#H%tFu?l!t=iiC{D>(mi*g4iG0Tu)=3^_V^Q~MC{4nv(PNc7z9OE1)7It#ug z+=zpSg7g27EG_^uf5j<8QQ$wxzI`{HhXrvM#;ITI)t;G3qW6X%Gsc#LC7RP`!UPgS zh_lf&QQWN2!~OkDnfdirLBLH>flQ3DCV%^UPJwwh0gcy>AT^qWK$W`jJ6)yuf{MoQ zV%w95&w*atj*vBfUe7KP|&=1~v{ z))LpPN(I4i7K{n^^kv|=zxe!;{}~C-jgeB4?TMPD6JCl26y}s0=}HbzeLh)w&$uD7 zl?qs@l_}blTjjjCV3Fo3Qt9$Q+6DckQIj&G#SyQw*l|KsuSA=WsWtaw!m(PK94&6*kNt0xV* zHd_0T^E{Go2-QofxmISP`n;%JD77FL7U!ZA3rY=wYuG1pJUe{nE}{PIGjy~lS~=O) z`)ON;Xv3LPLVmXNNU*jUzjJ2d@{rw~7$RIxTToZUZ*?p)1OzSDU#V21V!`Z2n-*AH z+v`8sxXe$=8CYv!iTk?e?)+){dYJn)kfQ2jNsobcT}l$od=#~cA4j6-t*`kav_{Eo z!fNz#;sD)a)#!Wtc|e&g+a?8xm@b=sWt!DA;ksF)q1AF!X)$ta8A=P#O~3H{m&ghI zuNNFZ266Cr2*|BEvcUF4AxXMb1FU0u;j2rO91A#Xk%Y>z3UOs0;Xj$$>$xVxewJ55 zCdYPwojW@*$O--Y8_b5HI?mv`OsoAk4rr=ldf^e{?0`}qEDGfawGvJ6W+i)+K&{uL z+GcNy{IKd-Lx#Nr;sn@HY%8NCpbhmY71}0nt(g||rGBaa4Y_a%fBQ*Al`PF*8Af85 zX@*mn8lB17y)kg6y%gW8$((DNB7CqK@}dHDfimFLfft{y5S&jWfaAQwz6ad#*GPDJ zh75pa4i9M8#U?&If(8zD&qtPI9Ncxoo7d#t|8At!r+yjYD{|ulK!n}V#?8wkqxxkB zha9`pZe#}s(}5ZyR|w3&;n^G67;!GXFJ4u{p(q*Q$7EReHHhc&2MZj00L}vJAH7dp zr7rW`E+;28hv$=xE>=4<@2`wRVw*3?vG_hd2EW(>cc?la6>wEN{M~@p4Jh)^LzlP< zlh-6+fnjHON9=PUq^(6$(A@7nsA=0jA0-{=_;0a!`n`Lxmu`2^zTwu?z0lsgbg0n0 zdYy8gx^srwbh``)Z}-7N8+dv*o0J3k*No-s4KO4??^8ZA*b2GYmNGUd2ER2nU+rEM zTgoOa{Ib`kuzEi=y^cB?PXN1h*hXX}vA)BG?!h^!` z%L*UA=8OfjDnP{nZ8|7tgurdrS2c!+vPkomQ=ONrq3$<5toX-!0nL%$! zg>uTJOsm^|u%~+t76h@nU7UKXCU#e97Bj>wR@ioLL9Uiizm}g&vpvKBcxOhr5*0g3 zsfbY(lEf3!wTA7LV0)BoiYFgelp%5eC&JRNPC=3l|L~ePcQ0rPCWNjOdOjwF$syOH zsU;@fZDnyYgJjMVEP+uZ=PQ*Y>x2@GLp{N7O;Wk-nDs`tJnDgDa6OUVZ}7NZB}rA$OGm7);wFrHR-eMSKeFVQ(2v=HA&1F?*;(AVhSIHVejIgw-P5-E z)E9Mv!k9MKq7k)UGuzQSha(o~FON=r}SAef#omZajp63_nW_kXwS*{txerGUIrq;4jF9cm+HFe2amMsHH~t#eBqLrN5eW$5^xj6tWf zb)Z)RGCG1RkqS&(`I`Kx{L5Qok}|r)7@7@wQLnD)6zlC2ucg!LoJmzrjOx+@6p#cF z)6;O@e9klD9H>$TYYv?7=8v333V!_0X=on9G$h*xnZ=f5`)5*LLP@q;$i@$|tc6D_ z^{Kd9(MJg44TM=~CT>@jx{P8)_T+Z3dGaD>EJ`Qe1QzK%n;;+Yr^gH~;NKhOjOG7! 
zg`?mdv1EF&L!s#7$@1I{GR*jx;`-Tl?s<`rV;s$NelE#Ssmu%&^XPO^72Ib zRHw=Xi5;Lm_Ll)<+PqwvM2yH0M@weH0N44{l38p^Ewkl(SDE;zl1e8B1M?_Mm3E!Y z9Vd-nJm&SWGaQpS=YT9wp!v7~y&x*jJ~hpn#<7s0T3dva1Y$tkBA4k@`dRIHEV&h7 zY~^3?W2pCUkg%jIv$sF)g$vRBkwDVws%-MsjR(9z4o1i)AaMJ>1yJ(uhsZPW4R($_jFi| zYw^m9pBU>#Je}_!GUX(3Kql{(?K%D+-CPjIj<#UkZ>-Q}4_3^n4Pk3yJ@$--e7QPZ z1eKe*IPvXN+|H?VhB42K&@~_AN}t#JEjOY@&|T98e+sdE?7(ia;$ul|MbINFSj>2MY`W(xrz5 zQ0xoz4_jZClR1dJJ00-!?Ah9`IZu=-4iJ#Q){EyQdVv^zza$e-Ofyd<2Z3FWnjw^A z6|amW*aW91Ghl&E=n$2sWK*B{H?uI;vB7$12Gz6V2 zh2_|kR{v(`&JphaD3g5#JFb{K)Vm}L)66ho#Qs=?_ z^lc!<HdD5Z6u*OTDpnQj~0XmxzUJ4?}WMIbv7r2f^^6xG$kl1#rlkng3rPEKemEf zwIV7j?PP>bG-v2@?^8;>iIZjBM!bc6`=$PL2{W`SUM%ol4 zOjF|mSL|Pk%_xyZ-z`TLk@ue(6msD3U)YtZ(&ADUaphs=hU-Ay=Q+)R{BJwA#HJW5 zTrG|EC}d}G4&AsT#UJ}VeOAv@g)5Nue==4MOYq-|j>Ow5&VND1b>M8&M;{q;Yn$r( zs#1QcyPS3)HdW2FxA^1Ah$ED+eAA;evwhA!`~U9}0Q(2Zet3G1z5aVzRKD0`CBS7@ z8e$YCy3m^xK8L85eapfP3GjX{*A?Q;NJo;*9&k>IuH~mYU=$pZ1q*$JUjZ9W{v1mT zQrV-xPBNxjl`HyrwO*l|Pi-vJR1-*M*#j8bUwyYc-cg0ar}JfZ+FlAV<6lL&kcqAO z518g9Ww&f;G?YA{HbZs#Wl)^(SZ-c^CeSjrd^-~pBB#hk9ebrI@q(oyw{sRvYn_P2 zjtcT~)Kh&X@}g?If_5yG^8E_jG(n~_e=$5_d2hZ0agwA>euq5zd*`oM@o~!*CtTo^ zX;W<$sB|hvhH3twT>n)DQ(i+C?OVB7`e>Y`OH!6d_~JplGu;(TmFNkA_ulrdOsy;* zK_!RMoxoZ!o~V6BzBT%L7Mj*+yd`r{1vg7Ttp%GYZD3TVt&w`Du<8ZS!M#7r_HYgU z#%w&cz|OsoEdEumbWZ)5CJ0T;y4~WTZm$}Qh}mIcFh07wF%V{{2wO`%djv7L1V~^! 
zz@PqqBBLaGn7)2viY;^3?SoIT8gbk|C*L7>+}~Oi1ZOoV!~oufC^5;00OWpFBemEt zryuTXSc9}0U&;V;=X;+d@D_yDGLeUF^x3P@83DmBkO4mopy6g+`4_~suY=$cSr%|& zdZSu`GS)X>R|+mn9PLXa0#qODOjVz%R_YPIktomPIaN+~q3qYlNPS=g-H=JP0mow8 zzw=~$P|OSR4_g2$#DP44rgmi_&>*M=G1mGKG+`Ye_LdJ_4vgQ1F$xcj9&r#HTpJzz zC17#TNdllrG3^w*34co@f=|)rCH@esYIe4H^EobwCEv za-tdBYy%(D!dE2auI(U2K~I&?6J~to(^~=u3YQdjGI$q39-J?0(*r~~ILFGg{+i`9 ztigK6$b3uS7^@1{9w-n~`Z0S>N2Dp7%RXWhIUku;rl)TW*t;=0%5STZM>NqKuqyqA zh!7j8qd%Gg4tqEUhNTM9I84JLBW3(k`n!C(dsQcex(DZHyKA-ZZ5(jr!n&q zpmk^{_)(qfg!hq4C!a$@49_#IdeD{Y=(YoA2=f#$z)J)P9ZP+g7cy2feyLa=DOyFr zrI^vBqxDQe2-|fSGO|@S%5ikg4Ty8_o)y2sm-X-%q@;JemhbQcc`D4`RP;!&TV z&0&XvIP4KiDbbSNEKe?TzTg?z)G`Le>tH}695#V|oncDK6jw)bmsj#6b%%+pjX~6SCzRr+pbI>gnlQW_4_BdXoyM_I5Ol^E_#-15z2pBD zB3=OD_y6IZpnt#u{ClkK*ckuI_n@KH6VLC3@3T$cI&iGU&R`-wJTMXn>Lv%e}h)XnBt#J3SNR zG4|6zrv_Z7HnqLlCL}i@d2b(1hU{6Ji7M8*I)=IU1P4cWG2$BXwtwj^{bjI_c>FP6 z{@{l0Xn6#fG&solpPsFk)3;OjW?&oJPr9kQ zf*_?d%$DUVb-Dh5fxVa~vuf5=O7t3D+`sie<@&GDL^hM%!|oKJTnf0O}zp znswx1qdTGkw3qx;1=wQRM#(khf^Oz=0m)h%=L!az{)?wg1;la#Ejz#0C7*Nm!zacr@ksrf~)O6BkAcK8|pXhyn8K`L;o7)J0)iucW`p~t(h<3VBOxb-lz zZnyNQuQxxik81cc^LX@(%VVkF>jY0U1T+V}^u^x6UTv?q_jzCM>Pj)Jj|(V$QLEK& zR0p{Y$o2I2&`T)r1quY~1G=?&bYMTR(ep<(-MNT?tsIoTvnB-wJERyLxAnI;2NwSV9M7jcC-Z_`S!vo2L;Q z4v3qo%pU-6X9(ue^`j80)QFr^j+zf>D)$$Qi|5o9OYTz^9R*fGOK*Gv@aj%8 zUljOawQr`-&9)sjO@v|XY|DC4kDP|qs|>`w|MCN8d|~iuc0sJ|C7lH)G3Aqk$Ei1- z5oG!|9nZU#y!f>9+1D=n$oZH(OP1lGFC?tm+@2Dmq=MVF)YQG$kT>Z0sm@^6r%%_; z64|LWpTa>;|3F!N*VD++hkg6jvTu9HUc2y5OT-UZ_Jhs!iQfxtyTP@$f8G5?orMCM zPE3M_`^F=edejWV&OfpAymo5Vc`?TP1-@?k0y;=#h>`0OnfmLeO2{Q2ojJElG4BVy z`8?emV>TocaA8wjqjkB{#LwRMRXws3q~}TP_RF>=H{T5EjxFoVzUcHGN$6Rmj18Rr z`uY3mn)6dzG;?NQ%!?!Tk1Axpu(7u$YC)uaSz)5-Q)8AC`N(@jFHcgNL7w+|IxfPl zhH?(Z{Qo*y>HmpKYJal+U(-Mo^JS^`GjG0M&mmONLzZG4$NB(9@%EzWzdZpWu`7#;GV`7IX^5v1RzHZTjIQ$ zZPwE9HW)D;a8wdw#{egJwT?V$5S(*)%oJ>Je26$|FoN~b1-`>hUv34oCbzJG;|O6C zMobaANN%X326tT{BHhzQCcy`K{?zTiasOYK^z+B#@Gln#H6VN&YPmYG5u6h6ow#@u 
zlou~D#U4{&5J0e;YZ^yyuc41U+(|K@ar>p*&=iQmW7u6^e!M=brWBl8me=@%?y*Va zHU_FTa4f}D|9j`E+6Br|z_?TpU@Rg^B){67G*^?UKYRA>#`Ecu8n=`UZ)?;S^x#0r z#}Cf~eHIxj=;8y3x27op*W0NPk|fu?J^B>@r!bH!Ra^y{=e{9EjQ7PaD%> z-yFU%5De|x@##r5;zi(IJafBAHF?cuC*8Ud&Ep$+_zlB)+DW>2NzX*4{F?ebP!&V? zRSFnKWVTVXZ|Tn4X!hGbrlq+f@x&E{1;tB}NzJDq0ftwLS8OIG*lqQq*$L*nZ7x{nHsTpt=%R8nLa?SbGUY<1=E1Vdjz4;{cdC2|! z-~73Yg@-?0G<|bH`pL|}SoY=uLZ|OIB;wD|?8%?lyb=4!+IvX*gxjPJ%B_>3@XC?? z3ZJKYqM-NPl-r}ddnoV9zGYvMAVvKaQ~AdpGjbN}LAcSEb%^?DP11pkDuP5_pnAa8 zTKl(|48@Pl_ToK+K`D=#FHg6>xb*e3QiuP~_}xqLrL#uQzvVB%UcCxyeS2@6KG(qB zW1oH{7;k9un*0#Y?q*9^1+IQvf8_tgfc}^14D7%A3syW!^iV~TvF5~0!ZrBvP-RC?9jO`)023IE(b$1ocN}SuFy&=`juU=m}+GZ$*2X(21vUVB%%Xrk96<{Y_HVYUb~Fa<~5(o zp`+d}V9&6;dnA{IyvM-mY39qgV{Kn9>h|q%x_=_uU-|UjzH@BeV@|8Q3@MSz0Wp;i zAy{@fJ-i)FX8ziW@L-u(xOGpFdWEqKYcwdKrQcyvAYf^bUcFCV^eVdxW77eZgjD=o z`%b**;|0PsSEq}In!xHFNz%`MfFhJRm<;1OPLz{*sWnQPWn|%=YDXzXAVCO?X3JKD z@9*1|c*X@k1)E45!^EYc)>S%MR_HvllO%J>$W7g>6D|t5@hE!58hpnbbn|x%h3$6z ziWpoh>aVjZxc09dK;!?l0|<=m4YIHs^Toz_hN2W0XQ4b#A<6AJ2pX+IO?k2vY=$287z8sGK)^;S!mU>GEMtJG;s*2+0LMZOu%NueY7ihQ z_Bql42D2Zkq3bVM4x=fkH&=~Krt(Xu6L*mTcP@RPuGrA*%3GX468 zF5$7T^sJ{k4U$afMip(YKL}duY%~kv$A}%E%Rv1!Sot!abA4{jo2!2XaCu#cg*f!x z&5v7hlL_nQ)z7zlo&n1)5SRzB7#$I*;pvZ2PY@by;``K~Kb<7MH^s(OV&z3g9iF~^ zMkcosuMXJe9Bm(#_bKwt2+sQ_=B0cVka4Y_Wl@;?N3G8}@y=MI!Cq(r=p1i^rs2!y`pq95 zxxRrls7LSoLYP0g!&jgHTUhg|e#m5S^3?Hpe=w2l#z1?k(-xDbXQQ}R6Jp)-B$Etf z@}HYOy!X|tZsO10Yc~60%1Zr)J-Iv1GMWqCp?`a1BRu% zvUWn7`>)15Rr}!)k{hh}BrDhawRsQXYeH@OrBB999>(kQF7DW1c-}75KSZUhU*&l~ z`ToALlvDPB(#NB2-teDCp08d}2)=axa)ic}*wURxCtfn{ZolmP-Dy5QsU&bxmf!en z;c>K4y~o>BaHH<6{%yb7Nc=RZmGP+Mvxm)!)`&sgBt*1aN^rnRwV?eLT{VV8b{GA5DVG@E(VsT&^l}jOb*(XoK4kFokj-ZIrkP?BPpEdrR zU66xYE;q(f!Az!Q$xmR}=-=na0be}(Nb6*856DG;2^ZkFrr3|^AdU@%gM0z0HF5h9 z+qQDk4r0E&ZJU;k57=gA`>0V3!a#xFkNwa9>|>?_#hEqpH>L+ntu@cSH3KQBh+vBZ zXapR(B07O9FRw<~Ut$3US)YfV0+XI8`K~l>OOGa?F`o@MU(7p!9YAiSpeyv083XdX zRKl|p8sL>?0NuT2Ri>0irJ z>9N@0GpDoHCj0}IsX*B%VYzS6LG9WbVnmk9)0-iF&?{C-TV 
zj{?K%df$zaVpWa#XdD7O;Wl7>a-h@oa`@K7xOcG9qsgJjtKSoX5-c7d>#*KZ;Re0U zJ0%P+Y?yC6P7vT$XGTjszklqeZ3b1-`k!=4cgtSd?(*wbTOTQItVcT*>qRIc<>w)@ z{kS8A1E#WdNlyGa%QN4uU8(sr^R!Xt>G_9bweR;Yo8>j%2-K4|G%>8t*pXgi42rgL zeQT`pcDw4n8|Su31ZRXj3wf%b*Z56rawtOOq=wg_N53_E$`{wwy~3vza|Y}^R^xb% zfyZ;4s_G`5hQnK)+?5Os-c*;yS4-KrAvE={%el2=u(B}-7{-S z?Fu>)zUvJIP}ux z`Dvp!j~`Em`t@F(&rh|NlUxUNEE%-MP29&{ro)LlGN0bNd_&VBqiRRd4rJo+c2hzs zG8zVvEe@0H`tj@C{l$jGH<3NoewrB@hTbubO$VPd8fSMkaO(>e92;@Rzm}T;Ch^j- zK7-D=<)Sv4tqEbY%C}<>sPW{-1ZZYeah@e9vubYZ;{%EA8`Z}W^#ZO+5TKmA&#>R2|~J(EXY+8La7gHAwLStRGG2A&CC^31%IH0s_){DQMfV^NkM48iUxD-;!S zKrVRM4-|eD!dG)1hhLpf^6-zvVrXRS&^~v8`%65ScSJdfl4l?XU_Rw;TCTQ=a01r6 zLzrIEWQr{$fFv|e2_jOXKu|vQnek)sBz#poLCWf^derXMS`=Lf#;vW0?rVzG*jV8L znUYS*h_C0OAX5QLt`H3f#=I?v#(lmw0JN)KXU!LPXNhuSgEl6Th=TN~?yNuqJlq${ zH|@}72iBY}VJ^V2bTc|ULXJ*jjR@JY?FZRb3b z+iWvy==qG)&n+R~*QFQ^C}lDAWzu^6e6FU|DAv-sXDn}~*C2_lK{vu(ZpNq58Flor z4sDum8-@^wcZ~|#wEjFa+0N8LQphta00y6-REt23u1QN1(AKs3YFcy6ENAZRqO>`E z(qHmfVmydB#8tS`X1X-8UK7Efq(b?vaKQzf#cK+&V2a;q*l1@~# z+wBuAuW^6Y=KN`%<<)$RK8WlcUk5wq%`7^VB#A>{5Y-xcEDx;Lra5$I6*Gb5VRH!U zkktQW4?;wF=~d(VLkkxEd%pDGhdGf>Ct=Hx;z`lYd!I4i9&P_EyZ3E~+?VDbr;L~0 z$Aqs;lz#2lc3RWnH_K3+fFaq`aFt8pslEfUDL zqwXaReru=jNZ7p17+UC#)oT_DXZ-!2*XeJbROs6I9Gg*rA60idwPU6$vJgcG=)3ShA~ zT0vw%vTiFsely->DxTz3Mw$}WfH3??5-5rT$jpgIpitHUpBMOC{S1+C6!uQMFHi^7 zxH7?}Twq9GL8uPY={SJ6>XfkrQk#I3S<{Pzcggck3E;dpO6M?9ftQdM?g-p*VLYzY zhk$&~uq#SBAvA7<_=LAQRvs=0J=XQ>N%{|K88R*fKj7J&Hl?y4Qi4qVW-?jqnhhEX z3RC@mZ znl2F)hU>L`2xJWlPHrFqQ%=WbF$r^YZkc3fi6-)FMzSxRYd`$(1T(ySNiCn#I0@lWkBlXw!YbV1|yrmkKm*}0z7ul0D&jw!T)Rn<&^+67<9 z_2rNSyR5zZR#{Ba%|y+vEPF5busI=BBu0uW}4e66X7RbHVXmp zBwOulSZDO*uIgS+>&)V%%5`FEmhbGlH5qQ4{OqRrNSmBp+n--Ue@?oj`#}2&{`@>P z|K)9D#bU?rx948lSQ^2So2?7EraAuow;U?HQZ77-+uXT4UQoaiE_55s+_?1T_sPb= zt_+RqiPi;o?xCd5X%dTes|^U*AkQQ0`dz%411o3k-HNPWjwy z)9tn1kJ8Ll{QWL0x6Z7I)r7D)~eaDe~=3tRvLx3zt zOX%V*1fUb3+&#v#ZA4CbR(_BqTMLSuIlkDUjs`*{!c^ok;fzK3^6smkhoJz@8Z0-; zDOrlNsR{^;L1wUIN=}jfTB$@gv*scaOX+}9cW5O*X8$gdx*zXC+lgXlp?u(d-PdU~ 
zxoub$y?x`*o2|Ali74vXwflO+iSBKn>q|Wd<<->yt$R`8ScMaF4VaHoGuN>PsQ=V4 zh{fjO&ZypdH(KRb-M>tUsSCb&@6|u29eFRR)2rE*_no&J;awHoJo2MhGU8l$&9{M^ za%Ba8&0J+`oK;u0i!_Q_rk54NN_8P3res%%US3|)1DU~KR^9g#C-0=P*te9xSjoia zY#KI;K31dc%XQ7*HK}3aP9msjl|m98VB*q-;b7qtw=|w)LE+9h8n$)3PL%>V4Pnqk zq?Ly1SZ>az`jmemWYo74P0NRu)VDK#>H#7aWJTHu!D#!8C6Wwqa3`K7rOXuDo5*}V zV2VXia`XV^f$>~(7VVB84)Rba&+`KgLod$NDuGm)K&o=)?qQJdR>%ROI>q5366_|i z>Hyz)2!gTeXe5Y8ffvPwUKO;4Elsy7_ny1X#RP8=tz`1x2KWJecR>-e-m{8l-GNP~ z0tO4^)4oJac&HB=e;;&jHM|0KwM7nC?u1Nb9t=dQuySt->ASTFMwziZ6I%LfPrn<;C7I zWL<;g4os}{Cui*XJE{85W0;r6)RhV{ZJT@Xa5yb~lra`1{WHjm2IT>AElDEpQfAl-hqJr*ejAI@$nkMoRH0spheFmQBA?90 zP0@q&uMTby?3dN1bwyZwln}{o==vd_oBZXO%Zr8(<#V4KRPLYiOzHLN-0^aHmuXS3 zuUFC=r`Ucc-<^H+v}V~20>z6Zw5}hM)#kznuwUo%m}^XQ%pM$RW~{lXLl}D7ZT{y- zw`np z-9G$KA*-ma2QjDcg#F_hVzK}7(K;`$+NJ?g%u~rrO;UyZ#-``12t#X|QyQt;A5L0r zfAnnVhf(s6FW$T_MR8}_I=rcmuav4bjW{kbu0u50GcHBY57|vB%eiYEq{$-=TVHM# z|M1Ix>#NO#!!|p@FMB>v?0UQB{KZ9WxK$@r^xY*}vR&_HdO8$U9o4rKbwDk{5+#we z{h-RR{D=}_Yd~4k^2h7`hr!AtEh=B$sifI@dFIj|MoEb}YBs!b#_!Ls#ZK=(D{d!V z)pb3Vcz5x~$@$6WqTr~VQPW;yT^a?Z!NIlTxz#d>#Gg%J?;?(UzB6=ja(|#*G%OZ- zG1)6{Jm`f_Tn^d0dG=lRz7b~K=kLBp`D~B;TF$pE)#HEJpW${_!Q`P;TJfKJ@HVR9 z`u?}uIxGXPcf7{X^1^@ne$ua#IGY6$Y7|~l7de^9nydgV!+?FcUkLZAvJmLL9{Ht7 zbJ6^eI8vYLay9&qR-yY!d9mFhMz`V;=L>a3K7*%8MxPbcm3`MLmUw<6{LDtjXgJ>* z&xSPCWdfknruo;Ua$d8-#}L$Z8>DN{E~hF*n0p5_J5!(3aW*+%@K^G2PJ6i$fECgg zsw!0>9Z%8B@u6e2-BIRxGP{#V948*^(O9m(yQ(#XtxB72&BREP6%?#ceq}K1sn7YU zXXKWQ4+p5y9Eu!~tjX&!&P_g zZtq7pzkjRwi(%cp-8y}BYLt4H4k$4xmGG6Q;dA!pLGgdymhAoQ1h^i*!(MG4D4Y+N zD(_W3pDfSEvTbeO4aH715Zxnh-!>NrEu76d0#f3^pU5F_bud)Gu2gWY`SGkYo;z%n zDk`k0a9QPgm0+Bf7~3QtL^Yu7(s;sXL?(*r%OFi>@tBvNmorg4BUlJy)C!QyHe}Sh z-9EvM=RbNNz|#qCWqyOsMOQpKj~r+*>%b+D@}X=iPz`fTv4>F+e#W} z4uro%VLQw#xn=NPwYs#M!aoo8iZm1Z6dyr;|4MPr~NB_{{-Wx600e`X9>`VK9U|>(XnV={h z6;hz_DFjO-J^OO?)RTD0XHacgEMprivnD%}Glso_1QkRO$cNO!d7AyHYH~a+2#YFB zP77~98i|XvX*OMttOQ8%1j)LE__+W@^Mz!5e_@)H8v2hIgy@nny6^hs_ijp>pTOhb 
zsAZ+v&a4}pSgfk=*|#-sPFr|cS$JW@OD*(&J|mWiXg^`~=MD`#*}~6G)pwZ;qT^lS z#RF642N55t&5d8|7*Fbbw<9=k%#an$OcLbI()J&;C}dcYX!{c=a~!FZHAA%iqT?QCZ~alKC0tpIcYbN4H-9uN07AJiu;aj_ z8f@l5$ymE z?$I;RQRbSoyvgOacSu;}6z$h;%R_&D$E_B%pWfY%%x~+3*dH}~e|hbhp$m&SM#7!e z_Y8k8jXW#0+upN$N>gg10&d-(pW_FgeEu$9va5dK!mY|m{aRcse!$jfdeq>+mS(@| zR<%-ctzSg=KcwHlL?lV=v+hTXAzOli!?I8d)YP<12_5VkoXruyjMk+22N2imid zx?x{Djpv`5_z^ssH?~hLuU50{?l^hA;a^JeAB>HC((;R+-Qv}=y{}5XewQX|0s^z1;fm@dkLRS^m#bjpfL zCYqGVUvaaHY-qG~9{6CaPC}Oj91>6eg^}HEU)f>yL#RZ~7?$U7`*#pY*4@3wuP_|h zmJb}I-0Lki6xRWns@S`19TxwUxQmK*Zyz&0kUi(3>KK%pzDK1~!mf`GAJi)&xi|H8 zk))-`CdFVE?jyxh?|Cr+H=Xt65`1<;whv$znDe=m3Dp3sQ`Nbg*iixbyb=dnFBQp7 zuOb<3yW59@?>%0dcl~}?4yH30W(#E#`=L-(KT%`Q7qz3jJ$Uxo*Jw|aat^?1WADrT zpJIFM;F`qjQ@+I*@i8pQL)!r;M~+zmeRwwe_WZt$dxgO!7tC%EMI>83dvfoE2q^oC zB`HzfOwgOT3vKnhyW^7ZAKPsyZs`7fDk=Ldq{!A?0fwJFRcZVE{)&a^|3fTn2nfW% z&;G?ZcIrYBz%{&U6>|(hi?AeZ@+Nj5JplHBMY=bc3-al!`k`DR=ZQ3VRh}>u)21m$ zTZ1IOkHVV-k`thVt$@w7;rvNjkP4O_C|p^#Z9<#&qWCcjl7?UDDyys3!}hvDDgbTM z2Bj44d%vY zl|#2gng7Apnyt7B&jXN2u4m%eo}CcBtKgM&>ouLlaj_!f32PZ&b zKRByW@>V3Hw#D}SE{f%lVU@eKdUHx+6K$`JYZjETbvkgB+k>ijKb=Hu0+gR^z7o#7~w{9u7 zSut^<9gwRoA>QkUIPM%QPmQcfkE~)x!iqARwAPV_B*|+sYFO;X>^k|(#RHl%26A^= z=L}+}e3K-|)wTn-vKsV*PSCL8)f@*$*IQ;CgQ6yNg(0<>4`dRj8quRN2hnOGTTmn6AaQ^KBP%W<_C7AB<3z!rYlfURLjC8vZFf z72Ln5tu~~y-|mZfvqseRwQ~w9s_Rb>1m!&NF7lOq0G(1oRCwGu6@j!e$N4?p`g5uC z;GQrR+B@p2xXQ&PAM1SMm%;g4HXF}g&UyQN4)CM*$wpre9X@<-m&MgQa#I*#uML!PA%oQ?a36 zU&5Z0-;Ub$Z%Sxze)qi%Z?E13?Pf>HOfl(jzWQ}TKs#Ox?<0~En$7M`dPwCZxA$ZF z>T9e-B*SH* z75SrQ0%?I*CzRtC+h)_jV^PsS1!rSoAVSg5YeWy%aqDgn6s!O@GJlBc)RJe^6YpIs z%c#0K5h5c^ZpYouc4XCQp#3eM*bKahsst_1yZ|RONo;SR(^K4t^TEC6DmD%!r?Oto zJ~0Jy7SP77cQzDyp4_Stln<|={XU*|b*#>4f|SW4DlwVQucdNUfNnC9oy`Gk%t!v4 zXMGNjEJX_XY(bX3U5{M7s8@B^rl4JpYzQwqnKQ<9Nem&fR~gc2`G`wfs>Zear(eF> z2@K;(cNQoaYa3g-egYnk~Ld- z&*QE+gS@`$nrO^jL>~)MmL`kF6f0K4^32+ta-@qecLj zK(T4rj)d_AX{+k>@*d#D%62IqQ+*b=b9n#9TQqE5GH@9j)u~VHO^Ky}#A_R1{K+GF zJ91Z5RW<)!9;o>;corP{H1%A~3`5l;u~a8F9Y;+mEa6ouz!)UKU87G+}paW@KM!L#a+$hqyJ 
zzUYjiqK`fop4G;u87kOyRS*~abt~|@@NA{^#oDkm-m~-TijrnKuZ`+INsYV~d#Fj_ z_mbW^sA;p@*+dd4oUyM7SBysWL9lrUYx5g*kTePcJ*B1@ z+7*SOc1_v)Q|8{8eK$|L5zAez+O`l5;}KK;n2zXMd_JEz9yUbbg&5yH;q4nK%Rl$R zXT3{`qP{ec-e-9Jed@tsjrKzKnxlu`8S7+U4H;v(opR9sfgHC{Z5MzxbAY+Wg;Aotl2( z2g06FE%YfyHZC}J(VbLywI=X2t}%-td`hovZ!3!^)G{yf=HNBF4T|Jni_$*Zp+y3+ zZ84w(y);(;+@sf%`yXa06Q!F)nHWt%4PBnVHJztYAbLf~mOqbH9{$v=R zB;%QK=1r$145Q%)8Bk+OXP3yBgB@n%-yo_GDCNUt?W$WY=*r`0ncMNpHF4 zK+hfBNx$Ct#P8d)V4EMtYR(0ZGk7TVUx_$YtE3ysA3k|8rY~QE_BESo9qEO1^#W7` z?;7Y&YOwFXwYs=>J`pw zE@wV>$FcuPOf)S2h=4@z*}NqJ2UPcgy;{@=DRR_pzk>z$O|kq?1;zsiD2rD)4fPp* zz~@~832>npus#Jj;AYtdG$rkLpp5fZhxM)2aH>8hUD}Xe^sud_05!NXrwDCGx8tC$ zV>0XK0y5%R80xQmYYAKv3*c3r(p}{X8uCDP3Ia730GrkN=~E=#yCv-L{aNL(f~w6~ z2J5>TtvejwXf0}vuNSRww}^f{&WH@7W8>=i2juvB4&>&o9yRZ`X4J8tHdT>Xe*qQ2 z1A@_K{cQ>PwU8OzZM-kFp@yy#*4i|d?G`1eKzBuHQemct0O}YTuO0{k5EnoP7Ztmv zcfu{&qoWr;03%ezv!2j$%)t~BE7Ih?kA6kbtuC5-4zFn3GK3i&5|M}xd+j@l=m7E* z5F(qBC)=-m!ZX}nQWMg6{0_&D*VHb!r&D@eS4cke>N+NCV=VT8rASNIvVG^?+1!zR z@9J&O;KZ(=CCf9h3fnbf4w80uvU0*c{i);{pB{GN*C8Wt$+rs6*7?`%(b}h z8LTLlHqp8P2-@~r15@2!jf6_JWe1=F%W0SfSL@{1qfKV70%%NlZDuZuD^E*~supVx zmDJ)@+cq23&o!J?ZaBr9pv;{|%8jawUP$>|U5rK12;ZOM*=hW`RF|ZsPZ|3k`~V8@ z2oIQpudVR#!qMFqe};oX=8tYz7P>V5ycUI*J#M@g#&@lBk2T+Zq5ts-kA|tLdo|$%?Q;iM4-}?$g>+3l=y=izj>UL`@p8m+k^rnN0QKxlAK*g@H4#>#W4GPhbGxxU? 
zTwEz@sQ3)Nb6{c6d#0gk?M%+1qq}>ez2>MvhY#Q;ZbP9S&C35~sUt(qD(p;EY&(~q z7(P>GVZSOiY96t1z`3UTlP`qcgY)ac+s_fnXIa1{!CpxqTelJIA5Qk@VacFwQK|^x z{>FJd1v`R>Exn&;SRo0m?x&D@7BEw50=mMr^tja5EAEebp=?zzDKi0OTVG9x5vsyW zypK1`-;`04Zj!@%4$Kp2xAuvCOd>gn*zMR!e2BxdgKc>rG_H6XJ&}keo0O{RhDZ!Q zy2d)*er|oTlUSB24UJ~t+3Nauk@K6oN76CA^*d;+yU7M_m0i>lFU|*vx#dQ~0WZ%^!76{Fw>i$$9rOod1HlU7AOe=6 z#s$|sh7!?5Qdri?0#@1`rau1SLFSF0+7Dd*165+|!Be@44_zfW!)fvDdR*y5(kTUy zU2tFTUEiO-iVy?I@u*!JtyZnOfWvJ8dQjIs|4NNjlP|3XBp@u?glW8wUQf>>+37rY z;MygStdio#P(Ip!AW@jAZUHHxj06s`FM&jc!hKdXYb#6ADTwGVSxE*F32vxanwvuzi$dFntNv+sxSG<|FiJ#C41JC#evT(H(n z#JiZ?4qCO^bwupdD){&ljkycHAf90xWZ`cj@z1qG8NY>$$V?vf~qn5NS zxIR!cmeg1y7$#6)FH8^J9cXR#&{81n*Z~!D7tF2KgYlH23j@|YGEHWs`a=26C+S=Z z{X}Fve*e!jGap51N*@3mQ=g_azqiWO1u!#FBUDov@}}SWx30%4tC|c}&Iyb}kzaeEB!q>z&T$GZCkuHJP(}!l*R%|2ba6- zW?R;8&o=5GOS$w@^ibt0rUoOcrVdGg*WX)LLkw#-8$S(ZUf=)y#afTsCv@FvIJ^H%d03m4cgX;#D<<5q z2I2$ZduUa5G^Y)l1g{syNy8fEThmK9uT!Y# zlR0hNE19+BvQwRPT*bEaz^_2jbS|JJsn8)eoE6avp= z>e{KXi<&ekVq*aQguQA}FMRu(9pf-=v7Z*ZNLSkxZD2LvfF?g`kh(U^Ql%MKlH5lf z@$hkGZdn@|b;6-p@o)>V+qYTO*`YD<`Y5+w@UquzUj0OgFSCEL7Q5a*td|cnK38z(VZM90BLzgoVmzJ-Gk6 z8pGaps6>}M>aGRFZf{rDk9%kKyIBVI3{;y7R=}MSms;r7%nZrE>sxTc_J)pthIavFo5vbFsqH7{@&&(2IiTg4Y4CeZ9vz%*T_VZ4 z-4M*e$y?YOb<9bya_`L}ol0OFvk28?tznQK(Oi_7D*J3pE{d zYO0(r1f1ZyA2{fkY+t;guqLY6N0k-}92D7icC)hl(I3a;K#4rP-s5(4uL>gY^9!Dq zaw5Ka`|sKHKfm93&^~i<@t)qQ+!64jUFYJJZKv+8sc@?{`}6zP{=opL!~-@qHsPab z=8u&p=YWO&K3ZlcpkM9i(p-w)sr$40_YKFZ8V{Z|S&rlP$=MnE`1xfegN>w;?TaI0 zS>CSSvi-(BX4oBQ0YOtHNSUg*9SM~eibStboep>J{rdj;xncw7CY@VR-Ryp0ylEhG ztKMuIouv`B9qPKzZ?}d-#`L<2(gXR?-Qo;*@Sjm83CP$5_ zm$g-t#V_7u63C+YvTwLH+Ns>&oJ3A?GW2Wm3KO}Yns}zKR9Q+X#YjhDG1fH;}tG0a!q?k|~JH2v4Ca*5_ zIh`Wy0;{AYEA~WKx8jhl?gB^sK7QS0j$<|HdN7e>Y$s>{L8u`4jGB-jcnYhHklECJ zF-Fz{U&{Qj-p`^AnITAQNKn+O!Z^0GHpELbyEX9*R*z$zUDn00RWZz-3KoUunD&+y# z4aibaE`SV*04z+jYMq0DsE_MP1WTJz|2MD%z81_CmfAb6R~BuI%0fnkJ^2e*2eGdQ?DR0t{>P;JF~tezjx6m6e+ zEs9jI&WZhyiX+H@&8rlJs=KPcs#4P4m~yuPo5~9};BzNJgHjo4xw;dY%>-Mr{^}lv 
z1K=u11*e*tiJPGhwS*0?ZJqh>dUpMiEi^gp_ak3r^A1DPs#4t0NfK4l$spAIR*`l6gTvjw09DDBJSbP4naxV8&;2?PaTVWcutu9I zFRmfOMf-QvC!|-8HUsYp6(}D6Sjw6eDzx7bI`|MqSQCr&znRIQRlJ7*h_f$(k7?(0 z>L>p6oih|du-wcMMgMBnH^p_oV6_4CfQEA%BnTu}Lx4tDXfGaJs0fRlvX?D0_o+L4 zJbgqR*5P+!M~Ax%`FHqQv9E^fVr4U`tcO_#UA2D&C6V%?sWP+^hjS;~X^KCenXN10 zo7K5a1}tO88lU0ls&fi)n^UP5&D0Sh2p>N(S}(m5fmv1ieebofrz`5r1?60B+iGCZ z=K3C=Zy%p5{ruLmzfgAmnfZ2f!Xyx4om;JE-raw$c#;*Z88XH$0LjaI>U~mkl!4cU z1()5m3qhdTxmjK=3|8LT-rlaIJoPqgNPl;&6-dc|;%cYYrC*`DYsc`*@&0aZUJ)oz zLJc=~?GzUmpWGh>PV+=7XO-Qw#>(s;4B36^&Az6kVMf4DDUIBk=V~?1TCp6ftP;W= z55v>{p5k&JKVZ4VD>*N$%$$GPVA#mcEZ32pR-JyK?gmECj=bIV^$CxJ9ar>3~ms++{({A%EU^$ zuKk|w7hZ-4jqV3>sXOr%4iC57P!j$;JMFfF5pa_lfDvQtCzO93{JNw}zbJ-Q zi7*srfW@2wTrUfxN1|@2>oP}jzbnLV%)7Erj`v0kzSsNu#=%S;-{<;E<`Z7G-;9z1 zrh#GGnwXNUs^9t8B;l3CDY}*_c*+jgcR1qNkFEH2z{P2IzxA|?^u=bUCcRzs0c+@gxH-L_C|1>HJ63O6oYBoAlI&~Nq4~JL^4dK zjt*Q5<-`*GzYdPke~G&*&*|LLq>eclKgg6jZPtN&9kc3p_=?;b1mDiy%lPkXBT+zU zWtGN`ScY!IgYII9gfG|NMaF`W=j}9P(_%{lamk3@4fysHvsiAY*^*aSG>cavj zCU-S-xUVvj^+a+ePr&yW^P-(JZ*@ct4DHVSrWQ4R0JS)W`4@m>C(6$}W0F#G;?Kzu zEq528cFQ8#t=j{vQWkStbgKojgVQI7mVq2nGJyV`Nat2@`FdSc|yF?1M@YSU= zWSQ1x_FZnUDiFqjSQXN+$$t}jH zICW^Hj);+OUZ%h}S|}TbatgRON*->{F88gJ&EVGoFO2LiNZ<2^R=9yc7}K21x>@l6 z?;v~29?y0+ANU>@&*oysZ}j-{Pm0`Fofn!5dgPl9x3VG@g5Cx!J(9l$ z&`2&vW4y(lWv?-vq#18(e)xoh^hoV9eO7?%zIxN ztx#7Yl0r>#rtL1RWaRi-9n<#e*|Hj+2|2(`eSU=mp2PncpE+R_v^TPGFEe9cPtqMX z!K*VL#$NardkC&^o%q>TIUwJb`uv#maIea=M;V5|Gqvm*&>FKE7p%7F5W;!xS-jDgOD^EO*2}{j zHK2j&XeDBZ4dm;-T!VMf}Q+cm|RI34kbgru{ zC8E6e=h5*T8rO4GYzYFxNaOWq@SwaoV`~dP5_9=)Wdk-;ch1d=oD@>%Hs>=*$U)&>rj;aw+LLP(a505N3$oROjkyu@*W%E z2TD`hS9Ud|jNK9Z@N46;f@Y?Ct2{a=S_2JstzqvDzBl@#5PI-$!^s8vw#0>Ogr4dv zn;T3QqC%h{vb{}`j5Zfg4B=&RnlGGHi%q#oAaf6Nq`jccL-F)WA>+&&?W6v9heVQs zriXZ|lk5gN)<{>7^$W8%6n4fnfXL#}y)%P&XNXU^Jcz&?=atlO`uKWU)51f#j9_Wn zsVk2$vv&Bx0-qg~p!F3qDsF7fFDG;M0x0!VVDjH!$o%clOwGH5B%DhqsQih)Y@ zBexzcmzU!*GYJzwe%j>Tp*5E!3M_LD7P}&mGpHH(By}VQcdUeY9kKXq-6hhBDsHD^ 
za#77=RfLKgvA`4KnbIoQuyix{eTp`INBssunagac>%#(4Vo_Vv613JnHM2fFC<%lG zVEkdA3I}o?z+>@iV!5Lsu#%5AZiP!pTe;q-`F|LD^Khv9_y7BerHXd)T{*8oWTO_yHY1U-Ebcsvn9_sIi5TfJtJE3JT z91L1=(?3lK9tmmrp7q)Cu;T zF>6}C->P7;HfBAfFr+I zCZ%HU8&x7h3(KBWP=hD% zt)%RYSM-@{W0UUC_Rk%5t$h2Sn0p}prEHkp5hr6*b5EC9R5kL2`(t)EHY+bjacG@D zlD4$Cgmm)k4ZFasa7Ev>%D&wudWKV{*UXKC7rk~)0m3Wi_N0vEJC~YZ@Zlr2qSZCJ zrgQzm{(0tsX77Y~(#f2S%%EH)S@_oD1w|R@|F49ebr)HR7+*HNvXyqF0S=2~<*{d= z;^LQX{-1K1f_ayn$_Q68W$ZE4hS~+nwh{-52OJf1F%(xBCIV)R2p>_N$>nY_)G8oY zEF!}cewNL}g`(!#cC)RBdBQ`95J?S&Uo|C^j#Dl&ZY-1x3Z(H`;b$ltPNqc~|4^*v zOMs~SF8OTXCE?RKi^c}8K@ufI9oW12l8~1>*6j5y{rrhPM<<>9-mV>9u-G_J!%;J0 zcyLZM3>$unu%UBxQrWHKgv0muaCGz=ucU5NtQ_^3zeG1l-LgGNd1r00GaD{o^;$gC z>YVIyhM0t$RH_qaX>qz81*TFM0hqaxpiV(H`i54EW1A#JLvYcsV9=n;PHF^?L{Y1d zk}iN1EaqGjC>mylZgPVtf)dH0FPfyz+Y4KYap5Wi>yhXSFORqx6$KoqX`n-T7#15L z%%mDi4`;S$1crTM5+`hwyinL0XA*m~b%<^7%vtK*OJ+apA#}(U)1u0>8uyrST|(3L z`T^Qjy`S4IbBgD+alBa$qJGs3>P&+6Vj81?yX#t{PKKPkES;xAK|=-x5O@x?&$$}f zGV8;Nyv83Ly#^;q$2TTM+PP}zPwBojr?OsU`Z~CT-;&wU^;mFE+pY6GS$kLhh|143;gc?#9aQ4+~LZR3}VL z9)#$2w~sfgc)22TS@O(h-l!gHcOOu^=IGcnbmdYckJtEH_o&C^N-Q-f%Sscao$u#% zw+8$=itBzntI`mEuvE$6`f3g11NAh=yAKzRs<6{tYK3%O>-;b8;w8sVwVvo)_k>&K zUEAPag{CP!xL-W~Wz7C*UtGb%wL)v&lTRN+c~ShWaZrFAiDIc`_Y|!ujj+M3QWeY* zJDBeNmc!s9WU=l~-xns%>YIN&KXH|Jv5ih#vxG^b>zg7{rz^{c&Hlk8Ez%~Z9fq23 zDZX3NCDsQ7Otnl2b(7mr!ZK%$`H?=QhiUt+3mxjLzAi;>RPagJUc1 z{Q#rN1|f{l3B}OA&o?8K>PlhUO6xBymjld zfr}n8cS}M&rg&70`w05An=PKS$(S0^-uF(9O=siG@Rx5%EuyApp3spZJ#uoyKIQd) zDjj$~yA*Lk_Jk19qHpM8H_rKa_4~`kv!XA5`7{KCVrez&r7GDgJdD@}hP2@+cQB%q z%%&3cJ%i*zlZXL|Amu{3rP2WxnULL`4+W6%opet4TL-h~m_CPmLR2Hc z4&GacUgfj|9OPcoih{v<>iQ+C(*Z|g4U}nhkZy8tQKrlzJmK&i#MT&>5dz_%Wi15@ z>%tj3@OI(1z(pnkESLpCi2`0UKE#t~1064lyaPEpZ;JdY&V&Np%X~i{otq`Vwjg(GRa}#IWX^-tOO^#`rLQ_(r9YC6+`vXWl10Wa#` zxo?9E3ve4y|7CDojzPYyUp>J!p}R58EJH`a?Eo$FwbP7Nj}7h}*o|@exo^VHnE&XK zs5?KoIO~9Vne10{AF8{@J!I{6te?1=j%HT;aQtrKCB1F>DdfP6gnIQd=Zcp+tADII zjbJhp`uu6eTLyNfwpk~YWH)lY^sLF8uI^+zK6sC39k*^AA-}2r{ngKFiKXAozkl>O 
zaQEa(hN(eS1FXUk_3rz)#}F6REJLhl9*f@jV0~QTdjbgSDyJ`AZ4ikMS#rzzPmU&s zKZvAx(p#-t<7SLEgWT8CNb5OvT0{Geh}D$1-Vi4Km9NjAPD8C$ro5caAq{?V%6I$s zYRYnMaWMlh4dgD|nz-ROpU}^Tf3^Fw_u|g?@{a=IV&)YzSCqE0lk$6Igz&41_3Iy8 zO!<;V?=x7{-EEg1wX}D1=s7o_@L1OU%W39NsNK4Q2>uNB(xYQ~&4)6l>CCs%;&`A< z`TsUx*?V6Kc$McpXuarkb7(Vx8M58#%>^5*0@~(X!kVO8K}^NMgot=nqrL1OX2r4O ztUZ*eDR2LIQ77YWvqN*12pm*gbu4tZ*RZshVQ3i25zkd=M3)h&Pt zRbff|Gp6bge*B%8b22J)3TE9n*`;yrzO6xhD1VVSg_*)nyb!u?IARovJ(n3o!a2dP zgy;MjC|caAH$J}7r#F$Si0njnaY;`$=}kk=_m`#CpX<3cVwc@GVBYN)&b&TX@zjmE z{30=V@RJI=OqKi04l@c7J`Wo5T2=(S%nD!2$_&#vi+3hVqF-i3SF#C-$WWY<+DH|Q zc`Y*xU+YR(XI_ZZcK-Nv^wYYm?%RqlKL4Kx&K_E2YJDbezROdod;R;1FhUTqi3m>7>{I66qbfHD&RgfB3JQQIV3~d2T${Do_ z^6|?stRNWnZ9ikn+>GGbhk=4he8?PprU2y(gb^&oL^J`WIcPYzd@Awa8Yw%;H?qA$ zRu?T%BgF|(da>bH-BxGyHn|zY&HmjJKGzfvPriS;_=^SQ*zJjOn0%;~c$ zM!E{qOs`+<%bx!;%lUe{yBxb4e(mhWkz=-bG1qz)t*QO+f+|za%`~_vVODi#gX76j z0q^)c79KL+P5pH)#j}&8dZFyvYTTKtZCBFnE8k;l-0JLof%B1WdG%CTS*GUdDmPW! zIYuqr$CoA^wWuAc&2`LhS)ldcbUP_{YUjc3%Aftv2WdMT57%~oZLZNte3tpME%U?g z2Q~NIK?%l1cL7=*j!c7wr_I?ud3HZ*EmbhK_b&N^(Ahbtf;oHsWDFLW#Xs`kKV2sbhYdkN@YSQrul>y$=6*`Zi9MG@SDL zvoqO8()EUp7X=A$Ls~uZyo}6)Vq2Yg)W27kn&-yu;1y%{grmQoW9x2EJ7X)vTWFmV zlbdb5ZR8yBd(A$bSUGkj35~!7Fc&4p4yS8P{u74b)uv3flU=HSpNvF?Hg;^5H z5?$4|dLnYJ$FrB}ZpWY0JfH*1X1j4h@x2d7uEr@@NIizLX<8Wmih?Ww_v7*ENGO!yg@sJ}zY9uoDmm%F; z%5`NOxY6`|cS}Lqa2o2QifFq?hFM+bb#XeZNt#}D{XWxi->i5XIJ#b zjmT6Y#Yg-dnaO?G_7R^y9sf-S1Wx0f@SSYZE3>?XmjhfLr>YJQja~&k=>Ni;P=Y{A zW%UkVWLJto@&_y~Rf;cm(T9zqM8%ZwK~-zUMUR5rg<3flQr zE%A*=#fsR~2>iXh5g*4<04J`9@bLniy#T586Qz*~7dZ#YpSyUn8PK`R7z~A&@=2oN zG^kIDtZ^Zp&L==X#kq9t z{t~j^({X?M>s!O$+t^OX-So^siW`~;S>F(1DOSD{&b98I8%&EXtmKweZ7>89Zt#W& zu(9O!7%&znE8FpJFK-Iu{`RJ?W4jc@LszWv;(8kTZ4U>Tfw3Z5y$qdWO5O8Dx|TKQ z`Lf1S+_L@GU9YlRt@2}DC4Ri|wFlMxIca*iTtTeDJj%z?3gS&$mR&S5MjgiT7o@+F z9a-*L?=T6T{EfIP_D9A}QJg5bw(_Jmev_4^H{~p#VY&v5FsBWI0$S$2D*YHe{$N-0 zxX;v?v8~uEW%D`7lKa_SZ{26(X}qkW3~Xx0p+X^O4}C*Zeoq$KLr-|gi}YBJUw(Ji zKc1Q)9aCzhK6Rr5Y5mV)`k#OHI{y4~;15^LQ*WA}MTk)km4#(huC!mvbXi4jRjqMH 
zD1JLuy|g8>`kHubMGbyK(f01D@k6?yr9J%ddj3mjpHZgI6h0gZWxpsSm52r=E&gyi81b z>5=;{Z&^&_yIi=Z&{X9IRRL}A(rvH|g!$31Q?C*^)NM8_RfXOz zjOIS=i|&!i#~n)Ymk$REKn-&LjGuF$?&}t5rb-@tYHtat9KMDbJ@wU!b@S(Y%*vWK<-Eiz*Wos z{G9MQ66IppW%rCt9aK~u9SD^et&CGcIihdE-IN*Q7RW???RZBF->2RJ25mNEP+W?i zWtrK!L_M%f#ZrF;cmWm;8A#M(!uN!)Zkq<|zLgSmM{%*Y>7OF!}EO*ZRv zxSf?^7c!lW7ZkIXU4mf`Hy5zvx&?VTWR1$mMOvIoW%OF~U$sj2KYdb*u5VhLF5@E( zqzFpCX=EKm>679?Xqr0Ltxztf3uEMmXd<-bC%1fFNwH7edU{ywzqG6qw|ueTrbS4* zVIy$*DRU!5UV_06A^~8X#RvcI(G?BhWCR?0-jtAXuXZcUtRQ^xGr6>zYP9@hE_;~L zEM^BRKGrhVb9}_lT38n=W{j&agrYsGE>qmNjhB3OmC+&UD7(}f(5GN>nMtQawVs5) zHEpdrrfmPmG-9*g8=Ys9`X_>0=au1?q6C*yLL1t=E=!8!sMpU!)>1iwI6i+|c|`dx z+~R%vW5^1D1NecF0)3{*A-?YWYF0i%yuFOty6Wd8+l{L~aNT4f2YwaUY(0{NQlO@u zT3FfGJ+MBuWBj^gEaKaqQd6|+vD4TJX8#qC;vo*3bmD_`RA-@mu8E{W!C8|7)^==J z_WQ?t6aG;(d+6_*aI1O3VL7XMM~{XokuN?q8axYh4=oX4O5+j#;QcE4yTXQVgu=<= z4X)?b<$dVx-n>U&Hj&U%`Hd-r_#CCO>QvA~B_XA5$eL?nddRQ2dF)Q(r0uRya_M4@ zwau(U^4SGio9Xw8>pfo7|f9D<`*gs}IJ!G55a2(WNMj7TdUzC5;Lj&UEn+q}5?iNtocUsp8*{ z_?W(bRN`S7;?IuVxry!`Id%2k z6!kfg#X(Xs>&^oHQHWd#fLDwE%dBFznpr62$u z_blgyde~X`HEivbD4r>F$O&f94NTylj3aj(dC-X6mYGPb@!(>2nZI{mDJohzWty#B z^rcKD_Cu0~It+X%@attp{Ekl03{|*RHxXDANFvdH3O00)Fp)ljNV<|#h=&8}m8_+< znc#x3m|f)6N7?8G5WJnuPQ2zop{!v)G?&wJyhEUfm<{K<+`F!iII_j>p8XlmTsl_r z<=Kpf3cJe@S+ka5H{&YZg|zXuTeBi(=ykNswlDfBb)<+0xy1V~6|$PQNmWyeWd$Be zc%%++A-w0udB*st;hW>O*UcX^P$)=pvj1!GLw{>=k&=lffruOPH~g&8v^^*V9@!lD zm4B5lC};hQ*fe(_-C!!TcMu(9_)9}#i{6z#q;jgNRc3zWQQ5la87hLU}R8`^k2 zRs+QS|8d@iS}s*YcS@PjF~-P zHB?;iw7^QDoxTAAcjQsG+5Ud>`eTT9l3+xb?iEQxSDk97@Ib zZl4~UZQ9d>njMi$on8Y68Y*D}#!FOXuTHgyhx>mrdl+nG=Gu}G_J?<5Mwsfc9+|yU zU+Ov~BosGmZk%0w`9&#{f9k_WX}=~qSJs+JXXYM8?H%OjvevjUzkIq&pn!&jEKKmp zVIhN9OO3(=IU8V0xw`lKo>=wo9<8%3qn>$KP3<}A6V*N65SeSq5v8}vo2wchu z8?;#l1GQVDx_TiKw=c`HA3ldVee0?79yU;O_`RgQ)HaX(wR`5P%oMGFstaR?MBt?h zdOt+?@muy}4LiA7qPS|p{=DtV)UlZ_*CyI}t$qBf8pk54R;iW7sk;0UtMb1T-k z86Ka2p@v`A0uQGIWc|=#6ph)CH}$tsnXuRFVy11~fhTolw*39Ew`$aO{Gu;_=JQRP~>YUGa*=z-d~cweyl2)2f=T-z8qcYte0gM_JNY{^jyB`QoZo0 
zPR6xy@9x^CiX3;ge}(L5Bt%PRi9=Rxhsf&~NK*SF9Dm>RMr=9C&Z#?Wj}1V*+U8Q2 z99uALkPe3Lj25qR>d;}Ow^WGQO&~yJ7?)WZ5t+zFj~CB7@#@fFLj64t28jX9@KXk! zH1^p^>&1D|IFFn!23^bq=u{PR3-x}c!8wCCrLp*qq`)g8c2!zOzVnW;CmnoAC={`O zv^-HZr{HHSBRR;%LdKsA5_M&O7E`5RN+<^1B}EC(vv;j3IJqP3DYyDnf?A>|-y}lE=dr9ERm6 z(;%~Vl|PT`#lB}I9!lRTp7$#b$@#n-L5!Xh!KsXR{QmHu=Kt`9W`K?5{eMzQ`dm;I zg;+sim;)7zXfDrzZgG^~uJ48zs9JW2U*e45^1+48hu^l+w5=zu6uRR>rEre|FYvQ~ zodfs|A)s7wGPx`*fv%<5;cVJgJF0~(qQssk&C>Its&HsYYPLW&3}9|Zd-!YziV_b2 zZ9M5(Wwg5ZZo(JcB)|Ic(acK;58@x#K!MQORNLx*f&*#_fza-G3$JVV+E!ggQW(o^ zdji>Co|o66OYR^4v3-rMSS3nwJPR+4*w^CRoig7v*36pEu(sb!dup>; zYMpidq3}Wf`=i(qkO0j#TW4|{q4rcLM02=j>S@oP^rh8*?pl>{@|*pmO1z;`a_CQ# zO%;R=O!~I*wF4Cpv8I8tToWc_idfhC_WTi{4rvx^M-QzHV5O9 zH@iyn{lFI8PT?zbICb7v!9-AIOj{?{Yy^&NiXBefL1$@mqbdu6|zFz+BDKW{|Cge zpvAigjI)*jz(kh7w6(l;emOZHRm)br(~M1tI{^f=&eE52~H@P&3WfOnWHdraz> zOR~i#8dah{2-xPI7mG@eL=R@&mX5Y#%hv~cKP&4x*zU+9-52fJM8xcq?8C5xlCEgE z%&R#dm<+m^@=qUHWIAE+q*}YnXnOdKhQQ%b{^B+fC->MA6C>h}D_msbvfLB*NHPi> zPZPyo8?>1E93Lu0Ireqkh`^l4(=#GoJ^+`!5BMTAaj~0DLOQoP#81lVWEz*J+;yEu zv%h`ods-wj+H{<%|3W1qnxx4`)LK`gQZ82&xI_-)F33@ zIsT>rMW^`=Jn%DX!ix&8{2%6Lv=@*FqF!|z55xx^8?m<+dYBWLOW__@o-}7A!`I!I2EKl4z zr-E5~tI@_+!$)a`K~Ni^ql^fz9xP=QN5Ifl!$*t@89vG6QVh~LWD}OZ0B`p%I#;A3 zq%wkn^cV>%{A}UGOsT=~+vfZ&h)|j*o`oVzI=3n38Dt&+(m{lAF{X|h;AtIyGf}C2Z=(enowG11|`IV$bk+LKNDRtQU3aaKE_YXF>rv)yL=S|6{hk|ZqG#iE;X!Uwa zw_>%MrQe$M!jo*dlQ`M4Hpi9FK(VUlEYQ^-k7jBka8t2PG#r42jD>=(t6Pw$-bH0@ zFhSYl%CJ?+U154g*&r0ClH2#4+Mk*y0N8h73Ra(*1pE&G`4HrMutH|GY2Y zrDR+zbEM0*jl6;%PjsLU=kYpG>)=x2&-I35q$9)|!^5 z^yJm3Ld1lszWVqj)gSrjqSn9?4>ab5(TjNLMJ93cu&*o2SwDYXyS>S>Je7TIvi<$Q z!$&&le~xQT?(iL6@)gdCq5iqwWB&GjARz+j5{u=bo#_KQWI$uWuHV4HEx#vEuC`iF z?j8TL`Pp9K&qUrm^{C1H@9wV^z6!T(7x_2N+fmi^DVN?8q%|HgB-|j>BGi#&{iiuA z^olI@Q4bhLmG&6Yw!faM|Ggkszuqlazxp=so^k&bg&DJu4eM||9KQ+QWHX)wT;etq@P#uNwFlT<;^6sRs*SK4+m}9Q^nUvcB z^H`bHdzRgn1DF2P|IamROVLDJ5uh6`r@J%0Oq50Io}ZwkM6 z)>51v1kmd+oE*Z12_nQaxIv9>;m9Eg;P6t303=Z8y>Nnq zRL5u6Un&SleUXDiY2mk;=kM@r^}y|K0VM9rpV`SK+iW1lcKl={ugj`+!Jd&w3zt9= 
z9Yt04aQnG0ZaH8&_@TT~QH98F6Q7F~$vwLTx zLLT?KbG>3%Sv-}9n1HeQ`ej8Os~6{c-W%0rngD1UQ*%3QSsYTYZgo@3G#stZ6rv}Q zu9l6aWP?T*8l>lz?|G6T0(Ynmp z+obXVfx;CEm0w7F+5=JzFhs6QE|?SQ(5J&Um{G2Ez$+J|2=q~uI9)NA3itM(19L<< zkdiO;e9T_PP-9q)o6&Ct_m;IRF%a^hD2QT}Mg)4!4p?Zl@-;I5=nKK(La`e)YNW{) zFFV*Ifuwv*%805j9g00M=dK@)_3MtIsCU`6h->o9BPOFat%$BK8{#C7#Hjr z16R@j&s81s4tRgTd5-oCCi!NK56F^7x>?D=nK}bg6N5wR5tL_-9g(1KtU@f%lf%4> zHceKIYy$F9m#|qNi^H=-K^?d=5}{g$fNW`w2){MR-8rHhI28^aFo^oK$wy8g?eo~Z4mhjJQ3s{pIQ#>ZBL;!SC$9QN-H z*ML0Y!8?Jh6)@e`byS$li3YT0; zd~kcyYd_Gq_}NC@H$yM$G5wB7#?niqYIjf_3%v!d)RepqpS-fjF&XA-Wdd_d`A?(gg$pXv%d zL}^uT3W4@dft0)%A0N-%aGh!i+T5A^PJcbdsM^Tuo{2^)dTnmUrJf zpgVRbvoIK*l{A;iA?HE$gxf7--n{0CyKa{;A*5C;K7o4i&Ji?we|%R>SN}>FxFkr1H%N`w5~wW`t~%Y5R8X zasQvDVbcWd*VA{xPDJK7a$#;{1W&UWm!$lvW+vz6m+AITf0=EI))>~mr^_Uw^g=na z!>HEiXFQ!|lU+JD&*fkN!f!7p3f z0-A2E8#l_mEuMPDlHoqXRQ68r7S5g{sjAmfK(jBt#@#^0N$1VYV!in&fiu{Rt0egD(3t^6L%TMM07jwo2ossOK)7Ce$CD%=yzDY{cIA^Ztp4Pk0iehQkrK=av_zjemGsTzB%(ajAUyP{dcB z($I7YHCQ+wSS!tEGWe(z44J^&Wc?S0Ai}Zen{c7*<`|Q+y5Pm(7DPJ4CP&|kv=$P~ z9;l3#KG3{*z{y082pgLMMV-RGml#nn=BoL^eD3^lR5f`tL*yP4W>bUy%F6&b$YQQJ zD0N26LA|5!!xbT&jzIj4i*g9q8xh!Q8SXZUUw}TLM=rtahN%tvBIU#Lm;V0D*c&A$i>iC7AiJZE_O^=R9)JCmNcT0LIK zu01>kbu~~8{ek)1tfDI`{uMUTAT)z9%(Q#{^rFs#v%I4sNkMJ;B8F14D8hHQ`rXD4 zcTk*_ClxhLKKaoarB<;)8+iY2VUa?BH_4PVqCZAw3ye2yJX| z&*uV^{N7?w)w`6J1TZVCfXI;Ma{tadL(TlrIdSW%==K3wZiWZ_e{;4eu*l)t&UWgu z&k>HR_cG6LSa~psH}1Mw-fpJ8d0(R2wQec+)Su1HFHtuZj{ZC3ONnY1A2^pP5Fuw3 zMid%pV-&6+paXHpg^^$PRV=hUhn+1eB!WoJZAT0sw}q@^B4qUY;P~`9X3(@D0+<@_F7jUm z8uklWlx-t(-MxOj*^LOyNdW)Scf`EspKIEw?`ON{u zs@WhF5btbu^T#?SP&Dp7a6R05F;1uh#|Q0SS3|+Y3Hx4!=p@q;&rF^jhex*yTRSO6 zp}yH|-HrKAIU4+Cxs?}1=iW|rUAV2L#NsAff0MiPB@e0l+8;}XUi(hVjyP4T*|d4L zkY?`X1l6k_oo}s6&#v)^E?LZ+{(e#4W-0ilH>u}&!WogbF6l*OS)S{6%g1e^I>l8> zBz4}Fn-XWsIdC_e=f}d;#Vv0>xL@|>vy!X7B0K8u1(@N&#P1`Uf;?1{Z4SY6oD7z{ zYv_FXI1DF#H~aB9UBM?!GVN!F`!ZMJc?3y!$}q;_Cei%ldA4XH$ z={2O1?FGtZIk?`wp5pV5O=9X}kJg#8K#96t7uwRn{`Bn|6!~nSvHH3>w5RX&vbvBs*rCQhF@uW@3D+Zl-MO 
z1pe^d<2t$XZ?ZYh>0N(a%CUGqX*4kMQp4o0TRnDLe^L#cp0ktD?jTmfKN8aIEUuX9 z&j7;I6T1iXP4E(v@T7?7c;6HeJT>gs35DXxb|537R{?E=P8^Jld#pbg>m3!5)5S)v zCX$#^vu_VpRh*v_xS{vjKQ@~8>*-NMw@>cP?pJ{-##B8<=83eyK_qGKr>yzeVrr5) zQI8!|f>O+SEO}qoqF;;zE(V(83VSTNjD&DEBoa1qX{WBgs&5>cQ@jy;NmlclER8k~ z`aFLZ73tpN`uWO$2sT1Q4jeV%msN@rZm7K2(G7gir+%-KxsJYFm{^IsJT_U9-8 z5be~@WwE8g5Ua?N7!YCN$!HhJwL)&2Ioik(f5!wy8)=)Fnt%Q#vDRcRaCkUi0ZSX_ zqpqzJYX05+M2-kA@63CQLiW{A451WWn6Ia*-!W>A*+~)45}4WMMA2sSA?~}M4-1&~ zn3gk_y^{?hd5{BR$m5~dK_a)H{TwI=`tm^{agMOI327tr7J&No7`w5fZBM$_G+rAy zVRT!KTH;egBzv`oVrRbNMMt)=p%~hJndx`~1>t07Q{!6mj2uySeNGW+XQkLWgE!Ag z21^MyU>?+X<7T7*K1sGi{#`L6*(E2#UMA9DKGys-dWHs5@TqzDN9h@sGAY83_CPi? zMua@zo^dY+)(O@OvOSHDfKr8+o$T~*voKp>;5=*bPrT^-E~CfC9fP31NeGVrxUVO9 zdqws-?Y1;f^6ZC%qcj_Z7J-R5sq$@uJ!m~st@qomZ}x}5u4SPS!W7%KS7I^G<^w(F z`Kl8^+vjup1d@fPl&n~`Zhk%6r2OzouH^h4{m;4~3Q&CBX6D#(6T_z`c28U%H{#G% zFMcWWv^B9GkJbxK-VYwX>oqy@byTkX=IJ^0$y<+VChtKhnsyn^zX-{ov5|3fv2Ye{P1gznS!sVVuT4X93D-sJJgqs`mJY|{Ij z`NP!^_mFxr)XPPGliFu-IbeBHK1lxa$J@s)Oze>6CkgF8|H!jiq3q$MG`;{51}(CH zDK}yc36N4!i6Gu#uyh>7DI#}3IoD>67AFb)xAp>27$?DLisqMdxD73BQ$Aursy%2T z`C&7O9ju-mR~}tbFjPL!d+Lj9`{^gEE4@DIA=k@97-LtRDz3+Cj}E#xaU;SjWx)@8 zcxDcbw36*Kr4_R?eRg4i&bSHshUPuA=m#hx-je5s{jVR|U3Ef*3|uzLJgdaB-*%pA zn*Qhh2x6F~T~CVGL?E4BtbVO$Q0#o!@D9sv&N;@f7c3GF`EAs+a8uRF@Ed#x{6}3F z(!g=UG(BEI^aI9ZOaX&Ba>&3$jz|5IV~d2#Rt=_VfkvuX@5>XE8*;X?J-6ptW{}(& zo`|!!hng8q9VDAN0&HDG^=zu%D%%LL;ZBBWq!N)G`8X(2IBk!EE zZK2xzBGcHMtC##b{^iw|Uc8Ht3DZ;X54*XpTPZKzJmfmymj(2l?QdLG2H^v{s zVi=M4JuVM5Rh*XnkDP4!(wE%#={;HT-{GekK}=XEST?B?x{!aN7sIQORA8M`ai{zS zWm+_)Wk(9HU;8Zk7J=)}wR|;2%*Q%DzjfbOLS|Dg*+(xO-N1a9_KoT z=$BSG+@ByjLUt79>{af@3P^D3=vcDm6Li}JL{%2h&1#~N%w&xL763_K4mH!SL-#Vr z0*u*;!gXOsy+_kw!q;R+?|g{7J=bLcjH1{Q?)PCo+3tLZ@X6`70@t!l%mGN^fDK7j zLOLv|Yxg#Dw#r#_4LU*D2OvvG{OXth@Rz_ua+P8?lp&%Z_ksP5#3(v)VKAQ z#_(TJ4WJ1%y|X{>U`@0av!?63Dc>U>HX~X3lL~oF@<4d3spZ4R$iuzMsI8mv8V_CO zp8J(MM0;P&_FG(-^hmPj6`QY{73Q6=erPO*>P~O*V-SpqZ&VDW97+An9U{wW38@;- zd!yEJV_((HI9a2>pCM^N3sTai4Fxw-Fz za-g&=@F$9K?a 
z>bPgDJDDgI4~rNs_ecKBO!GXbPPJYu^OZsJaR<@Hn66!0Sq6O-!`op7xrmtjV7E+_ z4&yh>0dsDaf(C8Q2YM`HguRO@QDLLc&noE|vgT!&KDk_v(KdtS7}FGH5nBcUIe%bb zqu1*>3V>5BgJ4)*F#d*X(PfQ4uPyIDlA{8^=j$sy=ApvWaEy_dvd7|IB$U&4r|ubOOq7wo>)vjL5jbwCI!-#aBc0qT-JP!p%HoLQ<- z>;Khbyvw@?%^gj5_g|RNY?}&7o;0Cx7pW1+H7DEA2fIX97xL1d6;>2^WJj9<-I7kC zo)K6MX-NxK4V%++yK@qL&CrGR-`hx-ypkC`f4}A|&iO-y(>*}q^`Vn~deY*6KGiT^ zfAfHG<>s@S3*7&5I^x4439yYsK;}IINy-U3thHXEW~7Z9w=8Ujoo)`XO!Y5}7EAT% zb1hAiKV5opxc%{>2`w>}R2@3TL{7;jDL=co*vwi|VbFf(?SUJ41bFOXN-7df73vY~ zti{#EuZM3A4IdYCb~+G_z?u(&^(S@}ne8}e>Na*XN%MVJH^{^1qoQ->#Bi$4VP+|X z2ITmR4<2MpSf@8L<^rLVnSY*F)~#4hf>Zhkvd*X$>Ias}d5(r}jJnP0#7zd{iPb2V zGOIj0q2?YaGBX1KqDsBidPL$qMa&)Ruz!pmj4gX0%7nto#tyrwue$0ExgjiES9=bj z&CvCH8ZA9YF&D$8o*+BSRr8U8qxKaZIo~AR`Uxaoz~|jCgUx%d?aW7bfdKW+=jExe z!8m&|`;+&wT-r_zi zdr{>T-Nr4sApOqi<<9d_)$uw}P3r$M{=kn5x9>-qtTEJK7T*=>?9)#QJ#F&hV<}9$ zz7`>}8pb!m0@df&np5(765>oc7uWtkGQ#`vS#7ht@r0N%{d$fDmLE(H5n^`mvR)&U z{pjk#cG5&ec)sg{^zVtl#VL6ete41z3FvEww*%@tCRQ##Xxco`So&GE>dWZ%Y%nL; zBIn-}M5}WP>CizH1RN1DEnL_pmF=JN%CHTPkIg|eK6_3jb-v3)ZR1MdB`GF#H`a-DW@>}Nv8#c@HbSpS=`3;J=tDcLQXXZqn>90SxqMX zYpkq?^PCSl7@1{g*#r>v2cPks^$$NtjEjQnptg0j<&S*xtGC2x?L8r_3Hw)28p?$l zQLtJD_6TAbSji#j0R*2*VMc2xQP<1LtLGut6jhwh>88XJldn^*%z=J zOVXTuz#6g2RgX!nkQT1dDMGr_#Y$bm|ORCUIH z@xvJk1#g|bqvwGXW<)~u-kX@TNndCHua1#@P8N8DKTU0jWg_R zH?LqG%s1P*sV_yes5QE2PlFTziV;}r;D|#jxBXqAC}@%Rbk#R~urIE|=({fp{I=&P z9GW`DO+!bjEq zxcwDrso+Ps6lh6MPa-9n!F-paU(9|oCI4-7h3C(M&3>Pw4dtg~@*$K2yC*L>A^s|_ zP{7lrbWu{j>$lg!^BNiA8So^ea-X5*%aVDU+&a9SCA+bjCpZ_oP5^d~-@fRT{u)K9 zSrxP#%ut8z7X`{F*iS?w$tH3&lE#5_50)bd2a+Xt@dyMO7nhwTusD~EujF$8?jrAN zHPWUUHPGmJ;SZnt%wEB5GL13$V$>5`(}p{fF-;_iub{;NNnYEo>K{)C_(Z@U891dSn`G zQ>{DF({V~j)waUug~sT0G1@G8RD^WKX^g~mNs-Gqw9+j=7V_x;-7)Lz@Qbg{oC74J zRj+G|&K_kINX~wB;32q71%n8%n(*|+ns39}TpWtzSOw>{rSe>iw7P1AUllSuthwT6 zat2S$_p^3$e?>8FD3eA6eI~?SfH{J(;J5CJ`*ScjVuUVUu z9=d>nOfp{*;}ODpE`2cdbyR)A(Ej!9bJK4eQ>)RusxO68Z$>ilYkUSAlB8F>RRctK zH)oy>Ft^v!P#ydVp~H?So^aodPaHEHkuDtl+zn}Jd{f@^GWF@OCHE-!LAfyX3l 
zXE~$|^2mrVcbSOf<#gi=;ginH<%C3sY*I75mO(vN)=(+4YI5}P3sxUe+CB_aqYok>b)D=y8REDvdmGuDPZZfBwVPLVxfZI{Nv zyTVzm5%)Qt26U4!#)!yoZ8$h6JqH%Iu(r;JclXeybz*AHqd{6xS2J#hW6e9HZW3U+ z&$%N|P45{C%>gA;OjQ4KMXKMBV=wK7}WW zA0Js9uq-dp;?>;vS$>%l9$ZV-*NwI6km{|Bz~X=vf)B$d+gCR zuuwxkgRWW39*7Q6UOAB8aSJ4QRyBE@r{EDGtBJskHhWzRA|;ot{>W4OlPgO6Gl_^l ze>xjaGN)1m-B~Y-Hs&G(3*ehZdnT_|jYO+VhpV|qiwo5r%rho`5XU>cyJI;Mg44GS zM^TKOg0$1W1W%tXz1u-8gl}I@ScOhlK8`_mVDN>9V1f;BBOU0l?`LS^Um!bK!wFyG zW5`mJIbN7n_kzlZ2`hU%yhxj@g-j5`gFk3+Q3TWA%P+W_4T4B!yrV4H>*Z0HjI)@? zfa{7rZ=Jq!rP@Z%jGafSED&Ia288Y-wA4ex_S8e=SOvHUK43^CJPYLso7Yl2(GW;V zh^;hQ6J8KLJ(=?eQWuNEO#>#y*wk7_<$!Li)KmAFK2(H$!h;)x2gSoG7?Z1Uu=!lO zYfy#Qaf(Z1GTx2qF!zuD8mYgl5RFGlfcxH`Fa?RaHb}16Vq70kB7VvE&6TO@Zv}-b zQ%_DktLqWBeiGfj+ydD<#rbMZg?ndVk=A*IpI!Xlc2+hT|GctN*qV*I2b$d-0E*cD zob65*W5rjC)?vPZ-8$hRv+oDT`Vujj(UOUOIgI_laWif$oQ;<`|V$JZiV^!L~p5{AOy`c06-1gfU`ZUw5~ezPPI7FiCDKLIUSVkT!8gR9U)JeL^Tm6=#pDsby;nL?W*VMaE9vqs-ugR(3;nNP z=Nu&3yRHohvBt+0(G);3SwQp%=SOy?UgHiR8zv-_RfPQAKH+W~BnsOk14)MQD^^;Z zi>Fpn_o#g4Y4Y~i8ay{zy}qVCd+tX)*{$0ID;e0~^*bUY2qjQ{yPI8lZ2(UL!!)Ms z;o-?Uz#LI0S|Ao zmK9K?hOikDY7)OOmvD2s^k5Qi0GXJXWmZmQ_aAkE3zCtsNCe9E>^5q&IQG9C#$GbCJBu_Y^qnkduaoI=qf_9~eA|A(@-jEaJ5 z-@Y#u6i}p7x*GujY3T-OhLjeD4go1C>5v$@K@^4>9Xh0A1QewN8EQnNyW!b*-~V`@ zFVFq5mVWS3)`E+@_Zi3WJCp{EBV%5^&soqOmbKd9IaXUKh7Jajl8&Y}eINu{-9yK1 z3Q>l_2pMG{6YI6*sn+9P8wlEK7A)rwQ_kcL^5mDBjDQoRPLf*b!derrQ=?mN%g)uv zv86>Gjtjz`lFmK|1|Es!$28^1$cg`drA8UM;4M*qr_<&Hjs)}T}2QJ(DIUrxb zvBc~VhJh-n6`Ypu4~a-Cv%x6enxntgyxgo#N?PNT(?sW8N1s{qZ<(9+-@;m?9L(;l6d*0~ue|`kQwb}NS8SK2Lj9eB zcj7?+xCvM`t^hh}GGo|gc`2(43@UAe@4NReLvspbhQa_7a{*#i87`Car6sD1}{ zyUE?U-is@M5PRUaw0+~GkQQ&i>H``C!CeFOr(dqK-~nHxUYaN0;xnDGZp5jia7+-iwI+^=Yesdt+~&;PaDD=vT#0X>S%e$@XDzAqKjW zpze(-p5i$siJRB1dQ!Oj3`1k-Z^-q^(hkln(v zQdDc(1h1nmNDlXHe}}-H_3^^=92MF_jGc<D)&3nJo4wQPb9Z{_B9s)j(s$%3F2dy8SO==rnBE_0ZdmlD*1> z+6{=O0WZ`)<7Mfo0n=PYjy3nU9G4}H?nVC&JeI69R+pGiUYE~4-I~q11WdD^QN!&T z0~HxdFH>_3N_6WoU#2I&NJ+8js(;)s?r0?MBWP$n@F*j>_XV@9fUtVrngUPWE7lDN 
zF67%A!bN!pjuS3kCveaj#5fp1^s8DUSecQ-IqW%!)c-8w*SI?jixt?! zvw@KZpxQ52ntSVl#1SRvjX1#Q5>x}|n?b;{`p#i}>M#AW=pzZQ1?LGctb;hpFh>Z8 zjueEcBXnKvD}C2pOxFVFC{ws8kjYZRy6Lz-3q-=nev55B-LIK?RrGB{=bq!^GwAE$ z$%&D+=S^*%NUTZ^-(FM!lBA*ZT2DnDNYZ=bekvk$R=^-kIY?@vYWETC z^xaPY^C$8Mcy|rVL)3cdr?&s4CWPmzBb6Kh_P-w2=qx}m=Y575YX&zNuw()*%u$_b zb!=a$wu(ArY?9dD8rCxGXoPFJ`NF}uKDkT?k@(X6!&7#B)VW%b z99TiKPQH_s{Yaz;p2=AH(6GgXsiOXq-Ihgb@97hX-#?bjuyo5A)5J*?d++LGW*31t zdpp(&eYNXcdmA#rCaxw@bM>UxHT{Fz8-8z0!jE6H5@P?e4wKF;u2Ve-4$`qP&;o70Bc_K&M2y?~$3LigzLEpsr*W~iga1|Au;fn$Zp z&js*XC6=p|wgdv&Qfn*I7rdAX^`Ua(Y-}e%5QW7fhy$QrZAw zia*zTOqGjq?Yo|KZ_8FYRB3wb&3`tC1|hT$^yNUD2p-E>`g<058Zi)rlWkc>D#*CL z!@M=cgET$xYWtGo*V3AGz8983LI3mpFZ+nCviSgHP=ug>8yX#Urkf%{tkx4uLr|^z-vX*E_C_U4xSJ~Ob7NpS0tA> zXikEzzjUHB#g;#?pbVAte0v9S*aB6@Albqsa)s0Iy59n&D5n8VNejNGfT;Cq-0Q0=&= zAezwu19-{(G=#>?gu88JeM0~a1b1SL+-vNkiOtQ-Vt_6LW`#jxV2#76!=`r&Xp9;&soz@bh(*_R zK6Je$&JDRd0fQueIW;~F;Zv~SakQ`Rc&Pfje#UjrOd{iCZ-ra-ofHN% zw}zbB7Hh~zeXemnT%Ly3g9+2o(s7O67GW~{>NZ%@tv@(qR9bAmHkRN zM8O=*0|)Afe_Rn&TB$7l54=98HYH}2rc>%i9e)y6FkmvQBCiEtw-=0#5paEKHcR0l z$wWr2Fo2WE3}bEl>w$X26Es(}TM#ZSBnvIj2*HH)I8Dj``Q(#bL4jy(jBj-I(XZw|Tpg^YI+I5~bI4sll{|(}< z1oh|)J1-7!xn5_mg0pMZu_72q2CVeJfcCF>)3qdpHVF*WVBNl_1;}_s+_mp_@WJl( z4tlU7a%)>+!8C|Fu+o*-^_3guV4Jt&e!JadR|8BpeV=|ys~a%bx^~1sItQa+aQXJ>M(ylzzg zLTv3p#r;^9BDJAwFve*(+03-TZelS=F(y|zowCoyvycYW+7t0eGHJl1VuX=rPkv_* z2Zcn=gCMXw9GMkl@f$TU#9kxWU53s#&js0-9dy1^^SeYWWi|^Fg$`h0WhMjV8WXEc zdI`gqc)o=L%7-g-k7f_GKOZ0LJ#Suvd~P}N|J=Ce7|+o`zB6Bk6G1W9oUpCzWLxFL zYpeL@DWQwpWY10SRS!WW__5eX>lJ+?=48@+Chn?tgTFntRcr56Up6J$U1;uMi-SjZ+*N1ZRWNo(;HvN}{{b_9 zTj*=D2Bz67Eboo>n~`N7=Cy@83rgczk5Zgp@e9+orFA(r|Bt~8BaV1J^xkkLCkE|~9`ig9%3+>VO=O@tL%8-$yi!Ac!n&Y_Vbbccv znH7zCithejp=j4o|IM}|3!32L1>=v;iD3Men}cE4clBSAMrQMyUSn^s5g?D3ljTiX zCjcP3)_p;ab>wd=WD_Ou}6I=tm+`J8FxrE&GknL;kd5V&8cRewKR=i zKKM}p1v#JVH6ri zzTVarNK})mS$pX9OC#On7hfwI>q9mJUi0SfUUV}jK_$lrJ`{MqftK|zwpyk2>hc43 zRfV^VI!@Eox=lKQrGmG^>EG@11Wy>5c|;PoIjEy&9}RLN+{3rcTtE8^{LIb4kA$bitKWkIDUw!mg|hc68g*0aWYbpb3Nmb7F<;^u 
zjesE*3lGUM`0>1wq6Hwpa}61GbD9cZ$L$)_$CsS~9tR`$T`l)((qwcty z69bSxk43y+iyJu_r6K&*-x^TG1>Iap%!j7b{^I0BseV>V{H_ahE=t4lx{0ZOYlVzJ z3>dHjY%XXBY6MqUN$apc#&lYBng-GggnP)rQV^b{l`xe!ps-0#0LTzfizBN3JKxL@ z+%)ytz>t1vaR%Q0i3m79<6r6Ssw8->$95 z2v2}784#CnA zC6kqBVxC3t5UXk`8uuxdCfL|`fVg5cT)pXcFNTc3wWu&?{$3SlD7*7JX+#|DTPA&` zNiH&DlvoX|>d-}HKvr%$TNxkk3%dyvYi>5rRvZmGMmmMWHlMJ711yh7(ey=`gy~Da zv4TDy{#L-UApwHdaN?%SdLIV4H@6JrK#5~5ZfI4DCxK;OqJFNw?mK4bz7O4+Umlh2 znx+qYeOKn>o^soXfD(xOEAAvA#N(5HC~kfivH*e&g*os<3!xvWu-~)pN-W-6_gdWA zSz1ANp+!St9U1}?De9fO)VsK+B8C)Y6s?j{b)LQ?j8(HQQfX%nEoHVHFJ=&vKZO;e z3cYI4oFQFR{H+Q74Xympz_1L_kVGTfVS*bx#v$}_x3ofQR-8zEax#bz#6G{Xok0u0 zP@jm06Rwiie*0~L;-%s^dM>9t6+;`8uHFR`4Z;f8w`_pYwZ_`&{i%O(} z4ixd4ut|-%QhIxf;v6a6mg&2lCe*Y>!84Sq1A`3m-oi;7if zZf4Rz#O24bf0fys-x9u0kZ;jHec!nc-8KR~Z4FJD9|s*Z8>%o`-ac|P0jvMd1Zy*Y^Aca)t5j~a6mAsAc;H`h0YYuIa3G}z_{}3t0MeSK z+7}CAE*#|DmHNOkHN`WRQV`!G6bQZQa;4dvdNGpH5x9mX`D0^b7+&Sio?|r{=vob3 zLu%G%03zJ>x-%LQVUsSTQxk z*s2kH#c4+k7~j=i*e#Ar3TY*RfM|SRW{FrytKo1}ty1J>b{3$G(OC;|$ph2QLlEH@P?`ZB8_+6Y1_zuIjyoZcID!qyO1}T z0D9ud>K+;S?sF3N5TUk)CQ0gE=AR4?&P0AaVk3PdN}LnVl$zCNu!-4MXDYUry*#GU z>!&h@Z={|c<{*vnsaax|u5M_tYdT)Yks4O%{S@N2uXgDCG>0G~;;9%v1aWg%uL8>K zzNACIJ3r#CUJ@cA>-RCc;+)ZroZPun^b3#QEM8bn^%|V}dJAqC1>T^!;Ctx3+JP!xWe$NW&|&X7n%q;J>c+C9=25~Pu;LBXu~0CS7%(4m+Y`3vB(JUzhgPiTg|$b8#In4eB*-$5EwD$ zp^JDC%O6IG5pn^jfJfw!p>@-^kll-Wn}N~)Wz3nfj^oX1diw5z-OPV@=(s`B$EDqG zF*tcluz||`3cX!@=O@C8w*-p~P>p@1qWgbX;60zjA@X-EfG|V}0U~H|L&`S&N5Sk6@I+dG8#)$tEJW&94d7bbA$jYaRkqbF__et)GwY(GC~zTeAN3i2HbOu zAhp`{Yiz&kwM5iDJ|MT7Lk*lyn+-vV81u~FGEl7BgK$96-ElKkYv5JgB`f0vpiL$I zC)Uf7S(V^zXSP)h5I+ITR?=#iS{ncoZqzB1q|{B-V1vI1s50w4Je~0~%|QJcyE~4@ zY+Lnx+gKGMsC1f^j6|F4SN=HOTX^)&;SjYsR5S)6cq(bti;PhQtrAZvY-%LMPP$LEQn z?AGS*Ct}^D9Fx>r-3Qq_>iXR5;IWz$&A;|waxkBymZI1UC5n0}d<$qY?$PnK*iy}p_8VB~Jb zfKOccP@9$#ldvk0x<=a3+CSkm{p?IMGM@$@;y2V;C8%!Jsp9mM@k-+|-cf_-C=X?W z1$Obgei1QtC**gEB#F&PAa#Fh3zdVl5Z^R{3f;{{b9LxI*}m?8qnxC~(P}*LHrb%k z7XGC4HuhJgbwW3#$rQWaV^?#cBkLnI_rH7@wJWsQ)| 
zRFLrb&&*_D{7{8kVR8&#qr1Lqkos80(!S0O?PR|oGV0iF|9p1O4P~Hxl4UKW<%7 zU52iGMb3nY;6ZG3NT*D3A)UYVv0j*L{MSah{`z3m2af;8MJhf;Ab~`Jvx4UnVdBIH z*8Yy=$kUi$`r)Mh_W7Dc2(#xpc;s2-T%gE9AlMMlbJTo6Vz;& zLJq|<6?-}XJ~8!sJ6<0imn;RIC=fyWLpOp6MqKlY`UaFQrP>S6c=aRqO5h`sy3b8- z5FECv5j<3=9X>S0ZRvo>n4M4|T1}2|86}amgm~4PzvWvxf z$FGKr<0rH%*>MttzWn&sxAkc{4j2sTW8#k3s}+duP~f8Ks}KUX6OLaa19N% zf?nrxM@Pkv`&KNXq)o4bYrT2EsKi?mQ2z{2#t(vTp>f&pp%*_*x=cbc;tINb*GIvF zW`7K`xP=xBT{TwgJ1ugLBu9{&-A1>nN2-@&o&Y&VNK#z)K2~LxFtz?M_FKy)|1Sx9B96HmejdRYi@MFCimjP$Ajo^9fI@qT2Krf^DEp_^DJ^eAPkOnra7Hwq}fm@xfABZ+Z zNZzE_N5tk3Cf9Y^wzax8kx#Bj^;Z(5Oj>fdV2>GPR-~>?cJd!RiH*f(CdE!OV>@E3 z;eV6OL=#;KUR`}_LmtK&WgK<@*yt%|VA#r%VcOEp95|!n9c?X$xdn1qff`$iurr!4 zKmLizFWgIas+*zHQ;a@rN(Fnq@SqT(5$x@@;ZI4W?elp_0`{xJ(Fz2L9Fi2F&%=oj zPIkQh_&WY>#KRm<H~7Fh~8zF8A|h|NVl8yZF{T3C3d z=1-r4kxB-F`D|4ZOK-OwKb>Zi9{i#6}R zQ*e5QWC{tdX5+QRF9unIMh7ZS5rW0(uA(u(mC;3(m|g3F+Q1+gjlW`rd7Wdbx3J2Q-zoSc@a;P20-yhD#GuP zB2qMZbytl5OAT1HfL5zs@Fsb`PCL*>dzuwv@#aOOzh;_n?O}Q50axp`?g9lW*a1st z{2;2<0HSvuMkw^AAvwzD=7a?|@)b~L;+EMMashUd+Wyqm!~g3z^Nvmc=-beL9p@h$ zwn5m}@}{PnupAMTanJPz*kR=X1>$AG=lOB%F&oL#*^kgF@;+ymV7|?R*^drz`GCHa zM4#sdr+GdiEHFmA$@nl zzS9C1v+c!`<6xzM>?`vH;suHA;U<+hNYubo35Q9OZry;#^6l{b7LOhxGD|omC99GnRsnn%m>C&^ovWWCGfLDivsw%=YJ9?jb6stwoyg1 zP!p#5EODMlnVKc|dEhhsTy9jBx{dGF7u|NhFWH=edE7=su5LBDwxg7MCxv={{bz(# zOHJ2+l#$J+`4g4&M;wq?TnjA|!$*+UD!Rcam|@9PZ4)KmB)xz=_6s0)YuRX{3E2EV z9KdP#Y}7-&ZnCj=tRAOjtBtfJ0rJ}Xh3TugiwBrFF1rmb1wG_cog0ag%2~AyY)D+a zcFjUC_}&UnpQ{ap)iT}PhtH0k_kT0*EnCA@@*{6BNI4b~xDCUxbnO zyAv;OkzZ_>(;V&Ic`X0<3gcV1GkVqiyN~;6nlaDk2pNl{Cm)q2-ot#HmK*7?cc=tI z7@OAvX_ROV@0y+e2Ev{T?VYWoj!%_a!LgHbXEdYjy8>*(jnvpu1j<$oW!o78CP38l zRE1kZGxbo6V?BiC6n=_5611#&j=9kEJLw+X{&7}N>0@z)mFfSb3g<}TX5`NdhNt3p zWm!8ts!WjfIs(1bk$|Iw8?N07H5spzXff)FEs8iIL;? 
z91rw6C+wf*jTO$A8zRVv8 z$JQV~IApbDO$Yj4q@!w@4xr+mDY6MjwULT*c`o!)TRADNK znXOnsCV+rC%21c@~kN;Fn~#)O~5X-+j=VLS*4OMO-LLx{;x z>9gQFFMx*KOsO`pY0~;s?4GXGSzHLzJ_#vEvo0{$I+VJ1F$A7zU?8xY`FF5(z2x=7 zJ>Bkp7ZQgN%{*@}Rv0G-+}dYNgG6;Wtbq-$Fv;xJL>Uq3tky(sL~B6Jvk_E({Q#dh z1*XawPUa~yOSc#+5R&5lLjuo+T5$UeNNiW>E@X^6y*-s{`)ZbWRBJ(zoAfk0C- z2TNADL8p6^B6n9TUp&ZvxoUTgnGRVf-K$Y7Hlme#Ppx2gVXTTwffutL;P_T{SD*3q zyZBz*)^6XHqzhcve&;AnEum0}v2X$zbx)4khmBiLY0j4-Lly@^q~f@1+DHQ8%_F7T-8!)Hay-oCMcEtbn%$w(fohircr;s+b!Kx{DLRrlL0xa#Z3526~CkUSE! zIL`PebHtP%aLIapP{;1R`yQ>_(fQ$vut_Lg(2Di&SD8imgkCh z>25CvscjlZE@`(D@(%{=Zy8-iec)>&m$(mAC~8f5Q`)MGP_}Yr*bf@ z!QH5Gt~xH7tR(%br7v#hy_7nKQt|mOsb1uHGv3|`;L;5;96YygvS2>1Gr=l0Kxmc0 z5XHbNCR+GV_`&5ScQQ>79@|Je9V+P(dxFN7|Gvbb(PQU}i_Es&7yHu(e#Zg4vN}ni z&(FFcqt}%pjDwT*9xs#JaO~byA~)d!AFjhZuUo%ru7vN4oe&rYo!@%wcM+7?@{4@^ z<75oZ=0~$?(L0x8jyOXr57JI8L5Q>2T%o(wOqj1-)cU zW$^Li>f;5p`=)Pj)Q;T)q92fn-*c}XcGQs0T9Yg!Z61=T zO`U$nca3`8v2@p@!R)U$e4_E4o>;q=>=V%ecA!A1>Kk1IO!Ygk2buH%9g8@&deINS zOLWS7Xx1mNF$B8xGM>ClMGo`T9&Vr|cn=YJ<5>#W#V%5S@oKyJ2v{|NZB(aK_p`P< zqdM~a&R#-vdK@6onT>}fE0zL`sk*QuBT9_j5{P4V4L~7;Ypm9u;x+F2uc*lx#((QX zQ6K(p&f~!5{7(b**N!E`0CAlP7~z8^+QuITa?u9*Kr4SdNY%fql$H4hM2Pq=Lhc24 zxgMQ%$D0H-{Z{>XvH2}@ulRnxUKNm1=IVTouE=VfM^tWyL*0}%TGONN&FY;h+15eDRDfkICWEzijVON?=Eh1VRGJ ze-iZkDbeo27+n~*S%{)LPL*=`wz?xu{a?1W<_lniY8O7ox%ZBXu6lTm!ud}xg&&{y zd=q$S&3hg+e0sCDcE@12aXA+1@#Lo&7DuGdgV+?JGpmW+?wvOJpUFO&p3@-OeyjH@ zdtOQPDjbt>6m-=r_IR%g<$h+~kuv#0#KfXMeD3Q=tBzM& zQxXdjzZ|x!TI7%4FR*XZ2AST*gDB75M9WUfUWbnY!TH@hZ!kcC9yV^a+wQ;8 z*&x?P=Ya$P%DONLJ}MNi-5V0u?mo+l?tgx; z!Ds#D+b1yX?O*)`i-@%?#lRH^4$B%5XSGtAs&1K2^o!Izi*Gs;eoU7;T^-x2Z?(7o zJaBvVRW_qgy;KWV;_I0G`Xg_uM42^v zj5A8nlZTQAH5og-zqD&t_Xu`QRtn}njLM{#B+8uqV>z(%YgDGse)Y;aj>hfeso^E1 zTgRC=-`QLh3u~jjS{G}CiBL{%L!PbFVzZynhmdeurtwT&uSb_#Z#pX0T#54i zF2%m{|9Hh0!--Q?fe|KRvjf3>B2 zE&mWAQqz!U_{y-QN_%sM3(F=QM+rm6VW(JGT4nfLuFy}UOA1!<5ly{M{Sm+mXyP`* zlXOaN(x3Jkfd^WoN$E{FVsqjg#~6^J))#|AH`GiopOqW1h9N5qbCup7#U4N2dvo 
ziR6`BKh3{kFTNV^nezX=pt3GWP(+u2lQ!Th1bO{5koe|xd>-hs34fSU#>wG9It8}x z^j*tSrnUZFcmH9J<}&?g4mi2Xbu?A*;gQ6ZW)Hj$`evMp$albnb~u?rmRghPX&_lj z&iP>>Llv?EtTU91aT{s%`mwN&{dYu}*nBi%j1L4mEZ80Ji+ccAT6v1;+ao~1a#y6{ zX0`$Wbk%@IXvSKWhk%jS$9uDG#1x5>#d*SrE0jibZyE-^Qy=nFHPPr*BN{Shww3f? zw?T4Q3*bZRRXd8GE8b8qw{zT4E3xCX+7IM4tP?3V+N*y#dsa9jRq5{`wf_WK~(Q?2flJ2ZZ zwgd0qJ)FqD*9yF&f0v2;CWHu(gb)Q}Mpkygdq~wDCKuv5Pb?l$f58b-#kkQ4)6H&7 z!%w&C8c$;38vHu%{bVVy_2gHMecauLHiJcKOvUlzWoPc|t!JUf-+kSUe=$>EYJPPmQqUW z_6>1QaOzmOM3+2g=VixKS>f!@k%?aHYXt&-v)@%X!Okw&q|4M?Qn6J%_Es+x$X-&eS5 zL%H_~i&h*G$eiL_1#o-)ylOkBuL$u#-#QIiS>vURUD6VJa4n~6P2}Gmg4Qfyj4ZlC z(8u)B7wbZSm(%5HINZ;44XY`SkVWVtS|yFptwt~|{+zc>d!g6K`ivIy%h0h!u@N#E z1?E*_mEPh&1Q)5%4_rj*W+0^+AfOJ#fcnZqJn)+~iB_cBpdtbWFJa-vGZyXgZG~&< zhrz<5a*;rKO|GsUp)2kKLPtvvB6t+#e%P9^Kq8-e(1J9n7#Zo^d+nbb{LV@U+;@;2 za?e+&)aG$B^qEXd;{1+YDyY`j>uX7njdpCup0^wdSOi=Mf41-tXpVpqa=lOW5UTwa zZD1U7KN8M+eEjXmeI}3K*?#+-5)O+P;|iKO>!$t`Pby2s$#*@j0X2P6M^dpAmca(B zPup0aS7(t%JHOW420BK!INyL54c!m^j6VPB2<4}9d=zpS-cJ{T$L@Me_mUttk?#%X zZ((og5%DYXR_-g2kjue-zrB#H#ViNQzHizc5#{;k4d1UiJ1m1~>4T2j&u6X#L|lRi zlFm2WGLg^PW>aC)#l3bo6sQ39m9(FD#DT*?9*ggd7P@`Wov>j+H5JC9;|^S#k2{#>e$vBo>sOk078bw)s7g0iVv zq((LEK8ZZfi7Bh{kk@YDG#078hHSBuIG*W} z1KIG=G#3H=dg8q5x48W^$5~jiQ7=qB%eD6EbRL?Fq?`%qe7CM}wkdYDfhUKcJ%3Kl zUY6-hxG!fU$p!aPk`8nPn9b=;c_zO3vPS;x=~SQ~US(f#t?pa_tRwE+g}CgN&Qr;M zuoa`2I%@fM@dB9nzv}HjG)pE+IuZ%#KBn7iJCi07mIhHXk$~Rj_`BCPn31VCk02bb z)WZ-Lef?2i{@Z$VJIuqNKyfMOfo<7`FrEemBOAC5TOXJ3qtAFu-1vw`&0OCcji}8X z&C2T1RiN%=BsI%o_%)9ZYPbn?`rGHmB(|)Y$z&0Rs~Tx!_=xq7+9BL7*}4f9i6YN;T`Zv`?8td7QvT3&Li0d5UF*=0lc6u(Pk1?Qn3_-uIWP-$oc$!ap-7wiV9b$-+CI9*&)yphmq_RSK#qc)d zqsE_3{J9c+0QT6lh1>(DP*08K>!!r;-Qf>WC_WZL}}CJ7mtE zfWq>^d`}2b%+*Ajx+brz-HpE5(GEPFc6gUBjBX8?uJX0Q43YV5e`WvLYGdhN6{M|? 
zx^GWCUdNvQdc1OS{?3t9MD;D&z@CQ%L-c%$d%Az!_YNl7foh5}Z2#b_UhI6su>Sbp_1*=HrG)_y~r=VSu=*9FTE`4`&Y+Y*Lq}m@$@F`m(q#^|KX|WyY7>11X;MYnn%f# zQCIb*>G+5r^r2I$GLbQLTK->7>keKuqMJvXjm~lEQbVGrid(`M)XQz`ta&JG%~x;{ z>k;t53@n|A(z`@Ce(jI7E!dNW+vfTp)XyB#SY<@Jl}sEW7vXz!U5cZE15CRDsdXY$ zwvpCwm1F}m={_t-$N4b2_6BrkbqLe==S-V1Bng+}Q77zN)Wtw}Y`C9^Q_Z&^wrGj3 zC&$ex8Hu+QvJYZ98}4bwDi_sy-qYn}w;H{_=kd$#XfR7D9FFnZenau){ix(JdlVz2 zTJPPmgVFOe;aQCgr=}x)jZ8mAa^5B)cD|GQZ<<38%wyr#6uD~)<=(0 zIy6&L>V5}Lar!O3x3pTyCNsVq_YCBlv4|S?I~lD(`5%%#0xzkmop3C-GV06+FYSB- z1W8NfihM3IU*@}Yrud^-X)%OD>bBF zL6=TQVBDBAElD}JSAkStQz;muPpbcir7~-vCOa$)55L~ll&mC~;{juc%}Ji1EG+qg zC4b^Qzz>U0!&;>~tOc_TG*VqOWzFg-GS(to%gxxG2kX~gbe$O%t;Jr9WGJG{qFX>wY0m^3W_}s2>GBXC!iN!-=gUpFfQ!?CO$4>nq_$K{s&0B@W z{_vGYeQLhBi3qK4F5YCV;AizEx~>lP+Vu1^t&GtyGRvI|B!~FUtnwVSd?xy!NNAU3 zjKVS%CxeoY?dFb6rrjlZW)}v}rmST9h?hpslZ5LlgHtt&#p5fnKgH;UIek3++Esnd zH(JsnY0x;G+A8ZrIv@w2hMAEn! zrM++(P|)eS8Nlb|+J;%kaGzoB7|1o<3fwiAJ=k}nvLzBmxIC~O?gB1^DAB7EH_6NW zz2oC0ZTIPehM_C;dZK*)=Z8sG8wnxv`Db)pvFY3%(>|grw;{X|&-z+Zw>zWg7v4&2 zc8-1E>ycRws%bxDU;ldGzWz2fx632FRobnFkMhxr*ID)lNg=H)?2Od} zhf6cFm8ja~WKSv&o-LixhQ-yi-+)e5cRQ|FScg?F-U=xG{c4kA>L_{MhpY<9rjp*E zL?Ohu#tJuSqYhHmdNXj`2Q!J*fMJaR^bDMrBdk z4LV4uOP4SnjfP-RNFDiZ2`GN7tP~zlyRT^Fi4)t+2qVEm;Gy~p^+x@Vdn-e_U*ZE_ zWQ=;p(c@jygw&0A1wxo}!w)T@%x^Du0vx>(vIa}5jr!_mB%3Ib$JJhxb z3RW27Qd6xy3tEaP2I#GJ6T5Ku++MN~H5Ozrc7dac(($cYdRmgg`*-hXB4dB;q8E9U zN=kbv(>BCzC=a&~LnmF*_PX&Tk5H5DQ<+|0KFa003EwW`#e5K^=bNF7??N!Y$(R6c z4foabBP%coSlYv&JTCD^8S2kGIA)ja!7wk&LcffOLmL&nL*<$hP8hvY!m9@ zg6$+gU6%Pe-AQT$=O!R`O)d*HfG9uE@d5vd$KPedo98UpU-GA_4V+5F2dI2S22$2O z&Q7jq%$D7N;a8a@8@wg?xZK<&SuQ(4(~SjT>%)s+x5O5JNwS3Ok*ky_jh?Z2E*jhEPea&7VkC9N`%N0jNUXE_cFDaAanH{N#Q5Y^43Nl?s!R9R z&rNG(Cki#OAYuZrA0E~`*Wb1z7b;tP@W*Q*AtdsBqkT0YXLrsolS2B6g+_W$GR)&J z+=la|;9|ngu#6h*I491q3{lrgHu$J*)Ova*vsl$I*R}1qdu6$NV!Ce{p>Jsa5H zwhP(n-Z+wsW_1WM4d#CkHqEfsvDF%t5wa(tA^k$&<-Pn@(%t3H21-kIK55hSXA48O zFX*MCFMRNf{MW?8;W=B!MJKeD$7su?0AF-c&gL&;VU-PPY^U{Qid;*`9{zGc<8hye<_4WYI%2NwyqQy6@Z+Sc4Q9KJ`C 
zv||8;(g;!`*&po0o2gRz*sWwj=mH(saH0zV0!wX!(wk@vKQXl;F!Aql6~MAn=yku&HIr zl(XkK`Gv_fQGLv?d=|FsXdX-U=Vf8hYURd`kJ5`|)BAKht8@%QwN_KmLj1z_hjmHb zWZa*l@6jLMrAiOv^LHu2MZ7M2S{{;x`^?i+rsPt3*XvO$*OvkHaDnM+#txh9v!N}9 zkh>(dWCTyzZ$lI>l8vU6r*I*W)Yg&VJbB4JkHdM2X>s^lx4Qtm@6rCEL%Lh z7*Gg4MB!?F7JOqsXoEdE4(`N8&l?k+{X;dQl`??Tfl)Ex|yeJdyslQFe+Ot@51xvx8!YqqdX^ zgKI__`}7)%F~UHzjBTu?6^)6hJG}p$-1g`3Qo+KnD2o9sNQ2BFci`2Ik&{eA`@WGE z4QS4Oh)^>RoX*Hx#_ZO&J=|LUuss1LMFL;$&<92rwCxLK>(fLF@wXDAjNqT|BTQaA zsvGxEAL*$usPy=@;ah_G{B$4VQ)qULm8%T`Dyu=0 zBaaC>uD-4Lty~anUq;a{rUF&O>A~9Yi;?zy6Vgpz71!VO;}K!-RQf0Dr2#~yyWPnF zgr-i+JZ%;)edk$fu+1<)by-05$7XFa@&tbRs7I+F ze(D|TqC(H>QDx@=vmPbB==V{tuEF#VeKP-vwQ_@UdU7r&;if-oQ){6CSGKO*M7Y5P52^eyNG3yyvKxXM58> zAs3TpQ?(vs(kIKeXpqO^k2upd3=$QyTJ38R?_M{}L!xFZ18VrfvW5?7p^wyt!Z}b_ z5WDVP9FSR4>q)BSOP}O;x4N^!=TkG1duM+%ByLlF`K-`88MjuAd$}VW{3=saJ$bqu zC-r0WlbzAeD2!!bVoirh=$dN)&G3vyZ*X2>?~ObBJvX9wIy0qRTe$o(XWYw{W z?kX8=gQH{Cs>4>%l;-V&xcN$+&vub?y}u^mL4mQ*yxAa3Ura-k_-`+2e~mX+uf z>cEPQ(r-h$!x_}&`KDjvX4^_&@3x11$5}2{8bW*B%YNHu)3DF74$F>yyNdfOIM_4Z zIe-*TuUj+1AXAauY)}MKe|a9Z{Cw|}VE}Iq%_D5MX@na6;%MI4Q{C8V!ORy_(`QT_ zv$`_I!Od5o#~$39dfKEk5Fp$$hE{KolFQFH8rsq+jJ;D_`1_#;$8PLtF~o}g5%(X~ z$xd%L92df*BmN4U=lcX$otmeJAASGpN0i3A3iu0M)5IGd-0KHz-+v8cF| z&#)#*Vw<3xm6a^qr_sjsGb9ZcF~-(Pd?yjF@r-d*M4U){ZqlTL8^75(-fppiVL+{( zF3L7%XZOd`c+izHv{v9Eyggy9lh}|Awron2#}4=rK=m4jdM4C;?x|!z2s$T|ncYds znC9UADIHz!{lkFJwuun@jUqzsioJb}WV^S|fcRB{Or71nOv3RUnS!y^X{J@VRns8k zmp_S3B)G+DG@QYW`8fi<&Y>P&xqim=h)A88m*n|012>K3NhaaiaW<6V*`N#A>?Fv= zvd#LZvYr77=72wi*=@mB{X<}xhN5V%;>V&FcR0@vR}muHablYdmDIX=pX9QujU2It zA%V*4A)$h4wN4mZ^fE?@fGWC2`p&}Zt)DLBt=F~KLiCQ%@Y+c8?+TUV@e${eaoJnj z9UU*pKzrHSfxaXN-fNMW#~202$y}cB8#j5D;b#3G#@+)M?ymnEzS7hPf@sle)QA#M zvw9FEg4Ki7Th!c;B*J+AwGpLu7Vd7l4mGMUUs zSexHD=kxuv6K@P|MiP+rm|mhpVkXMvZDP@+Ht=UE)EQHIHxl`7grC@We>9F zS9ucT!h@R0Nvs5df?}J)5CEo9P9V%8yY0+hw_VihiHxcz=9$n!t@e z=Lx7!Y;Be`a9dX`{?(eSRm{=s?8zq~S{T_)D@3uplJmCg zqU-jx^(qF9<@4LEcQm@6EW(_&4=AO7b89DhyKthLWr$EErVN^BFes#aQ+8gQih3$Z 
zTC)wN+O3;iJ@$#->J&C{=Al2(%29F9s2_j*AQ|G$%tHRZXy>eB>Nm)TpURf7>WVP0-bv9NcO>958B}N<*Zio`6GKcL;UUB zc>PBV#roFEd*imx;93LfY5gJ6>sG zzWVM?TiCd!wp^T`)N(|(T%U|v{;gLt|CRLR z(gAZ11}WE(C0_AzA#JFyX;0uN&mwe8sD;o10gcyZZ0p!ekW09B!Xmg5{nb|8ASsYp zv`O>g=`zkGRKU^-F}h&IiBBm?vU|>f^ty@H=z0G*^FF^c`hiILR0L^$DDiV!DFc6Y zAC0FF0de}LdMU#KkDh#z)e1JK_cJT>q9XxU3_cKEDbPmKS2+%tBZPxV|x?orM zMXzY!rdc&O_>vx2!sQ-{5Fy0|9=IFar>tn{gjtpvM%T@dmss3*18?iC9;^1a%1`wI zY)dqg=SqHG33H{RGMj_+Kr z`_n*u0xsXqriD!@FA1SNVtc@VvkCQxj~({Ycd7KOkk4WJc}E5^5!~q#h)t;ZB+!NL zg0}tBKlP4+zv+OrlTm8^B9I$BI(+mg|NH?Dx_M=4@^9zL1@FqZ{8Ah3`7?0dZQFmV zXg@D~q}91HF-VLmoTnPWQO>_28+mACeZz?RzWP?=xwY-$VnuuGjnaI#j(b!vv%q>{ zRMks}fsG0E#u$wvH%pvwyJ>t#>mKOFt(ae&qP6JR5kA8l?%~c~Biqgy+Wq&$>(=FO zKP~@Bp!eR`=kXBO4|}pNx@au|qYJ23-us=%9e&$9crjkC@B62wG&H{eNfu(O%6vH) z*Wsk8$5wMCsmm|#}iWCbO6ThU5ZD;EA_Z$O~k*K`kdA#4r-PEI5F^_DY4Xc}}A=!<=J+0&qkds3b)@WZ* zvW?>OkT6Bnp~oqB6(W=u1DpUFyM@}S+t}=;)aZKVo{Z9+m@%QuKAY8fM?-=ti3~+J zq_auvC9|TlJlr|aQiL_c7aZS|oKG7l7+*11S!*jJei9fJ-C>jPV6)=5!z$ri)ztRq zjfH>0y+SiHN%Ux+vT`5bgykRx4pY2dYoGoRKL3rhRhXc)5bUjgB^MSdTgqnsXi5pH zA8r;D81)!6o(d zWl;_(yuaxem)F{>t69Y=1&xIaT>48oKLcCnoH`INEbj-t<@S1SW4LqC$0jnA7Sxs- zR%v!2p69#JAi0e0M)T9Z9wj-#&8r{boXAjo5q+BrydUYn(&n#fV&R8}M8~ZgTJL4i zcx)Ztt!TepA=g*J8&XjYuIbEY2K$c0sE;OxPm-YHo)|TBs zoTiA|;@$kkg7=ZE!Gf^cIU!X-+pWH{-vwYgrjuTJb~+(Lc)Bp}W;qi|nm8Z$(b<|a zM#(+%G)O!#rM;m?<3=%qb=>_o&&*dPkNl{*VIsB&aM-~JFFF`NU_)r6+zQ^X+tKFu+DlCtLC}YFSsq^M8+xxs4LG@`@QR< zE5tAlhU%Hao@cwOt%@P8?Ri@du8TL5a9d8XYFG=9)we(Z^EP0u5XPVD%au@!^=lP! 
zqE&)M#Gy>>i&Lrx!~~lp#%fJLX$1J&)oY2ws9k>sJ}E-6M!vCC!@`yn6(cxttrD%@ z%ngHmV;*2bhB8>`|Ec#gwMIM^>lyvXM}#naZ4)LyR<#^_pOXKs)#_eNM0Cw=oA9Ud zSTbZ`O3j0Wa4Uf{VsLpB_zkGSGq*wontUJz;PJ-J{o|ya`I@>!e*5dFwG+r{zp{&efn zn%JjbigD|KzT(WmyHBVr&Jvs-j(=+KYZ-f;)QBrkecHHt#xj?%2;g1F{v9TrpUs3@ zZ8%8aHa7k6jOBuSa^9ckPF^tc?|cu5s?U43faFYgh?B-yKHl>>GLz2OTz`wO%M18= zUGLS|ABgEu?(_i8vzx485!mW_F9J^-cNrgX5h0deO|q`f9>{;O9y-10a&k))V&t6g z?aye)^|{kYlf%WGA@_|@(M`^Z{d3j|F#Ec;DVDbVqb+mdY04qre>TJK);MUIe^^Nx z+Wy&j)ApUjrhay|JSf86}Xug;0%TQ3={ zXQO{a&(k&#-Z5G@iS544h1Ao)J>KAKk~6B(K4rF2osasVNedIKAVzgip~TQxKKHEj z+TwmZr-ha6bUliCddlK{$BL>g{KuhaD6i8ytv;zf6(b&Y3H7v+_DaqISADSQ2j?#CYi%$R-S*Q2s4MKENsq*X_n?|KlBJ(kAE<-3>ZSBe}K z9&_@m6cmobHfG(K+q}!bCqtqm^AB%GF*Or?F0L5dtw{b2c0Z4+>c;O>YaHe7^nrbC zJtm2JjnCCwP~J(#R-o~gMI~7%qSxY3u$jr79UpU2RCh|0YH%|xzcixYgOw!STfv~$ zsB!Hl;r4hs|LeVq8rNAVgvDn{#)-;~h&_udj0SzG#|#ydLE$SFeq^<@7GXhBgDNe7 zyd;>2LY-32`m7k+O4~yH&r9yxHV3~_hU60Gt!d^i{#eVL)z1HlB$tmpVezBXmc8Jc zJ9~UVz_{Iq5q{adN&W&^B|0J|ewFx0niCU2NY(!=EHN=8EHrt+9)34khOA1x6m1QZ z->iht-K(6Vq`Id5Vlf&e>`8YMLch~dJzMqSC(FOxtw*Ibb6sq0FWWcey*@>i zp(>Y_TVr7sk~gC9DLSdfE{qmda7!3O63h?Y-M8C6^-S_Q!+x2NF2+@hNra@wmm zccf|1EBAQib%RU0QTc)jPx?%$;acLBh7_4(3XtvB!*3CDDBSJ<8ER2hm$iYa_>6LX z$rsnAtrVB=KhDC zud0*ZjZ8jMDe}5-{9v;F0o*+hW8k+w!s}M*KH-~s_rA(IWlT3zqWS?P90h@AzXjWu zv%SqCzh6~Lt5(-2k%o08F~phcK}{N2ome3s)$x(~Ejk|LNTQRv%2OE4@IB+-XU$BM zL2~7VJ6_MdM(CWRo-vhh%<8(nnz*k~CB60mk@=OApR96Mq!m|U(R9z)-(-oOU-SuZ z9R-+lR(Ql!DQE^3CmBox#Z`-ak>Vz5+;yii7#w_S^ysU;S@WQ>O~O*x7xg9nDJ`1@ zKOc5@Z0)X3ToxYZ~&<*gEEV-Oa zh85Otw$RNis{LZgg4>wSX`1wML>~IvclfJF`)?5;Ger5MZ3^cyb>+qh^SkZZm8Hsy zmaq!9Mw-vBWKGXQoV87Tj~)Q~&C21|R&uL{e!0X+yRv0x?&x2{zPo)Z0|D?HZ??q~ zW~)zWQ&mMH&utiU0#@q}$JRqS?Eo?b?0JX!c3)}!k}%MOtZkXyN?tg8Db8;=dp**4 z(iVvo4RH?S&7VCoox6yXZ;vmc&9!6~%S{wN|46m{S^k`5s|8o^LIEyg8|=DsOLA{U ziK%g|m}UnPIE4v2oP-KC8~>y16{`g9w@#uB4{R_>3w?V`SlPLp zz{m<8E&&8VqXOQ3&Aw6A_1)oAM6ng6RgstaL|{|URU!XG$ekqZC&xroHZ(j5Vpj~3 zOe?ZjD}XXbXv9^iSPFDQ0q+JyPc{Qh}{LG|YiphCkfCJX8dNIXA}L<+NTwCbDP_ 
z`%;j<-YaUWj(vxvu6-9IYc{ofX)mr$ut2OoPuuR$F!&LL0U;{BU;A)_sj5P5x@jrAhpZqo~nFv3yJXo0R3x0ATKrz=*a@I9<9Iv+pQ`@V7Q=JSog~ zb)oKAr?S6UkbePVZOgrdWGz zD{;t^pTyWK&Uf5_8-wA`e;2w-M_nTJG5M@t$qLn)yny%Xv1~vcdCW%PBf+gVgPxQI33Oa+x$lSU+L6; ztcD|m7>4o^iy;%7sZ5dt3>L-Yh&v2M`lPM$3{RRW^^nPxy`;IX0fC9}a8J`3dq7qD zGfvf*(`4NV>qmxiewRiA14c8l^0xtTt+-HuxN65jfw&rOwsEae3x}8DM$Gw~&N^J9 zcvA}JCRPpRlwR=(4`g2#RON3LKK`7dqGiV`b@jPG(u7l7<$#g^8NxIXdsRUCNZmEt z0ch3_#%gwd%2-gnpa-QG*Z6Zoi-*?h$&fc0tsP!tnLSfbGtUwDM9p{J)jZARaXrxT zlkGRIZ2t3a4H@(u0%-~@;XzuZmioI;iW!N6uwKnn8*ZE27cg!Z*d{NLE3oV^Xn%93 zltPwA1D zB-3yJ-G0(gYHI$bxdnT^K8w>7_Wm+%R(#z}uhHTmmGS1-l3MCoz_Q&YcYbS~_ur`$ z50`Upe4fmaKpNADUr1GL@7HUh(-O-Ee2E8}lhzde2{|!WtZfTV5~Dvl&zt0>4=u!g z|8xcDuPtVQE)urEC&g)LlI}Z=v;CeJEXny-O0Ro>z;BQq3_Hkk-l!gV64JAjeIp zlcgNQcg&p(e(CPt3MY8(G>Pn%p9^MD&G+OW+0TjF-Qs!gk*|ksH`GqU4e9C5?TeZE zER7v{bkpM3#1A|3=Ts_WRSMq2?yL6`79qx!HbyM3Xtgh^+J42Bk6SDx@qmE)HLXrp zn-rbzfh6iC-yT2dH{vbJiicrgocui;X6A8TQ_$4s@sO5oHENs)f#`3uXyl|@SMy#) zDC_#)8&TaSg_1#Z$u@XdixPQH-Wa|}w)yKuqV7}m2TSW|{D1ed^c#0e2z7=7?=R|G z>(=0?a{?#g4o~~Fv3iv~A#%QicBX#WITzgTpLE+V*WZmj%v;i}KA}!!@)dyR1WBKt zY)Kz8X6U$_^^pB&bZ;$gz#?tV?OI!Gug#roSm$_;GktWFrabCn!_!Ch&-)PFF5}Q; z4Oy|pzltv|v2Fo>5qiRsM3 z)AZ3-a{tCF}YVo#=!2h@D4xx0-(j`NS~{u^r1%;A9WXQ`#n)`erYtp{1JA3eL12gd=AyDgx@a}(7vxes^L}BPHYbkj_;%tUQ0MFY(w*J#MwsYu?Hwz zdLZn$)Wy(?pt6O^P0d2@~$`qDOShlRC%Dv;rF^+ zqCg+h_52E1#WQ?!@>zv0A5J?nN_~m%gdxZ!JBybez+5M>w$YcE>(JvIt+CN3P$4vl ztDQN|vrTSB?#A0#2AkFE0=>$H5{;vD{Il*^(N&>`m~iMD)~+m9Wh((H*x+NM^h`DH ztmqFBhPk%y6}F@qY}5KYAs@KJMjx=j<7$?rW^LjZn1_f_VpXGkP;PW`uk%YAZi^{8 zKr)X~^p8A&_V(|Y`c-Q6kFJ;{(r7m0RiJD_T@O-fG&979h_-;tzQjjm(&y=O)nM?0 zz(L?!;u<9+RhI$>P_J6Q6BwNnIjSy_qN>_Z?M>^>pmmmggNk9MNSrUx7%5QKl)EGDyJ zvV26W^vqJh`Pd|ZLFu8m1A(c&2S~v`^e8IK`R)M2Lk3slpRehyz0@OBRg&Vv1V(&B zl^wd^Ve95nJq$xE>rj_Q78E+(wl*w88SHF^n0jR)r|%X%(d$7B`@!wx4iPZ^_d5 z339)me6RQS4^?{?kfyOa$ggMQZLh6=o4>VA`20`rVEaF+(Ph1Pv3<2e9S2*-#Bvr# z#iz3q3#;iCiusiIR<00S$M^e3If_YYw;;j(l3DQ8>pj+qGK(ZRhr`IfXleXK+sO^WLG 
zAz3yqb=x$MBYgVP{9U}=#a+9zRr$k_$o!?~1i9WJ>@h7%-#qa3L%lF5ywf;mF_++X zr$N01It@v8Bb1_5^f|C@(gwlEqTvtyG=ID|k?v>m^4)s*A#t>Aha~N%fn4W*u`7os zr7Q~p2N&w+@`GilIC^};M-_$ruS&WhU9rdP@*^6%16{y6$UiqGlgCT{oFatbh-)aOUlnet3)Jx0Z;Kp9?4scHPFt4IYolr}!6e8KVH zn1UL8gN$NuH&$i@wsgoY6!8s`s$a4Vk8bXOOx*l{ZF=7O!q`AL-AJOxBL!uf=_LB3 z|7p{MZG8&6v`jjUyPOG`6u=*q`;G?v*6_E;LrRUQlgvZDyVg@+>qLzHQ{nWdNpZ&0 z`?`_s19;I?a#cU^lsO#Q@Ov6#cS*RNeon)j3L6(V=8C!n%I9j(#Z= zk?W~2u}_4WDRjS-qlNxMa>8>=_I$@ZE127FqCSo|B#MhMz{p&F!4QK{HQ!6D5ITuz zjMn$SKafswVTMtTvje_Q*?DeoQKg9q^*HD6>=%V5mzn2O_JYX=s~-@5Qw7KxZ7uSd zV43%f0Dcvs@==EqquyqwdcTp>Rw{uxq>6u4F|vI&@vKWlSiyPY*(KH)rmNr;CsKaW zBt*ld8uOLDn+uXiW3z}@PEt$lbHb>;S8T9;dC)p^&6eT2A++9>Ywqli;_2^$kS}2g zGO>}&0mR7j?WZjl+dR4SwIT%sjc-q5a-LTk6sZ9lr0OzMK+gP`l`{I?Vx(oUad_s=(35gn7| zCai>xoJspF7PlW!7YLKMF=P-8jZ1o;ol%r6RuAi$0GZM3W4ISGFA`PEW7QA?*6ott zpMa(Q#)HnG?ew^czxi?N+g~I0d}$fq@3jBU-@mDTAkFf$B3==4#Mz}+g!O!831}E zbc|B+-iLrNiZ7O{x4@y>-G0=XK4SLBPUg5iBr+M@N%@|6`1 zB%XIinoAvM*njSi5Z(Z)txz+Y_8;#zQN4T3pNUZ<`-$x}cJn-iJ-xupcd7o8wH|&G z24e4yW#V`g&_pgjM)s-AHt5Z6Hgm=HbVA@JVy@gU!CJ_~8|%cMA=JqyaSGAHWl(WQ z8fkUtv~4oH$Cv>YovQV1v<&G|I`5b+hmN5N))H^}UwK85G7I>9J( zTy!r@eTCcl(0|>1Ym`=82P8@yD>%8oGOqrAR~nBNXx-Pf&uJUZcGK%4zO}JxT$sG7 zATS!ik@p{%DegY)VU7KiKKpZ5b>oncGW&q3sqJg>^iKEHz1d&K*HxDgI;r)C6FP}p zyXQ6d=FCY}iS@NCR;M2Q#TeF0o{0hWv|Y*6RFumJ6W#%~9Cf&gk?XUGRdL;kA1RcK zwRAP^JWGXs*qu;P$1lJc6khR3HWKZI8)rV;Yf7Wjr%%Y zx|lY9LB{AE`n^a0N28_q1kxl?rHzZt5BW)g@(S4?2j=6etLj62WC3swNW5kDBsP*?1gi!;e;oW6mx->6@ zf0R&DH3cRvwF`}s#`)a>Y+qX7K3U`5r^pdU07ImiW0d+9cvJJ~wQo_YvRjGV4ZJde;bz&n*0^t%qD@(ddscPQ;7W`?cT}nw#0NaD4)_k zn2r~1OKd*~Sf5;~bGmqq_nG7{Xrl9@8PiFu0xDcmdbvt1>Ipta>w^?wkXryTc?do; zx<;(y*oxzCQmb5W-;honPTAhrTZRO~gKqEVJ(`ZMRw*>{m&wXvUFY*M+h+*g{hIC@ z&?yxtbfMB;b0xpur|r&)(24A*i?C|#Z&H|iChJLx)=!2QB_PB+qoQyMzMHk%OrSb* zL@0BNo%A&cPS=)jP8}*}pY%s0Zpxf4GV;z|OxqiXsD9FO+3)uqc}z`x0139FfPqkh zh(TKf^hPAB`7jyKNMo=#^d%g*`G1EaIXChPZY()!+=-h6HuM0}{Xa4zz^!WxDvTep z^*`-a@2dx65`($J2Ypd`4Q5X^I9cNWr(j&ezz2c2TF0vh@N7dhu2uf3c`nUpUEi>= zE{0LJ_%#ndfr<`r3baoA 
z3wkDhQ1HJWtG4|)IUq(=tC@%`u~ZHfpmGjxBsR+IgYGwkL+a@ z6Q=(|4TN00t1{zhG|_lf9qHTP)Stims8{(5>CiueG0 z+ioBvl1*uS6ehrE?XtwGk)UzrW+AJx*B-u?F}{=zW+OP@_NQZsQNQO{t5i1{*O!e2 zN$(0btVAE-0}WD(U6nSvvMEidqfuJ7NteM7@<Q3B?iZkIm(Eh4Se)cm!lX7znyo+%)52#R~tQ9&uVyj+=(Km~K zjcMS0>-{S4dCgo35y6V!ep$Pa$T&s`0+BRsJ;q6b<(m46$t93*R~(ufqYz$;EeSsx zeqNPLvj|cSJSr{PndL*a^>?M6?_`OsNBO#IA2FR2)YZu+7e^K-6(N=%Z`98#nZBHo zJ#^eNc23N9KYJDO4hng=-0!rQ)~TcOh-ACmarbTuQG)CV=}zWrao@c(d8S_HC8{nd52vL#eBS>c{Nnqw}Zn#JOq)5_H68)B0pRQPFn4(k9Pu z&Cbl9?@8D)y^@Q;%KC-lJ+bp(i{SpOulYFTPOQ$ObHc{`w2Q@{BM$p__0Q(&p3NO> zACpOLX3ZBM;sSXL1r*G?pI^qB@)+VnzpDYaYxY0tMJ3#3-xS)j^LwrH16@mWg6`og z*rtz^t8@VT?Gl#WcGEuqw!&^V+m#WC& zOH6(^j~%0lB~NDnB_2(lGL%W+Wre1r)8o?Hn_2)ZrMT?h7?`(h|8tQ}Y&D4Mc;-l2 z<`ApiBG^7=hi{m43A#5u`ox4>=HwwK`RS)qt=da}?w7^RB#ZBie$jcrl+4`&)VLX- zTh&?rn~(oLhD0$&f9Z|yS9QeptTVz^3%7rQ028nD{irfconkgPRsk`34ftl#8W`QU zt!D*mZ0vMVBbglQ9^ zf_bAf6N^RQfQE>wNhL*zRjUKmq|S8Rg%+momFb{Z&RRzj3eve;$dO4fKtLz5sYufmH)i5U2@!$~U# zmhdf^ABrLH2>l2$REG$B4SeV92z^mD{wUOPO&;mUvoM6^%H}C#!YpC&=VunAI66k? zZu@$f;dWSQNG}LXN$)VxMy$**$hBVvMd8BepRYd3fALtOv3j%@U86v0z}bT#t5E%$0ecA<@NHaTg}X67Y9xB3d@@Zf zVt>p;En&Bh@TZ}Ss3Ju8yfWAgLRmp90z})^Hou-d!sh`qAB~kLQb{YZSPx}udUu$$ zKhwWJlDx)Y1zn$k5W^h9b5p(ftd_J@oJY$V|5$==B`0(_J$5s|l&RC1xe4BXRN5Qj zeZtIrOVv`HqntxGl_7NT`~B2ZGS%4D2cnl_lVg z?Kim_vd7vAs22V#?5oF+1va(RT^08y7OB)Me3RF7_%Qtmj}PQT?=NN4-Va`T^5?rZ zCmt_VQ2miG21EG%!=l;|6Jb%E6~(PJ5gN~x+{iJl*4tyHc@f~=*T+ex%!Ditp2{sL zOInU_(TQz9pV!IU^ouq}r@K(6?kliQ(MIq7!-=`2IwN^gaGyRxH$|&wQ54yovn2fG z>uhQNd|Uo$+%X&51%dm~XNgL>u-WoEgjk}XlY#{pCrGglyP!DW&71wHCOINFJ zy7`nZ9>l~sr}lYw=0cZWE@$5}vK~QL-O2KjqiuEbBbGn&^1Dx?@u}XblOWKsu$)ZN zXIqbV=0HZQLh9Cb%r>t8OSF<>W8l7JRO~6rart3>{H&BkOc3&n$@TCTX-gT~M9&_y z;7$4+PgG?;@`!7+QaEuahrV zg<#L}oqlk^!PGBbRJYeKT<6hzlq;6U&$}jkqAd(EMqQs{T_w7bL#6`@8lEPOC;S(F zdb_WsCN~{jRtw$TfL3k*Ojr*v-uSfG>RWYV$Xi4uHkr{z_?8Gm2T-L^=P3Pc=3zn; zv5_r1d|bg_q#A5ZZ2fILH%!eTfi~PPrU0{Rlm@tr z@at44#}ezzKk7U$#(R38+&|YSuR}4)Mhp)1GP@t#f)lZs%QfO*0zwQtKRw!(IzFz? 
zWO)Wxa-m6bEm{|Dj(mC#nWzTN0+145JEzdV{#(02WnK2P8}x5D0W)lxI8((H2>cQ9 zGBadL2OHV93Plv9M>}2ZHSR4_OSS1htk-rT!XHj(_DQWubZIroM%Dffen<@irTd~) z6zPPTmz-xq7%RVG5d!o^_`H#P7=`+tT1%N7c*iQGjZ2HP5YcD%t1!}H``LarB0 zUZQ?E6)JU(nfKO1ZPqy|Vvmcj0Q$H-XKTHYJHJ13HWZZ3YZP`aY zwdACbbBZ)NnVFY&!%j3_A{3wl&`COE#CCRrDW;Nwgb~^CEx&5R4KO`w<*p<6{ zxb?`>(O|U3zH*zr!B1Mh;vgNJQL5-dMIERw1<7J=lEpBM`;==0*DF$LCn$lS1@+xv@o&DzZinkx-iI$X zjRR3WOmgP!cW}Y=Bs^xPZ%;3h#%fHuH9;i=gdJ$v-xFCe>Isj{r|g#vHI=FSp3alz ze^I;uEp3%1k>ryuU8ShjAsdtYN@yJY4)bHHA-!SEPkPTKNV-?qc-6_^^Ji*Pd0HC` z5vS!(cGo5~7B$OJUCnQr~DpDB+;eeUbJ9(}2^@=Zz4>1thgX0lDFAX)WuNp#lh z=hM;L)gbdD2*3ddVgB@Qu?($ zGTdj(jme6S5fTk`xYSe*9Nybl@y8)UiH&V??TQewT3<+Nku|#T*c!2FlB;}-W;Ts< z8cn^+dqORviEA%L^j))o>s=7+N@yu5F-j-bDAMW1&D`*nRK^p-+rRu{8)fjZgHqQ1AM?xH_n81{-y9 z5`OFgL*k-yS4ngkh!}UP80mcjq0#50x!fRhhYCY*3~p{v@Fx<93{9^WeuyxW%5St%iSc!3p{Cv!TMTanvx7}&x$hGePW6+5kIOpX=#EHE?u@f)aw&1AIe4i1 z)KXWTGv=7Yua%c~OK^H4nmyNC{|8xgN0@Hn@IU3Hff8hyV(7K-w=E^rA3wLOupTvC z$pxkdcrZb&?2+^FqXD}S-hR{mm0Czzgh6x%Gu{_mU;Pof2j4t++@WMs*cfT~NfDzfx8Gj z4T-e72rHtUiRqBFL;Ec6HM`v*2XWOK#7Eg#tH85?ArBQ$Wabx=p}qm^6+U$T$qxS= z9gfAFDkv#Qvk(}?MM3WTOE$V_KO&WzwbH4m0?k<@sF_fAD%jbXcupzAVDcXC*gZoj zHjPgHhkKYA7Hf9?AWaUz;xW{&cP3B+x_~!CtOL-D>TU^&O-*?(N}L>0(_E2bl}gjZ z0M|!>k`~(-c*_t0l)Lc(lI&9{PNQLx&dHJc1JFJNcKoSee(Z?*mlA9Fk{746eFn{2 zL&!UEjv~aE<_B*Q_{YQ*(C*P^ZDZ!OHm=JnZU4#7oX7SC?v=>I#*1h02m=P~zIx*N zfivp|37NgKP6Hn*tTmnWUCljn{06R@aewhBfSXTGn@bF@-TAm7Ck#T4*-9Y?<*WM&}}I3^T7daVc}#ncNfWZJyfWq1`kZIyRGw^a&}Gn12kAT z-~R;mhU+GHI*IxEtQf?}6s{P$Nh;O}9)==9Pw!@@t^sm2V%4^ zpmV}i!MRWHYpOsTN~^`_6Aw;A7v2&AceaB&YY&3_mL?er*w(lU5%eq3o{E+e7ocEj z_u{K^!fDUHjDc6PK84lzZ5RMT$-Jw=UXE#N7CSjAAg#j?wfOu!r{8t;l(4s^*(cWq zcKKQ*Mm?luALmgO1A(2*fWn4}y8(N2+b@@`L4&-0*C&KJds@n~W#s+;Y)VxLpMx?{yQNHWkG)(9&9tpu@?;j z3aGs&ysquk#Y}MaS%2culbk5f?pbJ+bBdruF~!x~Qf0q)vCp0-GXug)`+O|19l!N- z>sCyuH`=drRcb4vHp@`CDZ0%oS2>sMsP#1^##%`R-&Vfykw(c*wcq9zV-%XEk6ip+ ztAoM?Qa2=RddU!%X=&(Axn98>$lfZ0k5MA&*l^O%&a3d|Q^I)cd?hPeh;}Zg!Ams? 
z*i;|%a#foCx2w|ohLLSdFY?0SPT883J{bx`Mb#41qAN7P6_GBQ`sI4HAPi4yGZhU@ zV%3;Wxh3mX;z{y@9+0mU;g&`2e7>^eRT?H{j&0Gg&M2IMKeX^i>AwP3IZpt z7Ax_%0?&-Wa$7aA@-vu^bKiwIc!>+Y7cp>wz~$QeSi#2OXX={$1Zv9w|FcjCh{N)Y zR^DIW)!OaJQ3|%PK_=>7$;aCq$YhESc7FD4VJ7Vre+L5#P4Yhr?K08I=HjSe(5iq? z3#5TlS@{8Cz-s&vJwH#^GjS0(NxrsIcD%mb@?9z9nmgSs)UKorU_!j_gXLw@AaO>K z?M-i08*V<-Z0hQi%GHo?r#H$P`FNvIEdyJK7)V7h)`lUvJx+N2%)kWB27fszDooP` zlD9Tyte`ef*=TGi*(GNXocMPZSSyA}3K1KufFg}a-=&STeamCZrB1-AEA@EpRYvAJ zeP@n}XH#0ww$d3q<{#)9UjnFI9!19Uwk!d7Dw*;Dxk4!-z z*PKSPX)P}F8CjN?^XU6>p4;4)HC+0h7I~A^z1MYG6NPu1RkWAWcOTa#ZvhLR&1wME zvD`_^Xuf5zR@B^(ur7n;4ZKnHH}QM5M(T|qSRvf z@05UD%UOglTwip#Rx@>_w}7`YrInyh0yZy#OF!eTWeU;^DP6!z$0@lV`KG37E4z1l zr9Rh8b3xhi*eo_#pUKbMU;W|JQ=LC-(rM@tbA>jHx5^8YsMe`L8NW_(D) zpW#oV**W?L<4D_OfKf7H3gZRk7u0^vjudY6dO9U<;A9n{y^b9~AWLH97k%m^g^?Oc z>@6YO^;yZ}#aTIgEKXQ=AuC`0*)m69SUzAn1PL!_0D{i%y`H<2T?RB}d!J-V8pNAN zht=~Ydk0;p%*(Q(p_!95d6O$^Sy41Jw@{(H{1h_}fw>6TaRr|3l}2S1&hk(MXn8gY z6?q=hEmc5i!;qkMMQCBUU91&Vto*2#GSEx!kG<126K&6(9FujDynhE(9gw)O1lGMI zIw$a`>qOD~dqhcy(D(OcNjMCZj7br#*<&S_40J$B0>p4oJ$CF z;dA*x_pCgXxG~LlYnJ|96+2ZGnaN;0T?FK|zY_&au$8Q)2;TRp3hvS$U>gyNK=5jO z+AI_}yZPJmuuy>3HrEg!(nl<`XS@3Uws!FgfulhP%@EabJI>cMr}zMrDs&liU(?a? 
zuzxFOpcqCUl-t&(L!D-?Ny7_2v1Bl!ae#4=#A2I23X^ntJxensq^dZ^d;;%cpW)ft zo)=OOb(TqDJt0>TS6b@uy}B!Kef!Lnl z5)Wo@RzUdT?=Pog+MjR--e&MgV~&5T^)2~;784Nbm1W>3zF6*DN5_>wp0a5_3^>UM zQwH6>`JY6g9BDJTk3*{r7W6+;Y6v=eDaYDZmqIHqhvlN?Lmr1cnpOZnAOS z`sq=l|JWMw4h%BZ{Bbp6AA#$>mms0buBRH_*=xHSYR)^_3P@|;wuHFa~meq)L z0q+Y**`H#uj&fPF;}ID#3O;*wz3OB^ukn1&4n><7P~!Jzar+Nvf_);5xuk1}b?dQ@ zeGDO})v4@$WlCrA(v=K6`vX}!6?Z#<14Y*_4Dl6BRwE+Gt8Ay6eu&(zem8D!356S`Ms5OJt>38>=mqf*u-v!)LV^Pb; zQ1_e&BL*9@xF6e-w%UEhUV4M6h?F)SWcrHj+FJxa))>f3HcfP(RD{9$vhoK`bAHzf z-kRr^G}ES2wONOFM7vn18od6mNGybYa0T82wY)TDgX-FH4m4&GS_0jo-BX0P%XeWv zsZ*B)vqcu_FdGWhVR+Z9yGR2VZ&-5YpuCcw>(0N<(Feu~>BMf^V*B|V5=ZwDKFMlHw@QPa^oip!Or|L?$L;YIq+ouL9!YzM)n^)1?cpH^>#BA;D7=(lXj#347`0&j6b(h{iG}Q_&nk) zF+H(eRpH$A2EO6ltBTBc{r6Yk5l}OoABb8>qHsUL0;O}GOEb}bA$tD*p7fwWLHjv( z_GH3%#iky8AC$hsI_pRE)whU=@i!9If_|D)lcm#+QR0-?O&)q+BjWBFwZ}y0?SBf$ zl|$ATfT?qz!RZF5zQkDH^Ai}io2Zm z#;;xu0+-(RbLA>$0&vaT$7zhVW|fv*;_2#jzT&ZuUJud;bmmo@zi~w|1v!8M(EGsh zSwWc{@N`oTC5Fl3NdWIV5JR&ZtOe}g+3{e3D|kIiKMxc?S)@2Jt;LEm2N4S$ui{$% zHF$o2y(dGli@<5(YdRnX!8IxhLe!|3k9qriY8qc5+C?HV zJrp>h^R9TgR*cM{q`l$%9Nz$<(?qpm&Lky?G-SFOK)I zKgE4{Jk;yo_peSWq{uNN*(+q<_a$38DNHO}T&U4@Q@Adrg_+!k>YhK@L`|Qj6f+;+HOSlT$2;f@Fk_RL~ zVliAY2juEtpF-HZLBqE+u|LKNcnf>2nqOFflDYzgwq+*EAm#`wX(C7l#+N8Q2Sa8G zf`+W36Kw(8AlZrB61-PDJ;ct$+;c{zlaY)Eh9S5pn^X2fiV0m4 zOMS-=0Wywn0fIck-}kXZTL8`Kn@=8o2d_UPSp9YJ%v}(OQ`UIFVrZ4fT|CI%c@%C4 zyotEHb8)V{dD6bv2d4CEpK9wEX_Uzav@dtT<-R?I#I{i5+mh>AZx_eT-iz0T_{cQy zy*^Z4NE1@-P7f=&J9jnJ^>%^rZBH1miVl>+OZSVog1W-5^@7`LFlo0g0y)_S{?%9L zX9?KnR&N9)o<~^CxJPyQODktbfF?7C-(pMv!d?#*rnpR&HOiM#Bj~2QIq0E!FU>iR zMXBt4lGE;@a|W(0{A>si-+Uq(U4E?{KAaxm@kg>uDVZ5)*LUGXUEvamT4ZsXTU=wk zVc8rkk1y*^g{3l=Pib|#*mU?*Dy!*8V7}P%z&}>m-WxCqbC0hGY`(mR96EE-{txoS zQ@@C`7+Q%qSFF}-t}}mma(n3g)BHwDF7LLWy2kago6H5DGKOee>xPxin+GhqpC*49 zeMckTK(Ngz;#eNx(t2}+%&AVxBX4PY&0yV6gC5RM1`R`%7&pgE_S`PCTCU%{d6Wy- z?{5I~-zHO3+sCG8ns;2#UE8SJ_(n%P3$q*)P@4af>=vcmHiaud;*IV|+QoT6Fi)33 zChOhp0n4vnczSGtkFI@CSK41Z5~pArXzu$9$P5PyTZq(osgP~+KPvIKr!kt~PVa)3 
zAFOd+C`rNxelj18Ia8D7AuP>S)xT`T?$S={^G0X;n%ev6No0lP*@n2D>)vD4bQ~|( z4C>MXFQ&{E3@J=)rWMQzmvRBqWl{nz(h+pnB3093Ftn!84ND022LsBl*07c-R`eH~ z<^5I6lE)3ykO;m~e{tGexuavOG~ZX9*?6+pmdA#v$^01;4CZ9fySbcxMY;`iCMe^J zW~=*Nz;yBE!w4sK>yP%ldtgsK60dLJ1YgO^rD*4y7-018j|k57XX&&R(E{;+^3`uk zDzr}yeeJT|j!1jVfo%3^t)9okm16!}HF9Mp{G0jzmbGH7NUI)3$Qy1ozqiGrpk}d z_BjnoW}W*uFW4df62KP(DLJpC2q}O4bYVMp?VCg*dIQ|k@zu`h;OC$}-ZT8fXs}yb zFDP08;L*eH08j$qBy3p?6gh5k-3K5@sx0v>7pyKGON;?=d9Ya(%yLc1tX@1t_ZN-M z!(#EXTet zMZF6k;Bn}+b+oV->c5ZU5Sg-1Yk^WLE|+)Bm+d4=Sj`o52ZL&zO7G<70CY0XIDnHD zbz4#*H5Mqoz>$IH=``=LVV8FWsIfk63BZ|RDsFpP@^~iKW?OOE z1J_mkq;bTA1k@S4Rp@eg9JmjC4n%v36sy2x*h-w`xBEaZ%NkKTpR-!M_cZLjTGM4o zGfu~0a5uoY+J3Q4Agq9>2H1%QwMBk)9X<(*d@|2k>Qxz`3-T(c?T`srwBE zgtw+;9@Sjns7u-qKnMahD??qk*!6~rkT>mMMU9KPMM!#-H2)#fWm#Pdm*2k`*u6k5 zS7+KN(1yAXRE()v+%b+fP=t^t?GtXqpHUPc)Az(HDan&x8$4B5ZP34C5^uK3FnDUI zxb|xH(o=86e+|(2;&nf2oN2kRrQLrfa9GRKd3fCPEvV-pf$U6I-KP?xUyxrEJI#@r z6G7AKH}s~)Rhn^(OKv)x^^|wo1%WpdT_}M`CxWsE&_jE=D`ebpGayI1hN>@oXO-5b z7GMHG3g6(C{(9MgK!Re`j<<%yH=N4tVVVRcH|vWVTc(wcO5S=F8O&#D`HzAs_`@k# zk<%g$X^x;C4Fop%{R99y;6nMD@5I3IKc<%cXcWz(17(`zg%fAXpR&G#zufTKvUR9K zl;c?ok$U-C$yw>sk6hcgfF^dxC!3%BL&L-<&7`%0vt+g0<$CB1(fG%JEF}K`N6J`k zJZ;Ujj~VL1r)blaNONzmSygH>kVKyYegoR4+XW8?2j0(+?&`>j22r{Nx%DrC9sPY? zC0sG3C_SKHdRVIT8IN&wcPpM{_G zk#nJ7N74HRZmB~WT#SP{l;>6&L3g>*vE~#(3H9#W8bJvpHKfcVQx{^^vI#O`DTv}p zou`AD@D^|Z18=c^G(YWHqqib%DLQPQ$2n)5ktT=hl}#dZHAz;!5iD z1pey7WKJ(bL;a&8N}Fr(cW1v$ttp7|I-TTn_7Q&3aW3s1+J60pif4b8aqdfz+*_3y zs_!1b->Fq=%R#>5#e4&e0SJJ0`+mF^WLtUUrT}oMz5?Xkj~!1zB@Y2a^14+C!E%GD ze}KlKrDT&Q{dH%5c?{6{@}1I1LEdMp$@fXgW26ky%12YEDCCt<10`}6^gdQm%%wn=9$G&MYs(09v66c=r@Q^h}5Lv z{IM`MJDRZMxTsG!7h8EVtrt`)4i72N%#JEIAUqeU+S6A&0UWm4+N$amkGs=Es62|o z*9}P6WDRH$I%!k!agtwCz7sU!8Wa-+iEC(ml)>qeJlt;S383Fb zrThCgiW;|$>*qVWg68$2u6xBXXWErPH~P`DX?H98~0p*+)o(F=WHJ}J74sacF-Be8)w%Y2!%b^F+ENJ`em&`E0q|7i4 z{Q&yz$8+MC z57~h_xa~g%n-w6->+H7q?m9GXv~|KaC&c3JfSaC&JRol#+u=n38Zmj^An!IQ;!SX% zbqdjxzt`Y0&sD;Zr^~=^`3~Ct5s<*uKofBAn|{?|DPR`$t3?`^Ah! 
z!HM>qD!Rn(klcxwz9u#HToPoqshYuT0 z0~suRycg7svs85LUcPXbD{ICHa(AdAL=!aIK;t!R6-_RyfGA2GoD@L1N9S#TnWbd9 zf8-vrGDrvoNS-HQy7xZ1bEbM+!BMz)vR^@5K?#Rz1T06jGeSnBXL*mciQ>bUj@Vj{ z`_xhB`-wHt+HBX?TQU;OMJJw*T#n;<5Xbf0YM9nPTh)F(%hX#VzaAXr7|$0I+5fc^ zwH|WzuXSv!X`P@(Ogu$TDt+!9sC|gxz=8t1rP9>@waf8!7E(9WxLbisMtyHADYrwtwmHDM~1Q8fZQkda$E@_IXim*Q7|%?=yJ~iNevgp%;@R=jVY;485 zMccjajX`FVwoFd1=$mrnclZz>=GUawZFsNo1nYc0$1D4O)I){JlRF3p3#l*TlZ7BQ_;{j!|@#tHXrfOym$=blEP>(3j(Mq>&-r~)i< zQPwArtf^qy$`H>=t0|DhdGLj$aNu0Gi0)=2`i;Q1?*NsHK7rvK%4> z{qMjxIsKYYkQl_jm*-4gwjIz_=(7z1bs6uRt*?%oPis4z2pmZ^sVLG6IhZaWzt4PM zFQoFquzBgn@?5f>;(by^=j02aCWQGufrzuAB}EyUH`>gW>(yqe0o>S~nMlorS>@Gw zM!5%hMU^l}OE^q=XqYgmH`B2~+BY;lzr`Ow(X3%wMQhP=3`;jXh8X}^^vSa^Uuo;r z-$w(U=Dp{%&7eI_XDk!7Z>7A@n=`1IJ?bI7p3&)@G3U*Y((BZ}|IqbmUcme;I@H#F zw4&5r1M|xks8urmnm2LWzk+;`@7nJ<>+3ur>IZp)ghov3#ynKMRPfnn_**{ge%n-_ZA0CL719+`CZ+aN-(C4>uw&ItI)IV%P8PtlTT zI3vO1MOaZHr^GIp`syG+ZdJpyp1m_mX6``wd$l9R3HhH)N#N?d9BvHl@^^pY+drHi zh*SMgGDVC4wka=Ja1RzgwE*z;F--Gvt{M%GT!WjS^kew4-b50?`EWlY%L8DUkQ zoz8B&1{+7V2&H7~nSeZOh?q%hoep*aJFp;feFO4)gW*&f)6_OdP&^Q;!ld8VdA)lp z7i^De_kQZBrivLSFh+3r^cCN6rxdkAqpHSFk+zv!Y*k~M!z3mJ12&HF(y_4n94uvH zq4GS=uELV0(ba^c^3Sw>!54qspYfL$@=pt{hiuDAYU@P3YITp{MDVRT8{P#-WP>3! 
z?Ki$40~infm1tM&3mElOCu|0IMHi|J8)Si3{5d!aQYl2PPRnz8j}%tx@HES>*g=BU7Le@`)q#QCY)i_ z;+|6y;=JALK80;z(`y={63;3)1|R9+N1X9j`x>R-e?L1S;cpgQ7*C=hj@&vyj+poe z;^}6^HiG>2+v{+LM2xn+_1#rx1Hdv7N*Bm6%pch*Z8N9Ys(7J&>`LL=tC(^5)3>WG zWbZuBo#VGZV?QJu00OzA7Crul)l$afW?Zto=R<315RX;b zDrlqNgzB5|U7@cdVGAf*%ZJl-vLfQVz3e`*egZ6*1*GqdKt}$kLkmX=Yx$VkOQVUCnqvJJnd5M5ttYfl2d0RXFt|N8OMR zy(pX2$C#@@Ilew-7CufE-wZDD1&oJr#5!OH!(+f}RoLE40Mh?l$^?n&?iL1%(^MZn zR{3yDTBH|t#@;B-d~|m}tKpOZTM{DE0DA%%rv{_Ju9pOn3NhL|hh1HJp5%xX*T_n6 zx({%8s7c4QbFg_tzn>V5(!(*bMg_*dyjvUk6b{M&;qUw{vbj4E+Ce1b>PGKoSQ(0P zV#oTbCgDGHf@7c6vcD?B7`Cvsn!DxHfjUS?%n)*Wfps>&^4lNqh1?ni7$OmGOozqh zmMjBW@YW)31XT%MkLhshYUU^<1FW9|2YIU#>PXmM{6;9nF0+Jm)5gQT@|os!`4pQA z{C3~YFI!NeK4i>)TS2f;8NNa|sf291J`U|6w`{*TZJMv|w(k!u*~4MXcPnL5kGqpe zKI1Uf%VTJIR0V}&zyfu?o#6PFxt?SyyYV_>L=SHb$G3%6^&mW_P(0fCrxUKzk#`o+ z_y6W3Cj4-bjmB<@-h~!vND$^s7QS^N2T*H#1&+04{Ptpc#9KmAMIgBZQvrg{dQKL0 zmn!9NS%~$sL|IqI>{T}Cs4Xp6B)W#|#-9t%(-gAUg5!1EA@_}-$wttmAuc$H9JdRmjtZ+X#bMekwsdY}rTz4u1ix^;b?e!wDc(0(=3nvcF(9k`H1b z0fg`6`J?>gEy;R;`mLAqrM_Q-7?q<>D|^z`8q>Js8P1YgB>_FT9}~(i_Yht!p;dsd z#k|ut+xU?)k1a~=Q4+?S&IRg&wRL>=(&EJ8?CzxzNTgAIV0bQw@-MinaJWTGNXFaZ zk}6S&7MDdvghvjLx7z02O;n!+a9Ail|Mt>xjwuoT(5;D(73T^~H4;U=s!2tG@Uhlf zla+KcOSX&(m$PAS_5!apEwl=YC`?kDK~tEU*ZDvp@eh9*APx6#1v$ST#i|$I zr{_NnNgAv^quF?BOzJR_^?4;EGAb9}r+3v()28z6^_Sl`5?rt$(-#{Xsc7&qu~LSx z)y1+8^zwQu0N8lu=TfJ7B~@!#yIbT^U{-H#a(C87e+h9E2SFy+Pt);zp1jOk&$ddI;57NR=aQ!z))eY(MmnBDb*qPMFEtP*Q|! 
zHH(pgK?V>znmC42h`?jZaM#}ZYP56L-*jmq_tH#}2ZLy4NX`05g=+_ca3sQd!>J9T z{*PK8+@{j>!)+>jxBPSqIAEGQ_Ez`uPd?sQ%w036fXeJWTEP4IQI0!S=MW5jGP+MS-SfXULsUDFYmr#tmrhz(f3;BPt z9BEvej@^YkXP}RtCA`eiLGWZh2eZVMCCw9RvZL2Tf zPjzKsCuGqA2PL5rV14z`nzYvmVLDbs`eEUYX^pUo*YIw>J6H|ntVOkc z>Y*qY8BwNj$i1g^4aGa-41w0FQWsCOpj9}0j31z7>TlFA*ge&tP>)d>JMib=!ip z_sOu6#VYTII@Q)z@wAnSNgSlJRoaXy2<|3an!heypIyJD(&*5D)~ftEI+;@f>~IZ4 zPj+>tzWK&Rw|as&rCeZwu^{;4I&oP)2KR%H6C-j?yU2M0?Qh50(t>HOgoH;yZo5Nn zj7Gevf2*vpDfp|Kp{U!ya~Mzg9{&`Tz5v_(a6|ngxW_{E*@Ug$`TN*Whw(2NTuvZl zwCKWKE2vffR%zPcZGr zD6B*}yKs@+Dt8Bm+DtPkmqnmh9PtJO&s0>mmy^nRQJ&^<-Tol!KB)ziodESzq=)h5 z+T*g>FYjFDNjZQlR|vF@5A4RFy;(Uc`78oS)Jdgln(iCeEf9&=_usMIlGK52XJ}F+ zG_xqPmHHrp>Z@CaX9^Q0 zIc}he$K>V=dY_P2{JPatRS!iO_O_d-I{MSziLGx{5}M`M5+_G3m0N2acoMgU=7)T5 zAib~=AO6+ZuI5+NMtJE@a$nu(UgVntT*vYwHU8}uW}^uk!Sq&XaWp^ zl77JwWkm&&b3?Q7^_Y}wDTpGw>k>2`R6_1bP>{D4(NXN=N1NDu$was;ifB%q9E^Ez z9mttFK#nuF?|nd2PU@u;T>Vf18$&`86a2CZ_fY`uHUklO14+82p`DqDcr>YoELvPT zq7|lqB3+B7H{9N#2zK7Zz_AI5VGUZVsA zeS0a$Z%Yv^Z7b*2vdGFrlulwpuk#u*j+{mJwT->G9clLl}rRsS1kW`uS=Y$DoKlKE0JjjhG=vOfPI4 zkU4Hio*cRUA!>uD5nb>FMM}0@eO-kkUETOxEKhSJp`ou={8Kelj0Q4`=Q`0)pp=Mc z8Oy*M*oAd*6v!-1id?fbOwgc|dv0K-0mtv&$?>0eDB5Ky-tNP&Kpy*49{Z~F>Vg63 zz#vw~@rl>z5tU8CHc(g427MvZ+~y%yd?hcm!CX?A1nT0_yh~P8GQ*^73~A^yhCGWN$b1=7!%rbv z2oTZglGij15}*`0F0BO==@C^U1rQ1I--u*~$JJGP^VOo1CYP_jSKyMDn}4z0;(JJQ zJyWF;#@8DsnO~#Y$s}Q->ejx;8Y^>q=`UJ$HCX?RK&N z2shru8nys6yMaaTkUF1c9DgM0uG+g`zNfgT5k+LC$7xKZ!5nxS$vMdA>0EHv`F7SQ z2Z)+TDRExnt5#??`@ns_Y?hVz3^LHZeo^b2-QYTr!1B-ys{;c0|F`K1teec(+8$jW zFHgf!bPa6o_&Ip9Q?zAJq}XLXODx?=5k>NiroR^baiTcCgFpHNZ79~?DmNgw*jb!; zZJCnyF1#J@9AtbZ5;lA*h4mt@UuGWe7G@gbq}&P}=qmxdcZHeqC6#0NM(!?9u@nso zKD}%L?MIuJGvPzXxa+{LiT0RV;zQb#;jd%)=bRQ850$BOZ1>?WUxY?Lh|T(!jkQn` zvQdm8m(q&7a%}?}^yBn|Kh6AzxrXhkeW-}>b1I6Kc4LzX>9*!QY8_}pkp)s(LfR!J zcCXCG8H~Hf(B!B}%}qbb0n7qm=5fA#z}p4g|I|t2@PsF`{%MF0pRoRiC`cv_A1&f! zkHobDHe|S#_~$2YIk)(SC39BP$&i(K8@8#@p_XsE2ZsT&jG|lpZR>u_h0h;Au^NM| zD*#((Q9G;To^!>6!&k8@pkfk#ejlL1u);$Qm-QO}$(Nr%G2E=Sxs^_ZH}M^V38hq! 
zE`{78#PUa}k&5W4Yl~R2=I|?Fn*LnD}4H3 zR*rlqlIjlbMkD0fybM4vZu`%}{jH5-dTXttxdD_A34J~z`HU^<1g_@Wjw#`sjGsZ9 z*nPZ-XtK<8owfOuj-$?nedWJu_gr38?NYNqld7WV4YxqqJY~r;e^!o>v@IxkD}O3~ebaNhx+u*wl&RFm}1DE-i8~94zN;Zyuh;m|s@B zQb@%(M%O4&h^Z3w0*tlLY1XpLS$`l10Rc-4v$K8s``tEADn z3zF#E#rr67=XVGR3hMi^qYEIE$D@0m_HYs_JtzI5>%uzY8o(`Y!XvRLhE*1daWxA^ zika4UB>d+n)BhetAB?hxvJOT`{yqv2OFKuY3ff3NnQ=+!^TcQ!UH{KW2{{EwBeo?e z4=!RvdG)fPXr#zPm#R)Yo}N_jaT6R{m6Vm7&`bV)hiYooQ+pAz+_Qo}_qN?iaCzsa^_c>LoGt3@NK zoYf4zG@tZwYLq_rR^vs*2G$JB-NZnHpRzQ%A4QQdh;OYmRc4{co!M~=Rp_7RZl=Eh z!AEXD`Xe`Rzjseyta!Sh1MHh?prfqzso!lncjtu0=_e?XI>>aRNPfo_(NEcxAQbIk zF%(H~$K=UtRPRSuOa>M)ev`yQ-z{?>?5IUieol|0WRyJ>iK>xpwWU|wiEvUUQu5j+ z)(7}rn^=T!kpM+*XC+w`{2#XBaZ~NxffgtxWMhI;xMM~mxDiFd-KW16!DZd2tBT@2 zE%9C2v0?*_y$%Ylh@wab)X{bH{y!zyNIHtudaenY_^Jt-&yK#dv9@__iO=Taghoot zY)0q0)~TxEWxiv7dyw)!KgbR|=stK*`Tx^{Qc$c8wKrw7g+|OburYg=_=W z{KJO$v_tP)+2!zuURCiTA1B*{hQ*FF#CJ*a^RdLYL!%UTJ51?SJ=s_z`*a{B5AQfj z@PXXZHgs+{vxo|%yg&f**O%sVNAJg%TKOg*_ zgY7@~y%`%&5tki^=-8j0547k{JS$v8#axMg-&t$=({i^pn_I`sWwxgq$_otkpN{$8 zUugXDGr|3RufJ{gKWyL6qk`jn_VaQ6w3u&->1T5J_oV*o%|DUjKTJ<} Date: Sat, 13 Jan 2024 17:53:38 +0800 Subject: [PATCH 046/101] Update README.md Signed-off-by: Haihao Shen --- intel_extension_for_transformers/neural_chat/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intel_extension_for_transformers/neural_chat/README.md b/intel_extension_for_transformers/neural_chat/README.md index c0cad91a432..7010a11b1f9 100644 --- a/intel_extension_for_transformers/neural_chat/README.md +++ b/intel_extension_for_transformers/neural_chat/README.md @@ -4,7 +4,7 @@ NeuralChat ===========================

# Introduction From 1d6cd8cec5866510f1c77cf0dba37a273646aa55 Mon Sep 17 00:00:00 2001 From: Haihao Shen Date: Sat, 13 Jan 2024 17:54:45 +0800 Subject: [PATCH 047/101] Update README.md Signed-off-by: Haihao Shen --- intel_extension_for_transformers/neural_chat/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/README.md b/intel_extension_for_transformers/neural_chat/README.md index 7010a11b1f9..45887561423 100644 --- a/intel_extension_for_transformers/neural_chat/README.md +++ b/intel_extension_for_transformers/neural_chat/README.md @@ -12,7 +12,7 @@ NeuralChat NeuralChat is a powerful and flexible open framework that empowers you to effortlessly create LLM-centric AI applications, including chatbots and copilots. * Support a range of hardware like [Intel Xeon Scalable processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html), [Intel Gaudi AI processors](https://habana.ai/products), [Intel® Data Center GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html) and NVidia GPUs * Leverage the leading AI frameworks (e.g., [PyTorch](https://pytorch.org/) and popular domain libraries (e.g., [Hugging Face](https://github.com/huggingface), [Langchain](https://www.langchain.com/)) with their extensions -* Support the model customizations through parameter-efficient fine-tuning, quantization, and sparsity. Released [Intel NeuralChat-7B LLM](https://huggingface.co/Intel/neural-chat-7b-v3-1), ranking #1 in Hugging Face leaderboard in Nov'23 +* Support the model customizations through parameter-efficient fine-tuning, quantization, and sparsity. 
Released [Intel NeuralChat-7B LLM](https://huggingface.co/Intel/neural-chat-7b-v3-1), ranking #1 in Hugging Face open LLM leaderboard in Nov'23 * Provide a rich set of plugins that can augment the AI applications through retrieval-augmented generation (RAG) (e.g., [fastRAG](https://github.com/IntelLabs/fastRAG/tree/main)), content moderation, query caching, more * Integrate with popular serving frameworks (e.g., [vLLM](https://github.com/vllm-project/vllm), [TGI](https://github.com/huggingface/text-generation-inference), [Triton](https://developer.nvidia.com/triton-inference-server)). Support [OpenAI](https://platform.openai.com/docs/introduction)-compatible API to simplify the creation or migration of AI applications @@ -38,7 +38,7 @@ pip install -r requirements_hpu.txt # For XPU device pip install -r requirements_xpu.txt -# For CUDA +# For CUDA device pip install -r requirements.txt ``` From 0054fd11ddff938fcc33e0ead66c7c571dfc5ea2 Mon Sep 17 00:00:00 2001 From: liuzhenwei <109187816+zhenwei-intel@users.noreply.github.com> Date: Mon, 15 Jan 2024 14:26:19 +0800 Subject: [PATCH 048/101] [Runtime] calculate accuracy of runtime (#1123) * acc runtime Signed-off-by: zhenwei-intel * llama2 Signed-off-by: zhenwei-intel * update Signed-off-by: Dong, Bo * use evaluate Signed-off-by: zhenwei-intel * fix llama Signed-off-by: zhenwei-intel * fix rope scale Signed-off-by: zhenwei-intel * add args Signed-off-by: zhenwei-intel * logits_all Signed-off-by: zhenwei-intel * add copyright Signed-off-by: zhenwei-intel * fix rope scale Signed-off-by: zhenwei-intel * update model format Signed-off-by: zhenwei-intel * update convert Signed-off-by: zhenwei-intel * add args Signed-off-by: zhenwei-intel * update format Signed-off-by: zhenwei-intel * update script Signed-off-by: zhenwei-intel * enable GLIBCXX_3.4.26 Signed-off-by: Wenxin Zhang * fix conda env name Signed-off-by: Wenxin Zhang * fix pylint Signed-off-by: zhenwei-intel * add gptq Signed-off-by: zhenwei-intel * move to examples 
Signed-off-by: zhenwei-intel --------- Signed-off-by: zhenwei-intel Signed-off-by: Dong, Bo Signed-off-by: Wenxin Zhang Co-authored-by: Dong, Bo Co-authored-by: Wenxin Zhang --- .github/workflows/cpp-graph-test.yml | 2 +- .../huggingface/llmruntime/runtime_acc.py | 35 +++++++++++++++++++ .../evaluation/lm_eval/models/huggingface.py | 22 ++++++++++-- .../llm/runtime/graph/__init__.py | 4 +-- .../runtime/graph/application/main_pybind.cpp | 5 +-- .../graph/scripts/ci/cpp_graph_inference.sh | 2 +- .../llm/runtime/graph/scripts/common.py | 33 +++++++++-------- .../graph/scripts/convert_gptq_llama.py | 4 +++ .../graph/scripts/convert_gptq_mistral.py | 4 +++ .../runtime/graph/scripts/convert_llama.py | 4 +-- .../runtime/graph/scripts/convert_mistral.py | 4 +-- 11 files changed, 92 insertions(+), 27 deletions(-) create mode 100644 examples/huggingface/llmruntime/runtime_acc.py diff --git a/.github/workflows/cpp-graph-test.yml b/.github/workflows/cpp-graph-test.yml index 96853f20ab5..e6d46b1d30d 100644 --- a/.github/workflows/cpp-graph-test.yml +++ b/.github/workflows/cpp-graph-test.yml @@ -62,7 +62,7 @@ jobs: --cores_list="48," \ --input_list="32,1024" \ -- \ - cpp-graph-test \ + cpp-graph-test-itrex \ ${{ matrix.modelName }} \ ${{ env.GRAPH_DIR }} \ ${{ env.WORKING_DIR }} \ diff --git a/examples/huggingface/llmruntime/runtime_acc.py b/examples/huggingface/llmruntime/runtime_acc.py new file mode 100644 index 00000000000..9ef1c1ddad7 --- /dev/null +++ b/examples/huggingface/llmruntime/runtime_acc.py @@ -0,0 +1,35 @@ +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import argparse +from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Evaluate diff for a model") + parser.add_argument('--model_name', type=str, default="~/Llama-2-7b-chat-hf", help="path to model") + parser.add_argument('--tasks', type=str, default="lambada_openai") + parser.add_argument('--model_format', type=str, default="runtime") + parser.add_argument('--use_gptq', action='store_true') + args = parser.parse_args() + print(args) + + results = evaluate( + model="hf-causal", + model_args=f'pretrained="{args.model_name}",dtype=float32,use_gptq={args.use_gptq}', + tasks=[f"{args.tasks}"], + # limit=5, + model_format=f"{args.model_format}" + ) + + print(results) diff --git a/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py b/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py index 27bda6e6045..e3e64e2595f 100644 --- a/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py +++ b/intel_extension_for_transformers/llm/evaluation/lm_eval/models/huggingface.py @@ -252,6 +252,7 @@ def __init__( offload_folder, ) self._device = device + self.model_format = model_format if model_format == "torch": self.model = self._create_auto_model( pretrained=pretrained, @@ -464,8 +465,10 @@ def add_special_tokens(self) -> bool: """ if self._add_special_tokens is not None: return self._add_special_tokens + elif self.model_format == "runtime": + return True elif self.AUTO_MODEL_CLASS is 
transformers.AutoModelForCausalLM: - return False + return False elif self.AUTO_MODEL_CLASS is transformers.AutoModel: return False elif self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM: @@ -608,10 +611,20 @@ class AutoCausalLM(HuggingFaceAutoLM): AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM AUTO_PEFT_CLASS = peft.PeftModel + def __init__(self, *args, pretrained, model_format, **kwargs): + self.model_format = model_format + if self.model_format == "runtime": + from intel_extension_for_transformers.transformers import WeightOnlyQuantConfig + use_gptq = kwargs.pop("use_gptq", False) + self.woq_config = WeightOnlyQuantConfig(compute_dtype="int8", weight_dtype="int4", use_gptq=use_gptq) super().__init__(*args, pretrained=pretrained, model_format=model_format, **kwargs) - self.model_format = model_format + if self.model_format == "runtime": + from transformers import AutoTokenizer, TextStreamer + from intel_extension_for_transformers.transformers import AutoModelForCausalLM + self.runtime_model = AutoModelForCausalLM.from_pretrained(pretrained, quantization_config=self.woq_config) + if self.model_format == "onnx": if not os.path.exists(os.path.join(pretrained, "decoder_model.onnx")) and \ not os.path.exists(os.path.join(pretrained, "decoder_with_past_model.onnx")) and \ @@ -741,7 +754,10 @@ def _model_call( input_bs, input_len = inputs.shape bos = torch.tensor([64790, 64792]).repeat(input_bs, 1) inputs = torch.cat((bos, inputs), 1) - if self.model_format != "onnx": + if self.model_format == "runtime": + out = self.runtime_model(inputs, reinit=True, logits_all=True) + output = {"logits": torch.tensor(out).unsqueeze(0)} + elif self.model_format != "onnx": output = self.model(inputs) else: inputs_names = [input.name for input in self.model.model.get_inputs()] diff --git a/intel_extension_for_transformers/llm/runtime/graph/__init__.py b/intel_extension_for_transformers/llm/runtime/graph/__init__.py index f033e9cb677..12b520de035 100644 --- 
a/intel_extension_for_transformers/llm/runtime/graph/__init__.py +++ b/intel_extension_for_transformers/llm/runtime/graph/__init__.py @@ -225,7 +225,7 @@ def pad_token_id(self): " with padding!") return self.tokenizer.pad_token_id - def __call__(self, model_input, reinit=False, **kwargs): + def __call__(self, model_input, reinit=False, logits_all=False, **kwargs): if self.model_type == 'whisper': if self.model is None: self.model = self.module.Model() @@ -242,7 +242,7 @@ def __call__(self, model_input, reinit=False, **kwargs): elif reinit: self.model.reinit() self.generate_round = 0 - return self.model.evaluate(model_input.tolist()) + return self.model.evaluate(model_input.tolist(), logits_all) else: print("Please input torch.Tensor") return diff --git a/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp b/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp index 541766cf423..cf8f74d5fbe 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp +++ b/intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp @@ -74,7 +74,8 @@ class Model { // deprecated API std::vector> generate_tokens(const std::vector>& input_ids); const std::vector& evaluate_(const std::vector>& input_ids); - py::array_t evaluate(const std::vector>& input_ids) { + py::array_t evaluate(const std::vector>& input_ids, bool logits_all = false) { + if (logits_all) ctx->logits_all = true; if (!check_input_and_count_padding(input_ids)) return py::array_t(); const auto& logits = evaluate_(input_ids); for (auto& input_id : curr_input_ids) input_id.clear(); // clear curr_input_ids after eval @@ -673,7 +674,7 @@ PYBIND11_MODULE(qwen_cpp, m) py::arg("batch_size") = 1, py::arg("pad_token") = -1, py::arg("memory_dtype") = "auto") .def("generate", &Model::generate, "Generate token with input ids", py::arg("input_ids")) .def("evaluate", &Model::evaluate, "Evaluate token with input ids and output logits", 
- py::arg("input_ids") = std::vector>{}) + py::arg("input_ids") = std::vector>{}, py::arg("logits_all") = false) // deprecated API .def("generate_tokens", &Model::generate_tokens, "Generate tokens with input ids", py::arg("input_ids")) .def_static("quant_model", &Model::quant_model, "Quantize model", py::arg("model_path"), py::arg("out_path"), diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh index 661e11e306e..b03ccb675af 100755 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh @@ -267,7 +267,7 @@ function main() { if [[ "${compiler_version}" != "12.1.0" ]]; then conda install --update-deps -c conda-forge gxx==${compiler_version} gcc==${compiler_version} gxx_linux-64==${compiler_version} libstdcxx-ng sysroot_linux-64 -y fi - + export LD_LIBRARY_PATH=${HOME}/miniconda3/envs/${conda_env}/lib/:$LD_LIBRARY_PATH # setup conda env for LLM # get cpu info diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/common.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/common.py index 13ce0db0ecd..1c56e837dc7 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/common.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/common.py @@ -86,21 +86,26 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]: tokenizer = self.sentencepiece_tokenizer for i in range(tokenizer.vocab_size()): - text: bytes - if tokenizer.is_unknown(i): + text: bytes + if i < tokenizer.vocab_size(): + if tokenizer.is_unknown(i): + text = " \u2047 ".encode("utf-8") + elif tokenizer.is_control(i): + text = b"" + elif tokenizer.is_byte(i): + piece = tokenizer.id_to_piece(i) + if len(piece) 
!= 6: + raise Exception(f"Invalid token: {piece}") + byte_value = int(piece[3:-1], 16) + text = struct.pack("B", byte_value) + else: + text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") + score: float = tokenizer.get_score(i) + yield text, score + else : text = " \u2047 ".encode("utf-8") - elif tokenizer.is_control(i): - text = b"" - elif tokenizer.is_byte(i): - piece = tokenizer.id_to_piece(i) - if len(piece) != 6: - raise Exception(f"Invalid token: {piece}") - byte_value = int(piece[3:-1], 16) - text = struct.pack("B", byte_value) - else: - text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") - score: float = tokenizer.get_score(i) - yield text, score + score: float = i + yield text, score def added_tokens(self) -> Iterable[Tuple[bytes, float]]: for text in self.added_tokens_list: diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_llama.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_llama.py index cb3f646972f..bc6b98661d8 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_llama.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_llama.py @@ -80,6 +80,10 @@ def main(args_in: Optional[List[str]] = None) -> None: f.write(struct.pack("f", config["rms_norm_eps"])) f.write(struct.pack("f", config["rope_theta"] if "rope_theta" in config else 10000)) + rope_scale = 1 + if config.get("rope_scaling") is not None: + rope_scale = config["rope_scaling"].get("factor", 1) + f.write(struct.pack("f", rope_scale)) # TODO, bos_token_id = 0 in https://huggingface.co/decapoda-research/llama-7b-hf/blob/main/config.json # but bos_token_id = 1 in llama.cpp diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_mistral.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_mistral.py index 9e48aaebcad..0a634fe73fe 100644 --- 
a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_mistral.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_mistral.py @@ -83,6 +83,10 @@ def main(args_in: Optional[List[str]] = None) -> None: f.write(struct.pack("f", config["rms_norm_eps"])) f.write(struct.pack("f", config["rope_theta"] if "rope_theta" in config else 10000)) + rope_scale = 1 + if config.get("rope_scaling") is not None: + rope_scale = config["rope_scaling"].get("factor", 1) + f.write(struct.pack("f", rope_scale)) # TODO, bos_token_id = 0 in https://huggingface.co/decapoda-research/llama-7b-hf/blob/main/config.json # but bos_token_id = 1 in llama.cpp diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py index 93bcd8cde76..88b351c1d67 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py @@ -182,8 +182,8 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params': rms_norm_eps = config["rms_norm_eps"] rope_theta = config["rope_theta"] if "rope_theta" in config else 10000 rope_scale = 1 - if config["rope_scaling"]: - rope_scale = config["rope_scaling"]["factor"] if "factor" in config["rope_scaling"] else 1 + if config.get("rope_scaling") is not None: + rope_scale = config["rope_scaling"].get("factor", 1) bos_token_id = config["bos_token_id"] eos_token_id = config["eos_token_id"] diff --git a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py index 8ca762adaa2..424feb53621 100644 --- a/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py +++ b/intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py @@ -181,8 +181,8 @@ def loadHFTransformerJson(model: 
'LazyModel', config_path: Path) -> 'Params': rms_norm_eps = config["rms_norm_eps"] rope_theta = config["rope_theta"] if "rope_theta" in config else 10000 rope_scale = 1 - if config["rope_scaling"]: - rope_scale = config["rope_scaling"]["factor"] if "factor" in config["rope_scaling"] else 1 + if config.get("rope_scaling") is not None: + rope_scale = config["rope_scaling"].get("factor", 1) return Params( n_vocab=n_vocab, From a09f92d12a9a81ead56addf2c0801a9b256cef59 Mon Sep 17 00:00:00 2001 From: "Wang, Chang" Date: Mon, 15 Jan 2024 16:05:04 +0800 Subject: [PATCH 049/101] Fix doc about LLM example recipes. (#1140) --- .../text-generation/quantization/llm_quantization_recipes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md index e7d34c9a0ba..f9d80755ba2 100644 --- a/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md +++ b/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md @@ -2,7 +2,7 @@ This document describes the step-by-step instructions to run large language models (LLMs) on 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with [PyTorch](https://pytorch.org/) and [Intel® Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch). -The scripts [run_generation.py](./run_generation.py) provide two quantization approaches respectively (SmoothQuant, Weight-Only Quantization) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and return last token prediction accuracy by `trainer`. 
+The scripts [run_generation.py](./run_generation.py) provide two quantization approaches respectively (SmoothQuant, Weight-Only Quantization) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and return last word prediction accuracy by [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/master). # Validated Models From d753cb8e384b56c4d6b450fd0d7a68e0925c600f Mon Sep 17 00:00:00 2001 From: lkk <33276950+lkk12014402@users.noreply.github.com> Date: Mon, 15 Jan 2024 22:04:57 +0800 Subject: [PATCH 050/101] [NeuralChat] support llama series model for llava finetuning. (#948) --- .../examples/finetuning/multi_modal/README.md | 8 ++ .../examples/finetuning/multi_modal/train.py | 59 ++++++--- .../modeling/llava_models/__init__.py | 1 - .../modeling/llava_models/llava_llama.py | 113 ++++++++++++++++++ 4 files changed, 164 insertions(+), 17 deletions(-) create mode 100644 intel_extension_for_transformers/transformers/modeling/llava_models/llava_llama.py diff --git a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/README.md b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/README.md index 0ce7e3da1f9..5a26ebadc5c 100644 --- a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/README.md +++ b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/README.md @@ -3,6 +3,14 @@ Large Language and Vision Assistant (LLaVA) is a multi-modal training framework that was proposed in [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) and [Improved Baselines with Visual Instruction Tuning](https://arxiv.org/abs/2310.03744). This example demonstrates how to train a multi-modal model on Intel Gaudi2. 
+## Validated Model List +|Pretrained model| LLaVA | +|------------------------------------|---| +|Mistral series| ✅| +|LLaMA series| ✅| + +**Note:** For Salesforce/codegen25-7b-* series models same with LLaMA architecture, need install `pip install transformers==4.33.2` refer [this](https://github.com/salesforce/CodeGen/issues/82) + ## Train LLaVA training consists of two stages: (1) feature alignment stage: use our 558K subset of the LAION-CC-SBU dataset to connect a *frozen pretrained* vision encoder to a *frozen LLM*; (2) visual instruction tuning stage: use 150K GPT-generated multimodal instruction-following data, plus around 515K VQA data from academic-oriented tasks, to teach the model to follow multimodal instructions. diff --git a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/train.py b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/train.py index cde09935a6d..eb8f3fc58af 100644 --- a/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/train.py +++ b/intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/train.py @@ -25,9 +25,8 @@ import transformers -from transformers import AutoTokenizer, set_seed, BitsAndBytesConfig +from transformers import AutoTokenizer, set_seed, BitsAndBytesConfig, AutoConfig from transformers.integrations.deepspeed import is_deepspeed_available -from intel_extension_for_transformers.transformers.modeling.llava_models import LlavaMistralForCausalLM from llava_utils import * if is_hpu_available: @@ -133,19 +132,46 @@ def train(): low_cpu_mem_usage = False device_map = None - - model = LlavaMistralForCausalLM.from_pretrained( - model_args.model_name_or_path, - cache_dir=training_args.cache_dir, - load_in_4bit=training_args.bits == 4, - load_in_8bit=training_args.bits == 8, - low_cpu_mem_usage=low_cpu_mem_usage, - device_map=device_map, - quantization_config=quantization_config, - torch_dtype=(torch.float32 if training_args.fp16 
else (torch.bfloat16 if training_args.bf16 else torch.float32)), - trust_remote_code=model_args.trust_remote_code, - use_auth_token=model_args.use_auth_token - ) + config_kwargs = { + "cache_dir": training_args.cache_dir, + "trust_remote_code": model_args.trust_remote_code, + } + config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) + + use_fast = True + if config.architectures[0] == "LlamaForCausalLM": + from intel_extension_for_transformers.transformers.modeling.llava_models.llava_llama \ + import LlavaLlamaForCausalLM + model = LlavaLlamaForCausalLM.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + quantization_config=quantization_config, + torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)), + trust_remote_code=model_args.trust_remote_code, + use_auth_token=model_args.use_auth_token + ) + use_fast = False + elif config.architectures[0] == "MistralForCausalLM": + from intel_extension_for_transformers.transformers.modeling.llava_models.llava_mistral \ + import LlavaMistralForCausalLM + model = LlavaMistralForCausalLM.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + low_cpu_mem_usage=low_cpu_mem_usage, + device_map=device_map, + quantization_config=quantization_config, + torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)), + trust_remote_code=model_args.trust_remote_code, + use_auth_token=model_args.use_auth_token + ) + else: + raise ValueError("No llava implemention for the model {}".format(model_args.model_name_or_path)) # for training model.config.use_cache = False @@ -189,7 +215,8 @@ def 
make_inputs_require_grad(module, input, output): cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right", - # use_fast=False + trust_remote_code=model_args.trust_remote_code, + use_fast=use_fast ) tokenizer.pad_token = tokenizer.eos_token diff --git a/intel_extension_for_transformers/transformers/modeling/llava_models/__init__.py b/intel_extension_for_transformers/transformers/modeling/llava_models/__init__.py index 565876c2d41..ed04d17bdbe 100644 --- a/intel_extension_for_transformers/transformers/modeling/llava_models/__init__.py +++ b/intel_extension_for_transformers/transformers/modeling/llava_models/__init__.py @@ -15,4 +15,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .llava_mistral import LlavaMistralForCausalLM diff --git a/intel_extension_for_transformers/transformers/modeling/llava_models/llava_llama.py b/intel_extension_for_transformers/transformers/modeling/llava_models/llava_llama.py new file mode 100644 index 00000000000..d720b2db5a5 --- /dev/null +++ b/intel_extension_for_transformers/transformers/modeling/llava_models/llava_llama.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from transformers import AutoConfig, AutoModelForCausalLM, \ + LlamaConfig, LlamaModel, LlamaForCausalLM + +from transformers.modeling_outputs import CausalLMOutputWithPast + +from .llava_arch import LlavaMetaModel, LlavaMetaForCausalLM + + +class LlavaConfig(LlamaConfig): + model_type = "llava" + + +class LlavaLlamaModel(LlavaMetaModel, LlamaModel): + config_class = LlavaConfig + + def __init__(self, config: LlamaConfig): + super(LlavaLlamaModel, self).__init__(config) + + +class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): + config_class = LlavaConfig + + def __init__(self, config): + super(LlavaLlamaForCausalLM, self).__init__(config) + self.model = LlavaLlamaModel(config) + self.pretraining_tp = config.pretraining_tp + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_model(self): + return self.model + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + images: Optional[torch.FloatTensor] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + + if inputs_embeds is None: + ( + input_ids, + position_ids, + attention_mask, + past_key_values, + inputs_embeds, + labels + ) = self.prepare_inputs_labels_for_multimodal( + input_ids, + position_ids, + attention_mask, + past_key_values, + labels, + images + ) + + # pylint: disable=E1101 + return super().forward( + input_ids=input_ids, + 
attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + labels=labels, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict + ) + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + images = kwargs.pop("images", None) + # pylint: disable=E1101 + _inputs = super().prepare_inputs_for_generation( + input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs + ) + if images is not None: + _inputs['images'] = images + return _inputs From c0a89c5a41ae8a36fc74d5dc499ef07212c833db Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Tue, 16 Jan 2024 09:24:50 +0800 Subject: [PATCH 051/101] [NeuralChat] Support compatible stats format (#1112) --- .../neural_chat/config.py | 2 ++ .../neural_chat/models/base_model.py | 2 ++ .../neural_chat/models/model_utils.py | 34 +++++++++++++------ .../tests/ci/api/test_chatbot_build_api.py | 21 ++++++++++++ workflows/chatbot/inference/generate.py | 10 +++++- 5 files changed, 57 insertions(+), 12 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/config.py b/intel_extension_for_transformers/neural_chat/config.py index 35fc0d095df..6c56bccabc4 100644 --- a/intel_extension_for_transformers/neural_chat/config.py +++ b/intel_extension_for_transformers/neural_chat/config.py @@ -406,6 +406,8 @@ class GenerationConfig: max_gpu_memory: int = None use_fp16: bool = False ipex_int8: bool = False + return_stats: bool = False + format_version: str = "v2" task: str = "" @dataclass diff --git a/intel_extension_for_transformers/neural_chat/models/base_model.py b/intel_extension_for_transformers/neural_chat/models/base_model.py index 3aee7670cc3..05102d54adb 100644 --- a/intel_extension_for_transformers/neural_chat/models/base_model.py +++ b/intel_extension_for_transformers/neural_chat/models/base_model.py @@ 
-52,6 +52,8 @@ def construct_parameters(query, model_name, device, assistant_model, config): params["use_hpu_graphs"] = config.use_hpu_graphs params["use_cache"] = config.use_cache params["ipex_int8"] = config.ipex_int8 + params["return_stats"] = config.return_stats + params["format_version"] = config.format_version params["assistant_model"] = assistant_model params["device"] = device return params diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index 0d2a376f4cd..7339f73be94 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -794,6 +794,7 @@ def predict_stream(**params): Determines whether to utilize Habana Processing Units (HPUs) for accelerated generation. `use_cache` (bool): Determines whether to utilize kv cache for accelerated generation. `ipex_int8` (bool): Whether to use IPEX int8 model to inference. + `format_version` (string): the format version of return stats. Returns: generator: A generator that yields the generated streaming text. 
@@ -822,6 +823,7 @@ def predict_stream(**params): use_hpu_graphs = params["use_hpu_graphs"] if "use_hpu_graphs" in params else False use_cache = params["use_cache"] if "use_cache" in params else True return_stats = params["return_stats"] if "return_stats" in params else False + format_version = params["format_version"] if "format_version" in params else "v2" prompt = params["prompt"] ipex_int8 = params["ipex_int8"] if "ipex_int8" in params else False model = MODELS[model_name]["model"] @@ -1017,17 +1019,27 @@ def generate_output(): 0 ) if return_stats: - stats = { - "input_token_len": str(input_token_len), - "output_token_len": str(output_token_len), - "duration": str(duration) + " ms", - "first_token_latency": str(first_token_latency) + " ms", - "msecond_per_token": str(msecond_per_token) + " ms", - } - yield "\n| {:<22} | {:<27} |\n".format("Key", "Value") - yield "| " + "-"*22 + " | " + "-"*27 + " |" + "\n" - for key, value in stats.items(): - yield "| {:<22} | {:<27} |\n".format(key, value) + if format_version == "v1": + stats = { + "input_token_len": input_token_len, + "output_token_len": output_token_len, + "duration": duration, + "first_token_latency": first_token_latency, + "msecond_per_token": msecond_per_token, + } + yield "END_OF_STREAM_STATS={}".format(stats) + else: + stats = { + "input_token_len": str(input_token_len), + "output_token_len": str(output_token_len), + "duration": str(duration) + " ms", + "first_token_latency": str(first_token_latency) + " ms", + "msecond_per_token": str(msecond_per_token) + " ms", + } + yield "\n| {:<22} | {:<27} |\n".format("Key", "Value") + yield "| " + "-"*22 + " | " + "-"*27 + " |" + "\n" + for key, value in stats.items(): + yield "| {:<22} | {:<27} |\n".format(key, value) def predict(**params): diff --git a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py index c4e196625fe..d1944263df0 100644 --- 
a/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py +++ b/intel_extension_for_transformers/neural_chat/tests/ci/api/test_chatbot_build_api.py @@ -167,6 +167,27 @@ def _run_retrieval(local_dir): _run_retrieval(local_dir="/tf_dataset2/inc-ut/instructor-large") _run_retrieval(local_dir="/tf_dataset2/inc-ut/bge-base-en-v1.5") + def test_text_chat_stream_return_stats_with_v1_format(self): + config = PipelineConfig(model_name_or_path="facebook/opt-125m") + chatbot = build_chatbot(config) + stream_text = "" + gen_config = GenerationConfig(return_stats=True, format_version="v1") + results, _ = chatbot.predict_stream("Tell me about Intel Xeon Scalable Processors.", config=gen_config) + for text in results: + stream_text += text + print(text) + self.assertIn("END_OF_STREAM_STATS=", stream_text) + + def test_text_chat_stream_return_stats(self): + config = PipelineConfig(model_name_or_path="facebook/opt-125m") + chatbot = build_chatbot(config) + stream_text = "" + gen_config = GenerationConfig(return_stats=True) + results, _ = chatbot.predict_stream("Tell me about Intel Xeon Scalable Processors.", config=gen_config) + for text in results: + stream_text += text + print(text) + self.assertIn("| Key | Value |", stream_text) if __name__ == '__main__': unittest.main() diff --git a/workflows/chatbot/inference/generate.py b/workflows/chatbot/inference/generate.py index b4f1192a087..45654d31a63 100644 --- a/workflows/chatbot/inference/generate.py +++ b/workflows/chatbot/inference/generate.py @@ -162,6 +162,12 @@ def parse_args(): ) parser.add_argument( "--return_stats", action='store_true', default=False,) + parser.add_argument( + "--format_version", + type=str, + default="v2", + help="the version of return stats format", + ) args = parser.parse_args() return args @@ -232,7 +238,9 @@ def main(): use_hpu_graphs=args.use_hpu_graphs, use_cache=args.use_kv_cache, num_return_sequences=args.num_return_sequences, - ipex_int8=args.ipex_int8 + 
ipex_int8=args.ipex_int8, + return_stats=args.return_stats, + format_version=args.format_version ) if args.habana: From 2c5f8b0099ca03149675dbbe7fad87d476d4d096 Mon Sep 17 00:00:00 2001 From: intellinjun <105184542+intellinjun@users.noreply.github.com> Date: Wed, 17 Jan 2024 11:46:43 +0800 Subject: [PATCH 052/101] [LLM Runtime] neural_speed_replace_graph (#1129) Co-authored-by: Meng, Hengyu Co-authored-by: Wenxin Zhang Co-authored-by: lvliang-intel --- .github/CODEOWNERS | 5 - .github/workflows/cpp-graph-test.yml | 7 - .github/workflows/script/formatScan/pylint.sh | 5 + .../unitTest/run_unit_test_llmruntime.sh | 5 + .github/workflows/sparse_lib_CI.yml | 29 +- .github/workflows/unit-test-llmruntime.yml | 1 + .gitmodules | 6 - .../llm/library/CMakeLists.txt | 13 - .../llm/library/jblas/.clang-format | 7 - .../llm/library/jblas/CMakeLists.txt | 34 - .../llm/library/jblas/README.md | 47 - .../llm/library/jblas/docs/workflow.png | Bin 172385 -> 0 bytes .../llm/library/jblas/jblas/jit_base.h | 303 - .../llm/library/jblas/jblas/jit_blas.h | 96 - .../llm/library/jblas/jblas/jit_blas_device.h | 277 - .../library/jblas/jblas/jit_blas_epilogue.h | 332 - .../llm/library/jblas/jblas/jit_blas_gemm.h | 3544 ----- .../library/jblas/jblas/jit_blas_parallel.h | 701 - .../library/jblas/jblas/jit_blas_prologue_a.h | 432 - .../library/jblas/jblas/jit_blas_prologue_b.h | 1707 --- .../library/jblas/jblas/jit_blas_storage.h | 973 -- .../llm/library/jblas/jblas/jit_blas_utils.h | 683 - .../library/jblas/jblas/jit_blas_wrapper.h | 478 - .../llm/library/jblas/jblas/kernel_avx2.h | 978 -- .../library/jblas/jblas/kernel_avx512_bf16.h | 92 - .../llm/library/jblas/jblas/kernel_avx512f.h | 2162 --- .../llm/library/jblas/jblas/kernel_jit.h | 1489 -- .../library/jblas/jblas/kernel_jit_injector.h | 930 -- .../llm/library/jblas/jblas/kernel_ref.h | 1313 -- .../llm/library/jblas/jblas/kernel_wrapper.h | 781 - .../llm/library/jblas/jblas/xbyak/xbyak.h | 2964 ---- 
.../library/jblas/jblas/xbyak/xbyak_bin2hex.h | 271 - .../jblas/jblas/xbyak/xbyak_mnemonic.h | 2349 --- .../library/jblas/jblas/xbyak/xbyak_util.h | 1058 -- .../llm/library/kernels/CMakeLists.txt | 136 - .../llm/library/kernels/README.md | 59 - .../llm/library/kernels/cmake/Common.cmake | 28 - .../library/kernels/cmake/FindOpenMP.cmake | 24 - .../llm/library/kernels/cmake/FindVTune.cmake | 65 - .../llm/library/kernels/cmake/Utility.cmake | 43 - .../library/kernels/docs/imgs/2D_to_3D.png | Bin 20740 -> 0 bytes .../llm/library/kernels/docs/imgs/3D_spmm.png | Bin 92088 -> 0 bytes .../kernels/docs/imgs/4x16_to_vnni_format.png | Bin 38616 -> 0 bytes .../kernels/docs/imgs/cache_inconsistency.png | Bin 46905 -> 0 bytes .../kernels/docs/imgs/cache_mapping.png | Bin 167350 -> 0 bytes .../kernels/docs/imgs/gpu_naive_gemm.png | Bin 24781 -> 0 bytes .../docs/imgs/gpu_tile_sparse_gemm.png | Bin 27178 -> 0 bytes .../docs/imgs/kernel_amx_bf16x16_calc.png | Bin 9681 -> 0 bytes .../docs/imgs/kernel_amx_bf16x16_relayout.png | Bin 54393 -> 0 bytes .../docs/imgs/kernel_avx512f_pattern_base.png | Bin 35101 -> 0 bytes .../imgs/kernel_avx512f_pattern_unroll4.png | Bin 39265 -> 0 bytes ...ernel_dynamic_quant_matmul_MN_parallel.png | Bin 37093 -> 0 bytes .../kernel_dynamic_quant_matmul_perf_chat.png | Bin 14324 -> 0 bytes ...el_dynamic_quant_matmul_wei_preprocess.png | Bin 27634 -> 0 bytes ...ernel_matmul_avx512f_p2031_p2013_loops.png | Bin 49118 -> 0 bytes ...kernel_matmul_avx512f_p2031_p2013_tile.png | Bin 127634 -> 0 bytes .../imgs/kernel_trans_mha_batchk_trans.png | Bin 26618 -> 0 bytes .../docs/imgs/kernel_trans_mha_reorder.png | Bin 41607 -> 0 bytes .../kernels/docs/imgs/kernel_vnni_calc.png | Bin 19778 -> 0 bytes .../kernels/docs/imgs/kernel_vnni_pattern.png | Bin 8991 -> 0 bytes .../imgs/kernel_vnni_pattern_left_1x4.png | Bin 112792 -> 0 bytes .../imgs/kernel_vnni_pattern_left_4x1.png | Bin 103753 -> 0 bytes .../imgs/kernel_vnni_pattern_right_1x16.png | Bin 47079 -> 0 bytes 
.../imgs/kernel_vnni_pattern_right_4x1.png | Bin 49970 -> 0 bytes .../kernels/docs/imgs/kernel_vnni_perf.png | Bin 59796 -> 0 bytes ...tmul_vnni_noperm_p2031_p1302_cpy_loops.svg | 1 - .../matmul_vnni_noperm_p2031_p1302_loops.svg | 1 - .../matmul_vnni_noperm_p2031_p1302_tile.png | Bin 139286 -> 0 bytes ...l_vnni_noperm_p2031_p1302_transform8x8.png | Bin 243551 -> 0 bytes .../kernels/docs/kernel_desc/3D_inference.md | 26 - .../docs/kernel_desc/binaryop_injector.md | 109 - .../docs/kernel_desc/eltwise_injector.md | 222 - .../docs/kernel_desc/gpu/sparse_gemm_gpu.md | 339 - .../kernels/docs/kernel_desc/kernel_amx.md | 48 - .../docs/kernel_desc/kernel_avx512f.md | 45 - .../kernel_dynamic_quant_matmul.md | 62 - .../kernel_layernormalized_spmm.md | 21 - .../kernel_desc/kernel_transpose_matmul.md | 95 - .../docs/kernel_desc/kernel_transpose_mha.md | 22 - .../kernels/docs/kernel_desc/kernel_vnni.md | 65 - .../llm/library/kernels/docs/profiling.md | 170 - .../library/kernels/docs/validated_data.md | 1088 -- .../llm/library/kernels/include/common.h | 38 - .../kernels/include/data_type/bf16.hpp | 37 - .../kernels/include/data_type/data_types.hpp | 37 - .../library/kernels/include/data_type/f8.hpp | 46 - .../kernels/include/data_type/fp16.hpp | 35 - .../llm/library/kernels/include/engine.hpp | 47 - .../kernels/include/engine_factory.hpp | 42 - .../library/kernels/include/git_version.h.in | 6 - .../kernels/include/impl_list_item.hpp | 58 - .../llm/library/kernels/include/interface.hpp | 340 - .../llm/library/kernels/include/kernel.hpp | 133 - .../library/kernels/include/kernel_cache.hpp | 52 - .../library/kernels/include/kernel_desc.hpp | 84 - .../kernels/include/kernel_hashing.hpp | 163 - .../kernels/include/kernels/amx_utils.hpp | 117 - .../include/kernels/attention_types.hpp | 40 - .../kernels/include/kernels/data_pack.hpp | 30 - .../kernels/dynamic_quant_matmul_types.hpp | 56 - .../include/kernels/eltwiseop_types.hpp | 42 - .../kernels/include/kernels/exposed_enum.hpp | 127 - 
.../include/kernels/layernorm_ba_types.hpp | 63 - .../kernels/include/kernels/matmul_types.hpp | 113 - .../include/kernels/mean_var_reduce_types.hpp | 41 - .../kernels/include/kernels/softmax_types.hpp | 50 - .../kernels/include/kernels/sparse_data.hpp | 222 - .../kernels/include/kernels/spmm_types.hpp | 157 - .../include/kernels/transpose_mha_types.hpp | 88 - .../kernels/include/memory_storage.hpp | 55 - .../library/kernels/include/operator_desc.hpp | 119 - .../library/kernels/include/param_types.hpp | 198 - .../llm/library/kernels/include/stream.hpp | 32 - .../library/kernels/include/tensor_desc.hpp | 64 - .../llm/library/kernels/scripts/README.md | 16 - .../library/kernels/scripts/requirements.txt | 4 - .../library/kernels/scripts/sample_figure.jpg | Bin 29273 -> 0 bytes .../library/kernels/scripts/sparsity_all.py | 170 - .../llm/library/kernels/src/cpu/amx_utils.cpp | 77 - .../llm/library/kernels/src/cpu/cpu_isa.cpp | 63 - .../llm/library/kernels/src/cpu/cpu_isa.hpp | 106 - .../library/kernels/src/cpu/cpu_parallel.cpp | 36 - .../library/kernels/src/cpu/cpu_parallel.hpp | 182 - .../llm/library/kernels/src/cpu/data_pack.cpp | 84 - .../kernels/src/cpu/engine/cpu_engine.cpp | 110 - .../kernels/src/cpu/engine/cpu_engine.hpp | 46 - .../src/cpu/jit_domain/jit_amx_configure.cpp | 29 - .../src/cpu/jit_domain/jit_amx_configure.hpp | 46 - .../jit_amx_s8s8_dynamic_dequant_matmul.cpp | 277 - .../jit_amx_s8s8_dynamic_dequant_matmul.hpp | 50 - .../jit_amx_s8s8_dynamic_quant_matmul.cpp | 382 - .../jit_amx_s8s8_dynamic_quant_matmul.hpp | 44 - .../cpu/jit_domain/jit_binary_injector.cpp | 156 - .../cpu/jit_domain/jit_binary_injector.hpp | 57 - .../src/cpu/jit_domain/jit_dynamic_quant.cpp | 173 - .../src/cpu/jit_domain/jit_dynamic_quant.hpp | 58 - ...ynamic_quant_matmul_reduce_scale_quant.cpp | 124 - ...ynamic_quant_matmul_reduce_scale_quant.hpp | 51 - .../cpu/jit_domain/jit_dynamic_quant_mha.cpp | 1093 -- .../cpu/jit_domain/jit_dynamic_quant_mha.hpp | 345 - 
.../cpu/jit_domain/jit_eltwise_injector.cpp | 1145 -- .../cpu/jit_domain/jit_eltwise_injector.hpp | 202 - .../src/cpu/jit_domain/jit_eltwiseop.cpp | 128 - .../src/cpu/jit_domain/jit_eltwiseop.hpp | 150 - .../kernels/src/cpu/jit_domain/jit_gather.cpp | 109 - .../kernels/src/cpu/jit_domain/jit_gather.hpp | 63 - .../cpu/jit_domain/jit_gemm_avx512f_8bit.cpp | 258 - .../cpu/jit_domain/jit_gemm_avx512f_8bit.hpp | 110 - .../src/cpu/jit_domain/jit_generator.cpp | 215 - .../src/cpu/jit_domain/jit_generator.hpp | 402 - .../src/cpu/jit_domain/jit_groupnorm.cpp | 311 - .../src/cpu/jit_domain/jit_groupnorm.hpp | 106 - .../src/cpu/jit_domain/jit_layernorm_ba.cpp | 416 - .../src/cpu/jit_domain/jit_layernorm_ba.hpp | 128 - ...jit_matmul_amx_s8ab_s8Ab4a_s32AB16a16b.cpp | 100 - ...jit_matmul_amx_s8ab_s8Ab4a_s32AB16a16b.hpp | 92 - ...jit_matmul_amx_u8AB16a64b_s8BA16b4a_ab.cpp | 155 - ...jit_matmul_amx_u8AB16a64b_s8BA16b4a_ab.hpp | 85 - .../jit_matmul_avx512f_p2031_p2013.cpp | 140 - .../jit_matmul_avx512f_p2031_p2013.hpp | 87 - .../cpu/jit_domain/jit_matmul_vnni_8xkx48.cpp | 140 - .../cpu/jit_domain/jit_matmul_vnni_8xkx48.hpp | 102 - .../jit_matmul_vnni_Ba4b_Ab4a_ba.cpp | 127 - .../jit_matmul_vnni_Ba4b_Ab4a_ba.hpp | 88 - .../jit_matmul_vnni_noperm_p2031_p1302.cpp | 183 - .../jit_matmul_vnni_noperm_p2031_p1302.hpp | 98 - .../cpu/jit_domain/jit_mean_var_reduce.cpp | 73 - .../cpu/jit_domain/jit_mean_var_reduce.hpp | 66 - .../src/cpu/jit_domain/jit_mha_dense_bf16.hpp | 849 -- .../cpu/jit_domain/jit_mm_exp_vnni_mxkx48.cpp | 178 - .../cpu/jit_domain/jit_mm_exp_vnni_mxkx48.hpp | 105 - .../src/cpu/jit_domain/jit_seq_cpy_2x8x8.cpp | 166 - .../src/cpu/jit_domain/jit_seq_cpy_2x8x8.hpp | 108 - .../src/cpu/jit_domain/jit_seq_cpy_48x4.cpp | 225 - .../src/cpu/jit_domain/jit_seq_cpy_48x4.hpp | 134 - .../kernels/src/cpu/jit_domain/jit_slice.cpp | 199 - .../kernels/src/cpu/jit_domain/jit_slice.hpp | 68 - .../src/cpu/jit_domain/jit_softmax.cpp | 303 - .../src/cpu/jit_domain/jit_softmax.hpp | 95 - 
.../src/cpu/jit_domain/jit_softmax_Ab16a.cpp | 329 - .../src/cpu/jit_domain/jit_softmax_Ab16a.hpp | 108 - .../cpu/jit_domain/jit_spmm_amx_bf16_x16.cpp | 239 - .../cpu/jit_domain/jit_spmm_amx_bf16_x16.hpp | 122 - .../src/cpu/jit_domain/jit_spmm_avx512f.cpp | 127 - .../src/cpu/jit_domain/jit_spmm_avx512f.hpp | 94 - .../src/cpu/jit_domain/jit_spmm_vnni.cpp | 587 - .../src/cpu/jit_domain/jit_spmm_vnni.hpp | 167 - .../src/cpu/jit_domain/jit_trans_AB16a4b.cpp | 98 - .../src/cpu/jit_domain/jit_trans_AB16a4b.hpp | 67 - .../cpu/jit_domain/jit_trans_AB16a4b_16x.cpp | 144 - .../cpu/jit_domain/jit_trans_AB16a4b_16x.hpp | 73 - .../src/cpu/jit_domain/jit_trans_BA16b4a.cpp | 156 - .../src/cpu/jit_domain/jit_trans_BA16b4a.hpp | 65 - .../jit_trans_BA16b4a_trq10n_x16.cpp | 232 - .../jit_trans_BA16b4a_trq10n_x16.hpp | 54 - .../cpu/jit_domain/jit_trans_cpy_nx8_4b.cpp | 120 - .../cpu/jit_domain/jit_trans_cpy_nx8_4b.hpp | 81 - .../src/cpu/jit_domain/jit_transpose_mha.cpp | 1434 -- .../src/cpu/jit_domain/jit_transpose_mha.hpp | 205 - .../kernels/src/cpu/jit_domain/regs_pool.hpp | 321 - .../kernels/src/cpu/kernels/CMakeLists.txt | 3 - .../kernels/src/cpu/kernels/attention.cpp | 485 - .../kernels/src/cpu/kernels/attention.hpp | 144 - .../kernels/src/cpu/kernels/attention_ref.cpp | 472 - .../kernels/src/cpu/kernels/attention_ref.hpp | 140 - .../src/cpu/kernels/cpu_attention_list.cpp | 42 - .../cpu/kernels/cpu_dynamic_quant_list.cpp | 40 - .../cpu/kernels/cpu_dynamic_quant_matmul.cpp | 39 - .../src/cpu/kernels/cpu_eltwiseop_list.cpp | 38 - .../src/cpu/kernels/cpu_gather_list.cpp | 38 - .../src/cpu/kernels/cpu_groupnorm_list.cpp | 39 - .../src/cpu/kernels/cpu_layernorm_ba_list.cpp | 40 - .../kernels/cpu_layernormalized_spmm_list.cpp | 39 - .../src/cpu/kernels/cpu_mha_dense_list.cpp | 52 - .../src/cpu/kernels/cpu_slice_list.cpp | 36 - .../src/cpu/kernels/cpu_softmax_list.cpp | 38 - .../cpu/kernels/cpu_sparse_matmul_list.cpp | 77 - .../cpu/kernels/cpu_transpose_matmul_list.cpp | 91 - 
.../cpu/kernels/cpu_transpose_mha_list.cpp | 38 - .../kernels/src/cpu/kernels/dynamic_quant.cpp | 83 - .../kernels/src/cpu/kernels/dynamic_quant.hpp | 90 - .../src/cpu/kernels/dynamic_quant_matmul.cpp | 394 - .../src/cpu/kernels/dynamic_quant_matmul.hpp | 135 - .../cpu/kernels/dynamic_quant_matmul_ref.cpp | 218 - .../cpu/kernels/dynamic_quant_matmul_ref.hpp | 84 - .../src/cpu/kernels/dynamic_quant_mha.cpp | 404 - .../src/cpu/kernels/dynamic_quant_mha.hpp | 121 - .../src/cpu/kernels/dynamic_quant_ref.cpp | 75 - .../src/cpu/kernels/dynamic_quant_ref.hpp | 78 - .../kernels/src/cpu/kernels/eltwiseop.cpp | 91 - .../kernels/src/cpu/kernels/eltwiseop.hpp | 99 - .../kernels/src/cpu/kernels/eltwiseop_ref.cpp | 60 - .../kernels/src/cpu/kernels/eltwiseop_ref.hpp | 75 - .../kernels/src/cpu/kernels/gather.cpp | 112 - .../kernels/src/cpu/kernels/gather.hpp | 94 - .../kernels/src/cpu/kernels/gather_ref.cpp | 80 - .../kernels/src/cpu/kernels/gather_ref.hpp | 65 - .../kernels/src/cpu/kernels/groupnorm.cpp | 160 - .../kernels/src/cpu/kernels/groupnorm.hpp | 100 - .../kernels/src/cpu/kernels/groupnorm_ref.cpp | 112 - .../kernels/src/cpu/kernels/groupnorm_ref.hpp | 75 - .../kernels/src/cpu/kernels/layernorm_ba.cpp | 266 - .../kernels/src/cpu/kernels/layernorm_ba.hpp | 108 - .../src/cpu/kernels/layernorm_ba_ref.cpp | 134 - .../src/cpu/kernels/layernorm_ba_ref.hpp | 85 - .../src/cpu/kernels/layernormalized_spmm.cpp | 120 - .../src/cpu/kernels/layernormalized_spmm.hpp | 89 - .../cpu/kernels/layernormalized_spmm_ref.cpp | 115 - .../cpu/kernels/layernormalized_spmm_ref.hpp | 91 - .../src/cpu/kernels/matmul_avx512f_8bit.cpp | 295 - .../src/cpu/kernels/matmul_avx512f_8bit.hpp | 114 - .../kernels/matmul_avx512f_p2031_p2013.cpp | 197 - .../kernels/matmul_avx512f_p2031_p2013.hpp | 113 - .../kernels/src/cpu/kernels/matmul_ref.cpp | 547 - .../kernels/src/cpu/kernels/matmul_ref.hpp | 107 - .../matmul_vnni_noperm_p2031_p1302.cpp | 296 - .../matmul_vnni_noperm_p2031_p1302.hpp | 141 - 
.../cpu/kernels/matmul_vnni_p2031_p2013.cpp | 387 - .../cpu/kernels/matmul_vnni_p2031_p2013.hpp | 135 - .../kernels/src/cpu/kernels/mha_dense.cpp | 940 -- .../kernels/src/cpu/kernels/mha_dense.hpp | 174 - .../src/cpu/kernels/mha_dense_bf16.cpp | 374 - .../src/cpu/kernels/mha_dense_bf16.hpp | 148 - .../kernels/src/cpu/kernels/mha_dense_ctx.hpp | 103 - .../kernels/src/cpu/kernels/mha_dense_ref.cpp | 485 - .../kernels/src/cpu/kernels/mha_dense_ref.hpp | 144 - .../library/kernels/src/cpu/kernels/slice.cpp | 102 - .../library/kernels/src/cpu/kernels/slice.hpp | 90 - .../kernels/src/cpu/kernels/softmax.cpp | 131 - .../kernels/src/cpu/kernels/softmax.hpp | 92 - .../kernels/src/cpu/kernels/softmax_ref.cpp | 119 - .../kernels/src/cpu/kernels/softmax_ref.hpp | 76 - .../kernels/src/cpu/kernels/sparse_data.cpp | 189 - .../src/cpu/kernels/spmm_amx_bf16_x16.cpp | 162 - .../src/cpu/kernels/spmm_amx_bf16_x16.hpp | 121 - .../kernels/src/cpu/kernels/spmm_avx512f.cpp | 114 - .../kernels/src/cpu/kernels/spmm_avx512f.hpp | 106 - .../kernels/src/cpu/kernels/spmm_ref.cpp | 300 - .../kernels/src/cpu/kernels/spmm_ref.hpp | 124 - .../kernels/src/cpu/kernels/spmm_vnni.cpp | 279 - .../kernels/src/cpu/kernels/spmm_vnni.hpp | 148 - .../kernels/src/cpu/kernels/transpose_mha.cpp | 408 - .../kernels/src/cpu/kernels/transpose_mha.hpp | 147 - .../cpu/memory_storege/cpu_memory_storage.cpp | 54 - .../cpu/memory_storege/cpu_memory_storage.hpp | 50 - .../library/kernels/src/data_type/bf16.cpp | 47 - .../llm/library/kernels/src/data_type/f8.cpp | 238 - .../library/kernels/src/data_type/fp16.cpp | 61 - .../library/kernels/src/engine_factory.cpp | 64 - .../kernels/src/gpu/engine/gpu_ocl_engine.cpp | 124 - .../kernels/src/gpu/engine/gpu_ocl_engine.hpp | 59 - .../kernels/src/gpu/kernels/opencl/common.cpp | 316 - .../kernels/src/gpu/kernels/opencl/common.hpp | 26 - .../opencl/gemm/gpu_ocl_matmul_list.cpp | 41 - .../kernels/opencl/gemm/gpu_ocl_matmul_ref.cl | 16 - .../opencl/gemm/gpu_ocl_matmul_ref.cpp | 73 - 
.../opencl/gemm/gpu_ocl_matmul_ref.hpp | 76 - .../memory_storage/gpu_ocl_memory_storage.cpp | 106 - .../memory_storage/gpu_ocl_memory_storage.hpp | 49 - .../kernels/src/gpu/stream/gpu_ocl_stream.cpp | 36 - .../kernels/src/gpu/stream/gpu_ocl_stream.hpp | 33 - .../llm/library/kernels/src/interface.cpp | 139 - .../llm/library/kernels/src/kernel.cpp | 20 - .../llm/library/kernels/src/kernel_cache.cpp | 74 - .../llm/library/kernels/src/kernel_desc.cpp | 24 - .../llm/library/kernels/src/singleton.hpp | 55 - .../llm/library/kernels/src/tensor_desc.cpp | 45 - .../llm/library/kernels/src/utils.cpp | 239 - .../llm/library/kernels/src/utils.hpp | 329 - .../llm/library/kernels/src/verbose.cpp | 130 - .../llm/library/kernels/src/verbose.hpp | 86 - .../llm/library/kernels/src/vtune_wrapper.cpp | 43 - .../llm/library/kernels/src/vtune_wrapper.hpp | 48 - .../llm/library/xbyak | 1 - .../llm/runtime/graph/CMakeLists.txt | 169 - .../llm/runtime/graph/CMakePresets.json | 62 - .../llm/runtime/graph/__init__.py | 248 - .../runtime/graph/application/CMakeLists.txt | 130 - .../runtime/graph/application/audio_run.cpp | 937 -- .../llm/runtime/graph/application/common.cpp | 1159 -- .../llm/runtime/graph/application/common.h | 196 - .../runtime/graph/application/main_pybind.cpp | 694 - .../runtime/graph/application/main_run.cpp | 735 - .../runtime/graph/application/pybind_gptj.cpp | 351 - .../runtime/graph/application/quant_model.cpp | 83 - .../graph/application/quant_whisper.cpp | 74 - .../graph/application/third_party/pybind11 | 1 - .../graph/application/whisper_pybind.cpp | 472 - .../llm/runtime/graph/cmake/Common.cmake | 60 - .../llm/runtime/graph/cmake/ISA.cmake | 67 - .../llm/runtime/graph/core/CMakeLists.txt | 66 - .../llm/runtime/graph/core/README.md | 76 - .../llm/runtime/graph/core/data_types.h | 233 - .../runtime/graph/core/layers/CMakeLists.txt | 13 - .../llm/runtime/graph/core/layers/Ops.h | 96 - .../llm/runtime/graph/core/layers/conv.cpp | 947 -- 
.../llm/runtime/graph/core/layers/conv.h | 35 - .../runtime/graph/core/layers/ele_reduce.h | 60 - .../llm/runtime/graph/core/layers/ele_wise.h | 267 - .../graph/core/layers/inner_product.cpp | 254 - .../graph/core/layers/ip_fusion_ffn.cpp | 788 - .../graph/core/layers/ip_fusion_qkv.cpp | 313 - .../graph/core/layers/jblas_common.hpp | 201 - .../runtime/graph/core/layers/jblas_defs.h | 74 - .../runtime/graph/core/layers/jblas_gemm.cpp | 594 - .../runtime/graph/core/layers/jblas_gemm.h | 55 - .../llm/runtime/graph/core/layers/layers.h | 18 - .../llm/runtime/graph/core/layers/memory.cpp | 33 - .../llm/runtime/graph/core/layers/memory.h | 29 - .../runtime/graph/core/layers/mha_dense.cpp | 2539 ---- .../llm/runtime/graph/core/layers/mha_dense.h | 174 - .../runtime/graph/core/layers/ne_jblas.cpp | 79 - .../core/layers/ne_test_layers_utils.hpp | 93 - .../llm/runtime/graph/core/layers/vec_dot.h | 702 - .../llm/runtime/graph/core/ne.h | 239 - .../llm/runtime/graph/core/ne_jblas.h | 69 - .../llm/runtime/graph/core/ne_layers.c | 11958 ---------------- .../llm/runtime/graph/core/ne_layers.h | 645 - .../runtime/graph/core/parallel_context.cpp | 120 - .../llm/runtime/graph/core/parallel_context.h | 51 - .../llm/runtime/graph/developer_document.md | 429 - .../llm/runtime/graph/docs/fused_attention.md | 293 - .../llm/runtime/graph/docs/imgs/Attention.PNG | Bin 41552 -> 0 bytes .../llm/runtime/graph/docs/imgs/FFN.PNG | Bin 11867 -> 0 bytes .../runtime/graph/docs/imgs/shift-rope.svg | 1 - .../runtime/graph/docs/imgs/shuffle-attn.svg | 197 - .../runtime/graph/docs/infinite_inference.md | 41 - .../runtime/graph/docs/tensor_parallelism.md | 115 - .../llm/runtime/graph/models/CMakeLists.txt | 27 - .../graph/models/baichuan/CMakeLists.txt | 19 - .../graph/models/baichuan/baichuan.cpp | 352 - .../runtime/graph/models/baichuan/baichuan.h | 50 - .../graph/models/baichuan/baichuan_utils.cpp | 191 - .../runtime/graph/models/bloom/CMakeLists.txt | 19 - 
.../llm/runtime/graph/models/bloom/bloom.cpp | 331 - .../llm/runtime/graph/models/bloom/bloom.h | 50 - .../graph/models/bloom/bloom_utils.cpp | 191 - .../graph/models/chatglm/CMakeLists.txt | 25 - .../runtime/graph/models/chatglm/chatglm.cpp | 362 - .../runtime/graph/models/chatglm/chatglm.h | 51 - .../runtime/graph/models/chatglm/chatglm2.cpp | 387 - .../runtime/graph/models/chatglm/chatglm2.h | 50 - .../graph/models/chatglm/chatglm2_utils.cpp | 204 - .../graph/models/chatglm/chatglm_utils.cpp | 198 - .../graph/models/falcon/CMakeLists.txt | 19 - .../runtime/graph/models/falcon/falcon.cpp | 392 - .../llm/runtime/graph/models/falcon/falcon.h | 54 - .../graph/models/falcon/falcon_utils.cpp | 198 - .../runtime/graph/models/gptj/CMakeLists.txt | 19 - .../llm/runtime/graph/models/gptj/gptj.cpp | 561 - .../llm/runtime/graph/models/gptj/gptj.h | 53 - .../runtime/graph/models/gptj/gptj_utils.cpp | 193 - .../graph/models/gptneox/CMakeLists.txt | 19 - .../runtime/graph/models/gptneox/gptneox.cpp | 438 - .../runtime/graph/models/gptneox/gptneox.h | 54 - .../graph/models/gptneox/gptneox_utils.cpp | 191 - .../runtime/graph/models/llama/CMakeLists.txt | 19 - .../llm/runtime/graph/models/llama/llama.cpp | 468 - .../llm/runtime/graph/models/llama/llama.h | 59 - .../graph/models/llama/llama_utils.cpp | 193 - .../graph/models/mistral/CMakeLists.txt | 19 - .../graph/models/model_utils/CMakeLists.txt | 0 .../graph/models/model_utils/arg_parse.cpp | 495 - .../graph/models/model_utils/model_config.h | 118 - .../graph/models/model_utils/model_files.h | 759 - .../graph/models/model_utils/model_types.h | 474 - .../graph/models/model_utils/model_utils.cpp | 2524 ---- .../graph/models/model_utils/model_utils.h | 487 - .../graph/models/model_utils/quant_config.h | 221 - .../graph/models/model_utils/quant_utils.cpp | 645 - .../graph/models/model_utils/quant_utils.h | 55 - .../runtime/graph/models/model_utils/util.cpp | 49 - .../runtime/graph/models/model_utils/util.h | 428 - 
.../llm/runtime/graph/models/models.h | 26 - .../runtime/graph/models/mpt/CMakeLists.txt | 19 - .../llm/runtime/graph/models/mpt/mpt.cpp | 375 - .../llm/runtime/graph/models/mpt/mpt.h | 53 - .../runtime/graph/models/mpt/mpt_utils.cpp | 182 - .../runtime/graph/models/opt/CMakeLists.txt | 19 - .../llm/runtime/graph/models/opt/opt.cpp | 393 - .../llm/runtime/graph/models/opt/opt.h | 73 - .../runtime/graph/models/opt/opt_utils.cpp | 206 - .../runtime/graph/models/qwen/CMakeLists.txt | 19 - .../llm/runtime/graph/models/qwen/qwen.cpp | 417 - .../llm/runtime/graph/models/qwen/qwen.h | 53 - .../runtime/graph/models/qwen/qwen_utils.cpp | 180 - .../graph/models/starcoder/CMakeLists.txt | 19 - .../graph/models/starcoder/starcoder.cpp | 464 - .../graph/models/starcoder/starcoder.h | 57 - .../models/starcoder/starcoder_utils.cpp | 199 - .../graph/models/whisper/CMakeLists.txt | 19 - .../llm/runtime/graph/models/whisper/dr_wav.h | 6746 --------- .../runtime/graph/models/whisper/whisper.cpp | 4662 ------ .../runtime/graph/models/whisper/whisper.h | 548 - .../graph/models/whisper/whisper_utils.cpp | 249 - .../llm/runtime/graph/requirements.txt | 13 +- .../llm/runtime/graph/scripts/__init__.py | 16 - .../llm/runtime/graph/scripts/cal_diff.py | 61 - .../graph/scripts/ci/calculate_percentiles.py | 110 - .../graph/scripts/ci/cpp_graph_inference.sh | 423 - .../graph/scripts/ci/cpp_graph_prompts.json | 58 - .../llm/runtime/graph/scripts/clang-format.py | 66 - .../llm/runtime/graph/scripts/common.py | 390 - .../llm/runtime/graph/scripts/convert.py | 66 - .../runtime/graph/scripts/convert_baichuan.py | 251 - .../runtime/graph/scripts/convert_bloom.py | 157 - .../runtime/graph/scripts/convert_chatglm.py | 370 - .../runtime/graph/scripts/convert_dolly.py | 182 - .../runtime/graph/scripts/convert_falcon.py | 177 - .../llm/runtime/graph/scripts/convert_gptj.py | 173 - .../runtime/graph/scripts/convert_gptneox.py | 203 - .../graph/scripts/convert_gptq_bloom.py | 238 - 
.../graph/scripts/convert_gptq_llama.py | 139 - .../graph/scripts/convert_gptq_mistral.py | 141 - .../runtime/graph/scripts/convert_llama.py | 1347 -- .../runtime/graph/scripts/convert_mistral.py | 1329 -- .../llm/runtime/graph/scripts/convert_mpt.py | 179 - .../llm/runtime/graph/scripts/convert_opt.py | 184 - .../llm/runtime/graph/scripts/convert_qwen.py | 181 - .../graph/scripts/convert_starcoder.py | 245 - .../runtime/graph/scripts/convert_whisper.py | 242 - .../llm/runtime/graph/scripts/inference.py | 215 - .../graph/scripts/load_peft_and_merge.py | 54 - .../llm/runtime/graph/scripts/perplexity.py | 276 - .../graph/scripts/python_api_example.py | 45 - .../llm/runtime/graph/scripts/quantize.py | 140 - .../graph/scripts/requirements/baichuan.sh | 19 - .../graph/scripts/requirements/chatglm-6b.sh | 19 - .../graph/scripts/requirements/common.txt | 12 - .../graph/scripts/requirements/mistral.txt | 2 - .../llm/runtime/graph/scripts/run.py | 212 - .../llm/runtime/graph/tests/requirements.txt | 2 - .../runtime/graph/tests/test_llm_runtime.py | 4 +- .../llm/runtime/graph/vectors/CMakeLists.txt | 21 - .../runtime/graph/vectors/cpu/CMakeLists.txt | 17 - .../llm/runtime/graph/vectors/cpu/quantize.h | 788 - .../llm/runtime/graph/vectors/cpu/simd.h | 240 - .../llm/runtime/graph/vectors/cpu/vec.hpp | 24 - .../graph/vectors/cpu/vec_arithmetic.cpp | 170 - .../graph/vectors/cpu/vec_arithmetic.hpp | 66 - .../runtime/graph/vectors/cpu/vec_base.hpp | 149 - .../runtime/graph/vectors/cpu/vec_compare.cpp | 51 - .../runtime/graph/vectors/cpu/vec_compare.hpp | 29 - .../runtime/graph/vectors/cpu/vec_convert.cpp | 188 - .../runtime/graph/vectors/cpu/vec_convert.hpp | 43 - .../runtime/graph/vectors/cpu/vec_load.cpp | 15 - .../runtime/graph/vectors/cpu/vec_load.hpp | 52 - .../llm/runtime/graph/vectors/cpu/vec_set.cpp | 53 - .../llm/runtime/graph/vectors/cpu/vec_set.hpp | 39 - .../runtime/graph/vectors/cpu/vec_store.cpp | 15 - .../runtime/graph/vectors/cpu/vec_store.hpp | 73 - 
.../llm/runtime/graph/vectors/ele_reduce.cpp | 67 - .../llm/runtime/graph/vectors/ele_reduce.h | 44 - .../llm/runtime/graph/vectors/fp16.h | 161 - .../runtime/graph/vectors/gpu/CMakeLists.txt | 21 - .../runtime/graph/vectors/gpu/ele_reduce.cpp | 50 - .../runtime/graph/vectors/gpu/ele_wise.cpp | 293 - .../llm/runtime/graph/vectors/gpu/reduce.h | 33 - .../llm/runtime/graph/vectors/gpu/test.cpp | 112 - .../runtime/graph/vectors/gpu/vector_func.h | 48 - .../runtime/graph/vectors/gpu/vector_kernel.h | 181 - .../llm/runtime/graph/vectors/parallel_for.h | 55 - .../pc/build_talkingbot_on_pc.ipynb | 2 +- .../neural_chat/models/model_utils.py | 2 +- .../transformers/modeling/modeling_auto.py | 2 +- setup.py | 3 - 506 files changed, 41 insertions(+), 126629 deletions(-) delete mode 100644 intel_extension_for_transformers/llm/library/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/library/jblas/.clang-format delete mode 100644 intel_extension_for_transformers/llm/library/jblas/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/library/jblas/README.md delete mode 100644 intel_extension_for_transformers/llm/library/jblas/docs/workflow.png delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_base.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_device.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_epilogue.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_gemm.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_parallel.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_a.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_prologue_b.h delete mode 100644 
intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_storage.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_utils.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_wrapper.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx2.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx512_bf16.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_avx512f.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_jit_injector.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_ref.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/kernel_wrapper.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/xbyak/xbyak.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/xbyak/xbyak_bin2hex.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/xbyak/xbyak_mnemonic.h delete mode 100644 intel_extension_for_transformers/llm/library/jblas/jblas/xbyak/xbyak_util.h delete mode 100644 intel_extension_for_transformers/llm/library/kernels/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/library/kernels/README.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/cmake/Common.cmake delete mode 100644 intel_extension_for_transformers/llm/library/kernels/cmake/FindOpenMP.cmake delete mode 100644 intel_extension_for_transformers/llm/library/kernels/cmake/FindVTune.cmake delete mode 100644 intel_extension_for_transformers/llm/library/kernels/cmake/Utility.cmake delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/2D_to_3D.png delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/docs/imgs/3D_spmm.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/4x16_to_vnni_format.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/cache_inconsistency.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/cache_mapping.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/gpu_naive_gemm.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/gpu_tile_sparse_gemm.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_amx_bf16x16_calc.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_amx_bf16x16_relayout.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_avx512f_pattern_base.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_avx512f_pattern_unroll4.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_dynamic_quant_matmul_MN_parallel.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_dynamic_quant_matmul_perf_chat.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_dynamic_quant_matmul_wei_preprocess.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_matmul_avx512f_p2031_p2013_loops.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_matmul_avx512f_p2031_p2013_tile.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_trans_mha_batchk_trans.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_trans_mha_reorder.png delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_calc.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_pattern.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_pattern_left_1x4.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_pattern_left_4x1.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_pattern_right_1x16.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_pattern_right_4x1.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/kernel_vnni_perf.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/matmul_vnni_noperm_p2031_p1302_cpy_loops.svg delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/matmul_vnni_noperm_p2031_p1302_loops.svg delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/matmul_vnni_noperm_p2031_p1302_tile.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/imgs/matmul_vnni_noperm_p2031_p1302_transform8x8.png delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/3D_inference.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/binaryop_injector.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/eltwise_injector.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/gpu/sparse_gemm_gpu.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_amx.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_avx512f.md delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_dynamic_quant_matmul.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_layernormalized_spmm.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_transpose_matmul.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_transpose_mha.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/kernel_desc/kernel_vnni.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/profiling.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/docs/validated_data.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/common.h delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/data_type/bf16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/data_type/data_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/data_type/f8.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/data_type/fp16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/engine.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/engine_factory.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/git_version.h.in delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/impl_list_item.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/interface.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernel.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernel_cache.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernel_desc.hpp delete 
mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernel_hashing.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/amx_utils.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/attention_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/data_pack.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/dynamic_quant_matmul_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/eltwiseop_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/exposed_enum.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/layernorm_ba_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/matmul_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/mean_var_reduce_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/softmax_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/sparse_data.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/spmm_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/kernels/transpose_mha_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/memory_storage.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/operator_desc.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/param_types.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/stream.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/include/tensor_desc.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/scripts/README.md delete mode 100644 intel_extension_for_transformers/llm/library/kernels/scripts/requirements.txt delete mode 100644 intel_extension_for_transformers/llm/library/kernels/scripts/sample_figure.jpg delete mode 100644 intel_extension_for_transformers/llm/library/kernels/scripts/sparsity_all.py delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/amx_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/cpu_isa.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/cpu_isa.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/cpu_parallel.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/cpu_parallel.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/data_pack.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/engine/cpu_engine.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/engine/cpu_engine.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_configure.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_configure.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_s8s8_dynamic_dequant_matmul.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_s8s8_dynamic_dequant_matmul.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_s8s8_dynamic_quant_matmul.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_amx_s8s8_dynamic_quant_matmul.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_binary_injector.cpp delete 
mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_binary_injector.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant_matmul_reduce_scale_quant.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant_matmul_reduce_scale_quant.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant_mha.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_dynamic_quant_mha.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_eltwise_injector.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_eltwise_injector.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_eltwiseop.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_eltwiseop.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_gather.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_gather.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_gemm_avx512f_8bit.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_gemm_avx512f_8bit.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_generator.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_generator.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_groupnorm.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_groupnorm.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_layernorm_ba.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_layernorm_ba.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_amx_s8ab_s8Ab4a_s32AB16a16b.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_amx_s8ab_s8Ab4a_s32AB16a16b.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_amx_u8AB16a64b_s8BA16b4a_ab.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_amx_u8AB16a64b_s8BA16b4a_ab.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_avx512f_p2031_p2013.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_avx512f_p2031_p2013.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_8xkx48.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_8xkx48.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_Ba4b_Ab4a_ba.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_Ba4b_Ab4a_ba.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_noperm_p2031_p1302.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_matmul_vnni_noperm_p2031_p1302.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_mean_var_reduce.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_mean_var_reduce.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_mha_dense_bf16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_mm_exp_vnni_mxkx48.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_mm_exp_vnni_mxkx48.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_seq_cpy_2x8x8.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_seq_cpy_2x8x8.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_seq_cpy_48x4.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_seq_cpy_48x4.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_slice.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_slice.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_softmax.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_softmax.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_softmax_Ab16a.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_softmax_Ab16a.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_amx_bf16_x16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_amx_bf16_x16.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_avx512f.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_avx512f.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_vnni.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_spmm_vnni.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_AB16a4b.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_AB16a4b.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_AB16a4b_16x.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_AB16a4b_16x.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_BA16b4a.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_BA16b4a.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_BA16b4a_trq10n_x16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_BA16b4a_trq10n_x16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_cpy_nx8_4b.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_trans_cpy_nx8_4b.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_transpose_mha.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/jit_transpose_mha.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/jit_domain/regs_pool.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/attention.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/attention.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/attention_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/attention_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_attention_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_dynamic_quant_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_dynamic_quant_matmul.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_eltwiseop_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_gather_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_groupnorm_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_layernorm_ba_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_layernormalized_spmm_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_mha_dense_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_slice_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_softmax_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_sparse_matmul_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_transpose_matmul_list.cpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/cpu_transpose_mha_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_matmul.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_matmul.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_matmul_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_matmul_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_mha.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_mha.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/dynamic_quant_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/eltwiseop.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/eltwiseop.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/eltwiseop_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/eltwiseop_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/gather.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/gather.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/gather_ref.cpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/gather_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/groupnorm.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/groupnorm.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/groupnorm_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/groupnorm_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernorm_ba.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernorm_ba.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernorm_ba_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernorm_ba_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernormalized_spmm.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernormalized_spmm.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernormalized_spmm_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/layernormalized_spmm_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_avx512f_8bit.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_avx512f_8bit.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_avx512f_p2031_p2013.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_avx512f_p2031_p2013.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_ref.cpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_vnni_noperm_p2031_p1302.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_vnni_noperm_p2031_p1302.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_vnni_p2031_p2013.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/matmul_vnni_p2031_p2013.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense_bf16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense_bf16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense_ctx.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/mha_dense_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/slice.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/slice.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/softmax.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/softmax.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/softmax_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/softmax_ref.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/sparse_data.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_amx_bf16_x16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_amx_bf16_x16.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_avx512f.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_avx512f.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_vnni.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/spmm_vnni.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/transpose_mha.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/kernels/transpose_mha.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/memory_storege/cpu_memory_storage.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/cpu/memory_storege/cpu_memory_storage.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/data_type/bf16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/data_type/f8.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/data_type/fp16.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/engine_factory.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/engine/gpu_ocl_engine.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/engine/gpu_ocl_engine.hpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/common.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/common.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/gemm/gpu_ocl_matmul_list.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/gemm/gpu_ocl_matmul_ref.cl delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/gemm/gpu_ocl_matmul_ref.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/kernels/opencl/gemm/gpu_ocl_matmul_ref.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/memory_storage/gpu_ocl_memory_storage.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/memory_storage/gpu_ocl_memory_storage.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/stream/gpu_ocl_stream.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/gpu/stream/gpu_ocl_stream.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/interface.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/kernel.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/kernel_cache.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/kernel_desc.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/singleton.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/tensor_desc.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/utils.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/utils.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/verbose.cpp delete mode 100644 
intel_extension_for_transformers/llm/library/kernels/src/verbose.hpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/vtune_wrapper.cpp delete mode 100644 intel_extension_for_transformers/llm/library/kernels/src/vtune_wrapper.hpp delete mode 160000 intel_extension_for_transformers/llm/library/xbyak delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/CMakePresets.json delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/__init__.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/audio_run.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/common.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/common.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/main_pybind.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/main_run.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/pybind_gptj.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/quant_model.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/quant_whisper.cpp delete mode 160000 intel_extension_for_transformers/llm/runtime/graph/application/third_party/pybind11 delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/application/whisper_pybind.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/cmake/Common.cmake delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/cmake/ISA.cmake delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/CMakeLists.txt delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/core/README.md delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/data_types.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/Ops.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/conv.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/conv.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/ele_reduce.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/ele_wise.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/inner_product.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/ip_fusion_ffn.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/ip_fusion_qkv.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/jblas_common.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/jblas_defs.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/jblas_gemm.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/jblas_gemm.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/layers.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/memory.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/memory.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/mha_dense.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/mha_dense.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/ne_jblas.cpp delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/core/layers/ne_test_layers_utils.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/layers/vec_dot.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/ne.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/ne_jblas.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.c delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/ne_layers.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/parallel_context.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/core/parallel_context.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/developer_document.md delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/fused_attention.md delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/imgs/Attention.PNG delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/imgs/FFN.PNG delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/imgs/shift-rope.svg delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/imgs/shuffle-attn.svg delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/infinite_inference.md delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/docs/tensor_parallelism.md delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/baichuan/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/baichuan/baichuan_utils.cpp delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/models/bloom/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/bloom/bloom_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm2_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/chatglm/chatglm_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/falcon/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/falcon/falcon_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptj/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptj/gptj_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptneox/CMakeLists.txt delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/gptneox/gptneox_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/llama/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/llama/llama.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/llama/llama_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/mistral/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/arg_parse.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_config.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_files.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_types.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/model_utils.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_config.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/quant_utils.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/util.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/model_utils/util.h delete mode 
100644 intel_extension_for_transformers/llm/runtime/graph/models/models.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/mpt/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/mpt/mpt_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/opt/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/opt/opt.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/opt/opt.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/opt/opt_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/qwen/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/qwen/qwen_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/starcoder/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/starcoder/starcoder_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/whisper/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/whisper/dr_wav.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper.h delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/models/whisper/whisper_utils.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/__init__.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/cal_diff.py delete mode 100755 intel_extension_for_transformers/llm/runtime/graph/scripts/ci/calculate_percentiles.py delete mode 100755 intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_inference.sh delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/ci/cpp_graph_prompts.json delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/clang-format.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/common.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_baichuan.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_bloom.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_chatglm.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_dolly.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_falcon.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptj.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptneox.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_bloom.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_llama.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_gptq_mistral.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_llama.py delete mode 100644 
intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mpt.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_opt.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_qwen.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_starcoder.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/convert_whisper.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/inference.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/load_peft_and_merge.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/perplexity.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/python_api_example.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/quantize.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/baichuan.sh delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/chatglm-6b.sh delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/common.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/requirements/mistral.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/scripts/run.py delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/tests/requirements.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/quantize.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/simd.h delete 
mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_arithmetic.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_arithmetic.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_base.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_compare.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_compare.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_convert.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_convert.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_load.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_load.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_set.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_set.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_store.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/cpu/vec_store.hpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/ele_reduce.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/ele_reduce.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/fp16.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/CMakeLists.txt delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/ele_reduce.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/ele_wise.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/reduce.h delete 
mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/test.cpp delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/vector_func.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/gpu/vector_kernel.h delete mode 100644 intel_extension_for_transformers/llm/runtime/graph/vectors/parallel_for.h diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e5983fae25a..c289e6a21f7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -27,11 +27,6 @@ /intel_extension_for_transformers/llm/runtime/deprecated/Cmake* yu.luo@intel.com /intel_extension_for_transformers/llm/runtime/deprecated/cmake/ yu.luo@intel.com -/intel_extension_for_transformers/llm/library/jblas yu.luo@intel.com -/intel_extension_for_transformers/llm/runtime/graph hengyu.meng@intel.com -/intel_extension_for_transformers/llm/runtime/graph/scripts zhenwei.liu@intel.com -/intel_extension_for_transformers/llm/runtime/graph/__init__.py zhenwei.liu@intel.com -/intel_extension_for_transformers/llm/library hengyu.meng@intel.com /intel_extension_for_transformers/llm/runtime/deprecated bo1.dong@intel.com /intel_extension_for_transformers/llm/operator bo1.dong@intel.com /intel_extension_for_transformers/llm/evaluation chang1.wang@intel.com diff --git a/.github/workflows/cpp-graph-test.yml b/.github/workflows/cpp-graph-test.yml index e6d46b1d30d..d61496c0da5 100644 --- a/.github/workflows/cpp-graph-test.yml +++ b/.github/workflows/cpp-graph-test.yml @@ -1,13 +1,6 @@ name: CPP Graph Test on: - pull_request: - branches: [main] - paths: - - '.github/workflows/cpp-graph-test.yml' - - 'intel_extension_for_transformers/llm/runtime/graph/**' - - 'intel_extension_for_transformers/llm/library/jblas/**' - - '!**/*.md' workflow_dispatch: inputs: compiler_version: diff --git a/.github/workflows/script/formatScan/pylint.sh b/.github/workflows/script/formatScan/pylint.sh index 04ece88878e..9c486e51096 100644 --- 
a/.github/workflows/script/formatScan/pylint.sh +++ b/.github/workflows/script/formatScan/pylint.sh @@ -29,6 +29,11 @@ fi # install packages pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@83dbfbf6070324f3e5872f63e49d49ff7ef4c9b3 pip install accelerate nlpaug nltk schema optimum-intel==1.11.0 optimum==1.13.3 peft==0.6.2 +# install neural-speed +git clone https://github.com/intel/neural-speed.git +cd neural-speed && pip install -r requirements.txt && pip install . +cd .. +pip list | grep neural-speed echo "[DEBUG] list pipdeptree..." pip install pipdeptree diff --git a/.github/workflows/script/unitTest/run_unit_test_llmruntime.sh b/.github/workflows/script/unitTest/run_unit_test_llmruntime.sh index e6a92ea65b7..dd60815da66 100644 --- a/.github/workflows/script/unitTest/run_unit_test_llmruntime.sh +++ b/.github/workflows/script/unitTest/run_unit_test_llmruntime.sh @@ -32,6 +32,11 @@ function llmruntime_test() { function main() { bash /intel-extension-for-transformers/.github/workflows/script/unitTest/env_setup.sh "${WORKING_DIR}" + ## install neuralspeed from source code + git clone https://github.com/intel/neural-speed.git + cd neural-speed && pip install -r requirements.txt && pip install . + cd .. 
+ pip list | grep neural-speed llmruntime_test } diff --git a/.github/workflows/sparse_lib_CI.yml b/.github/workflows/sparse_lib_CI.yml index 745580adf2d..7b5ba4c8669 100644 --- a/.github/workflows/sparse_lib_CI.yml +++ b/.github/workflows/sparse_lib_CI.yml @@ -4,12 +4,9 @@ on: pull_request: branches: [main] paths: - - 'intel_extension_for_transformers/llm/library/kernels/**' - - 'intel_extension_for_transformers/llm/runtime/deprecated/test/kernels/benchmark/**' + - 'intel_extension_for_transformers/llm/runtime/deprecated/kernels/**' - '.github/workflows/sparse_lib_CI.yml' - '.github/workflows/script/SparseLibCI' - - '!intel_extension_for_transformers/llm/library/kernels/docs/**' - - '!intel_extension_for_transformers/llm/library/kernels/README.md' workflow_dispatch: # If there is a new commit, the previous jobs will be canceled @@ -26,30 +23,30 @@ env: jobs: sparselib: - runs-on: [self-hosted, Linux, X64, spr] + runs-on: itrex-node steps: - name: Docker Clean Up run: | - podman ps -a - if [[ $(podman ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; then - podman start ${{ env.CONTAINER_NAME }} + docker ps -a + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; then + docker start ${{ env.CONTAINER_NAME }} echo "remove left files through container ..." - podman exec ${{ env.CONTAINER_NAME }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" + docker exec ${{ env.CONTAINER_NAME }} bash -c "ls -a /intel-extension-for-transformers && rm -fr /intel-extension-for-transformers/* && rm -fr /intel-extension-for-transformers/.* || true" fi - name: Checkout out Repo uses: actions/checkout@v3 - name: Docker Build run: | - podman build -f ${{ github.workspace }}/.github/workflows/docker/${{ env.DOCKER_FILE_NAME }}.dockerfile -t ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} . 
+ docker build -f ${{ github.workspace }}/.github/workflows/docker/${{ env.DOCKER_FILE_NAME }}.dockerfile -t ${{ env.REPO_NAME }}:${{ env.REPO_TAG }} . - name: Docker Run run: | - if [[ $(podman ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; then - podman stop ${{ env.CONTAINER_NAME }} - podman rm -vf ${{ env.CONTAINER_NAME }} || true + if [[ $(docker ps -a | grep -i '${{ env.CONTAINER_NAME }}'$) ]]; then + docker stop ${{ env.CONTAINER_NAME }} + docker rm -vf ${{ env.CONTAINER_NAME }} || true fi - podman run -dit --disable-content-trust --privileged --name=${{ env.CONTAINER_NAME }} -v /dev/shm:/dev/shm \ + docker run -dit --disable-content-trust --privileged --name=${{ env.CONTAINER_NAME }} -v /dev/shm:/dev/shm \ -v ${{ github.workspace }}:/intel-extension-for-transformers \ -e report_title="SparseLib Test" \ -e BUILD_NUMBER="${{ github.run_number }}" \ @@ -72,12 +69,12 @@ jobs: - name: SparseLib Test id: SparseLib-Test run: | - podman exec ${{ env.CONTAINER_NAME }} \ + docker exec ${{ env.CONTAINER_NAME }} \ bash -c "bash /intel-extension-for-transformers/.github/workflows/script/SparseLibCI/run_sparse_lib.sh" - name: Generate Report run: | - podman exec ${{ env.CONTAINER_NAME }} \ + docker exec ${{ env.CONTAINER_NAME }} \ bash -c "bash /generate_sparse_lib.sh" - name: Publish pipeline artifact diff --git a/.github/workflows/unit-test-llmruntime.yml b/.github/workflows/unit-test-llmruntime.yml index c0c75532ab8..f4d7171e6de 100644 --- a/.github/workflows/unit-test-llmruntime.yml +++ b/.github/workflows/unit-test-llmruntime.yml @@ -6,6 +6,7 @@ on: paths: - intel_extension_for_transformers/llm/runtime/graph/** - .github/workflows/unit-test-llmruntime.yml + - '.github/workflows/script/unitTest/run_unit_test_llmruntime.sh' - '!intel_extension_for_transformers/llm/runtime/graph/docs/**' - '!intel_extension_for_transformers/llm/runtime/graph/*.md' workflow_dispatch: diff --git a/.gitmodules b/.gitmodules index 356d6729cd1..abc1f90d2db 100644 --- a/.gitmodules +++ 
b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "intel_extension_for_transformers/llm/library/xbyak"] - path = intel_extension_for_transformers/llm/library/xbyak - url = https://github.com/herumi/xbyak.git [submodule "intel_extension_for_transformers/llm/runtime/third_party/oneDNN"] path = intel_extension_for_transformers/llm/runtime/deprecated/third_party/oneDNN url = https://github.com/oneapi-src/oneDNN.git @@ -79,6 +76,3 @@ [submodule "intel_extension_for_transformers/llm/runtime/third_party/xbyak"] path = intel_extension_for_transformers/llm/runtime/deprecated/third_party/xbyak url = https://github.com/herumi/xbyak.git -[submodule "intel_extension_for_transformers/llm/runtime/graph/application/third_party/pybind11"] - path = intel_extension_for_transformers/llm/runtime/graph/application/third_party/pybind11 - url = https://github.com/pybind/pybind11.git diff --git a/intel_extension_for_transformers/llm/library/CMakeLists.txt b/intel_extension_for_transformers/llm/library/CMakeLists.txt deleted file mode 100644 index 6e412383884..00000000000 --- a/intel_extension_for_transformers/llm/library/CMakeLists.txt +++ /dev/null @@ -1,13 +0,0 @@ -## Copyright (c) 2022 Intel Corporation -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. 
diff --git a/intel_extension_for_transformers/llm/library/jblas/.clang-format b/intel_extension_for_transformers/llm/library/jblas/.clang-format deleted file mode 100644 index 84b87670616..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/.clang-format +++ /dev/null @@ -1,7 +0,0 @@ -Language: Cpp -BasedOnStyle: Google -DerivePointerAlignment: false -ColumnLimit: 120 -SpaceBeforeParens: ControlStatements -SpaceBeforeRangeBasedForLoopColon: true -SortIncludes: false diff --git a/intel_extension_for_transformers/llm/library/jblas/CMakeLists.txt b/intel_extension_for_transformers/llm/library/jblas/CMakeLists.txt deleted file mode 100644 index e698a81554d..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/CMakeLists.txt +++ /dev/null @@ -1,34 +0,0 @@ -cmake_minimum_required(VERSION 3.5) - -project(jblas LANGUAGES CXX VERSION 0.1.0) - -file(GLOB headers ${PROJECT_NAME}/*.h ${PROJECT_NAME}/*.hpp) -file(GLOB xbyak_headers ${PROJECT_NAME}/xbyak/*.h ${PROJECT_NAME}/xbyak/*.hpp) - -add_library(${PROJECT_NAME} INTERFACE) -add_library(${PROJECT_NAME}::${PROJECT_NAME} ALIAS ${PROJECT_NAME}) - -target_include_directories( - ${PROJECT_NAME} INTERFACE - "$" - "$" -) - -if(WIN32) - target_compile_definitions(${PROJECT_NAME} INTERFACE _CRT_SECURE_NO_WARNINGS NOMINMAX) - target_compile_options(${PROJECT_NAME} INTERFACE /wd4068 /wd4849 /wd6262 /wd4702 /wd4100) - #4068 ignore unroll and GCC flags - #4849 ignore collapse - #6262 ignore stack too large - #4702 unreachable code(false warning on constexpr condition) - #4100 unreferenced formal parameter - - target_link_options(${PROJECT_NAME} INTERFACE /STACK:5242880) #Stack requires up to L2 cache size -endif(WIN32) - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) - -include(FindOpenMP) -target_link_libraries(${PROJECT_NAME} INTERFACE OpenMP::OpenMP_CXX OpenMP::OpenMP_C) -target_compile_features(${PROJECT_NAME} INTERFACE cxx_std_17) diff --git 
a/intel_extension_for_transformers/llm/library/jblas/README.md b/intel_extension_for_transformers/llm/library/jblas/README.md deleted file mode 100644 index be9c7906fc9..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Jblas -Jblas is a lightweight, header-only acceleration library for high-performance GEMM and related computations on Intel platform. Inspired by Cutlass, it provides high-level template class abstractions for various elements required for computation, and allows flexible kernel construction through template combinations to meet specific needs, maximizing the reuse of existing template classes. Users can also develop custom template classes to expand Jblas’s computational capabilities. Jblas includes several different types of template classes, specifically: - -- `Interface`: Exposes gemm computation interface to users, while allowing users to specify their own Launcher template classes and Parallel template classes. -- `Launcher`: Schedules computation-related template classes, allowing users to specify their own computation-related template classes, including GemmCore, Prologue, and Epilogue. -- `Parallel`: Specifies data splitting strategy for task distribution among different cores. Jblas’s default Parallel template class adopts an L2-cache-fusion concept, i.e., each core tries to temporarily store the data it processes in its L2-cache during each round of gemm-tile computation. -- `GemmCore`: A computation-related template class that provides a micro-kernel for performing a tile gemm computation with a specific ISA. It is the most important template class in Jblas. Currently, GemmCore supports the following ISAs: - - AVX2 - - AVX_VNNI - - AVX512F - - AVX512_VNNI - - AMX_BF16 - - AMX_INT8 - - AVX512_FP16 -- `Prologue`: A computation-related template class that preprocesses (such as data type conversion/padding) input data to meet GemmCore’s input data requirements. 
-- `Epilogue`: A computation-related template class that post-processes (such as eltwiseop-fusion) the results of gemm-core computations to expand Jblas’s application scenarios. - -The interaction logic between different template classes and the calculation process of gemm are shown in the following figure. -![bit4_emulation](docs/workflow.png) -# Highlights -## Weight-only -Jblas provides weight-only linear computational capabilities for LLM inference. We provide a series of Prologues for quantize/compress/serialize/deserialize fp32 weights in different ways. Specifically, we support compressed weights of the following data types: - -- S8 -- S4_CLIP -- S4_FULLRANGE -- FP4 -- NF4 -## Postop-fusion -Jblas provides assembly-level postop-fusion through epilogue to minimize the overhead caused by data movement. Specifically, we support the following postop-fusions: - -- GELU -- SWISH -- RELU -- EXP -- TANH -## Compilation Requirements and Usage -Compile: - -- GCC version >=8.5.0 -- CMake version >=3.5 - -Usage: -```cmake -add_subdirectory(jblas) -target_link_libraries("${YOUR_PROJECT}" jblas::jblas) -``` diff --git a/intel_extension_for_transformers/llm/library/jblas/docs/workflow.png b/intel_extension_for_transformers/llm/library/jblas/docs/workflow.png deleted file mode 100644 index e7f67775f2fa689bacddc86988ff5d443b5dca65..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 172385 zcmdpec|4Tu+xN6l5p`E6TZ>XDd&piXsZbHJ%a(l~jJ4Yx$yQWCmJ*VXeK$&x?CaQt ztYc|xgJHbKHACG!&+mEO_wRQ;K3U?r&huEl$M^Ui=QaK}l;n5qJh&5uLhVwxB6kag zqW_9QZBE}#13x)-`ma#{oX9xvNBJ6t&z zO`lgd#tA4k{LO!aC+Z`|1&L;jl1BXPdgB8j*X;kE{*cA>SZ8NwmulNH#U-&5if?uP za=5PZ{v+@3&b|z%DDgI9ZL#}eQM1cO_D2;2C7HVH*KBah4zp9T%T8pjmSgF+vbE{P z7sxDd;pOWCTbjq)Lh6&u%||?1r)Q<7L)5~vr=4RAacMe5Oq30+|6?oLBQlEE?APnN z+IFzy@0V@dj{hINpLk_h8JxNn5gB>VQKCpKS(RZ$fNg7?z{WWMqk^0yW%BGT4Px)B;U;Mn9 zgS9MXQc+Ryv8G+WnD@o&vfy5p6Dmg-!mow%_ShEm(b#z@Dl1q1`0-&nW+ zl8{xCyygcl<=6iVg+3}o2@5gFZ$tLlMQE!AEBfwvgt#4bwXun?!!iW)n 
zzy9<0`pUOYJrhGuv@geInbun`FLz{_ih6iV)-TPD*k}@0mSzRco^7=wB_}7ph>U!6 zHKEC=FM2$^wd9UhPba1{F)?w$QU*3ZJ$5eOq19Pu&6KX$9z4myVoi&flY_;#jt!li za7=9*pIVUa#+0HRQl0Is@?v6B#dQLd((!Z4*7buyrOGd5yh>9@tCxwgeCf;^YnE`H z=*+Re(e!LZp=@2iv7TD@43flG^P3b)s;a79L`U}!@XlEwV9HAc+ND#tJn0p$c+QWf zo?U$@?quFv*t6g%6ur<`)#;g6N;X-wlyVjl5)zP-@+kM{`L@&x%e3+W4+)5h=EcVx zK|A-@%q>TA^wmz4pVh)o4wE~pa!PKt4_!`kj-6PLOas> z9gE9;oSrU~q`Pb4PUEUC#q5rFPE>lHo+>3xv2bv3I9^-dkL{a?Lt9X&iR7ZG!jzQB zgsb6&+s?M`xmArxzll#hu>aPoA|1b46#?7Zen* zV;MX8e0_ZfV#&>32d7=@-3I2L79WH&kJp`dFCn>~$(?eX3f3W-1diulB?1$M+`OI*Ht7s%ScxOW{aVEyD+`^*xHM zY&8Omk;)_V6H^a|Hzz4iFZM42Vu-mdOkf@kV7g%MFJfYPMV1Cl3_?;%oOZIlZw+=K zY$66Rc00#&_P=;ocRwJtG7qf{@!!|{vocaQdRn(fFWR!$)WpQ3&CUOzqF3Lw=(_5; zImb%8BRKZHAFs7zDN(T4p~SXk{=&uP=4SRhK)C=UuhMaDdXo#JJtVzk)xs%U=Op@# z*+H|&3VE63#;-&6fNz88E6=chnby55=+9l>hO9GX8(W4TQzSem0Tpunn(AX^yn6J@ z+dh1(<#>#3J#+#4Nr06b^S+?*%@f#RK0d7|S5bK^$1`?XE8{F%Goxq`bK7!<3&+Ut zu(eAFr^!fNbXV?_NgrR^@!eJ)oH3A1>U|bR$d;Oo^HYNz9UTZ_Bs$5@YqB#H?I;kA zs)b|>f}bz`^pjy*a+j`qd2X-Ff}sqPbDxJZk`hObOucm1Pw(7DUAMPYsW9BPRBH`8&>C6nXh=I-R#*pPZJ^6Z{H z0)m2w>HOCgyYzy>OJ;KHw2pV-(FnN1>w80yRl_d$Vj_VO3OR7&sOJ)jx?|5$KZcb+ zGFryA%=|28oObEzDMsflyOtK^gyV)TqqFKGA|j+Fe&~yNkyjFjqW>uq8K3J+{g}zx zQBB-I(#p`7P>8-9zb8a$L1U#ci!NjN`azuTVpvSehAradkh1W7E${4ghw+wVE5=^p z1(|Yz=nLFMI~XPsJfp!&QQobdcKf)=+Qlw*&Gw==D~pDE7PYJ$V{YSVMN^&G?)%bP z4TMBQ+L|$5Q?2oYaZa)QmAW%cF>Sx@a)??*FhgqGkD<=kMCIBqOmslFTh`Rne4d+| zJJT96Y9nTX#aAv#@67e(4L)*yFU_mQdvYnQ;|i&eO|i=@Ndx_BLu;McqDy3Z|@#c8#xY!*bJ%I>Qst0Jg{07Hy4o7Y%Vf|&DEcW$J`#~=W1q+-{9Vs&tE zxS6cFEYfOhZ=cCW>PUugEPvZv(p>LmE;D{y*rOnn9Q2 z8^bz}9z9AZsQ2Iv^ev+$>k1 zxiMf0rlgdcR7jdC8qA7awWMccoL2Byzo*z$YgR`yVlfu63C^sf_S?_aQvsb3ZS(!4ol*;1T18tHbuJn+uW3 z7!lzQMez$1&)ijr&VmFFB_J6S6PCapCJR3XT}{YLi5#+b)zSke*pXM3w8nG7pe$K* zc`js?i1qpDB+VK$F*xRCX8r}J>f-+6D5pQ?8i6xJ9lP90c%D~$te4bFtaw1j{8}^h za!PCGO(Z_P+!cVt&r0F2FVJ>hZjKFB_M8cOQpGZbkS^p^GO+du3`Kh(;Z_IJ6jYX5 zdOj#{1H{2VcY#9`=A84Cc1>N~)VnsoOeEG-qJfrF=s5Qr>;AchA6go|q_suGuNBG> 
zdR4m6g)qS3R2~QT^Hwe8W~!Q@r>At2FcKtAC4-eA^2w@?2n52FP_9Q`?iyuvyv5-O zOX074bFxYjrcFLCct9??r+W*l&*42qjA+$U&!S=mAsnn^ND4_+ViM=l z&LY(;cPj3mufgxWeEBk8#c!cy(};9eEX?Y#mW<14c8hDa5Pxu~&2eX-V`N?SO}^;|O6Q;Ijyh^1^{Fv8 zx>T87(7O`N!^4A6n9udA-SuCT$v>1UCiD6@Y&>V92BoH5m9_J251)GWEx24D;`(t2 zgl@03Rl;a|NQHXo%(KO|ywd%!oJY#;1AFc2rHB*aY|KppRpi+)%;SzEq^*@-p@t7^Jy-#QGINAeV%SB zKa|Nv?sM^+LEJGeN7U5R(AqYRj>1y~BQcAMl` zAV$PW6hiIfZd<5bFJ8V3g!-9IdlbO5skP;C&dGz&SOp=yMT%Q|r)Xq*h6*Hn*Jehr z$0MW(68`yNFBq%~)8|5N)3yn%XggGq+LVxZg{uJl}nZk4VJ}>xt-#T{w23l zS|QmgK7G0Xv}ztB1zD(`GqdCl1p+`o0QjRq=x0c-5Q(Yxs;8_pJu4-8_r$JFm>vzU zo;O}ww}dy+?(w6dKOL0Vkt4bCv%HJrUf&|hox4Y}1shgqf!VL`?;^nEuu)fVVP;Ix zpSJ7~m97I|p8sB_3m`VNK-@T53U!b$i$nlr4CERbI0G019QT2YQ)o8zhonjZ#s2RTIt{zpoH^PT)5^jJVdN|Gdr(N zVfLVHl@exL6^mnRwvj5$@n#0MM}W_l z-uE?ZyRNAC#b%|#X8W)7^ri?VTLdH|x&%=C5SP-aufqVwL2L0QfubqqQPL%e`>HlZ6Dbmwj|1e)l3Y*bvi?#t<* zoT36OW`9fjq|>xx$n)x<`45#rb+u4n)jtL!3>zw2C4ZID)PrbEw|aD8g(w}-k<}rmC7rh zl|Rc%&pdHiAygU~8yi>7U%q^KAbQ%})O}@;Glb8*pN;_8`UR=XE_03YgCA)*1MgCl{VZHMpA5L#T~W-^)0g2L_H_7 z!*buw2T+46Dk}kLF2A}lYWK@sH;*Cp3nlwLE$woSX3xHdX2J(`a1wR{a!;=&uyAsQ za4EdLaY9ZcYSLiiZ%|&B5r&8Q{?IJ13o_t{VzY*Z#zQdPJf;MXDK9U7!le)x6m%(+ z>*lFvM`G)V9}L%H-kX&I-YBsjOw7!Lo+`W7E$kl{f zfq{W@`M>_IXAc7UC^0!e)YrF?-z)nnGz+V#s~5d{R%L`Nus}OFj9d9K-15XeO!tP6 zRhGJbzc{c&i+K*1WO!ucsABUy$}T-uTz>BoEc3@M-7ZphiMj{7Ovk|=&5!ZtXr|&Z z7cX8!b{G;O4>=E^z2si%$BtMLdCi4T6s|Jq?EHL|9L+PldZlbs0{v^7OacGeW^nLA zt^xH=QQn*WqHL4p!?S14V6DGGBO^1j={7>}AS)5uz^x%dfSg1G>TTJeB7WiAN0AFH z18q)K{wE?fHUxYU5D=h)V`XR8VJruF0|XIB1f#?Td2L@qx>pox;YO-jm+*l^-E{e) zo~d_g(y<}1u`hS+q;?Al3%3jgZ`_1|gCc5qb3mLtNgP`?dI9&SHOWfR?64Gp+79%ao`Jv)`?vo?y^TKyRRK5x6oqj)Mbqll7_s>;5Cu8qQEQ`Td55wzf87`Fllw zvm8yGR?sLRzNm(wWbuImW4{s}HTQ@TpSIuMzke^6aO+MieORIyWIq(Kuqrul2Kb#@ zIRXD=`Z6$0LPA0so?XUMay>911y3^_jjCNp+X!pavJ%BM z?V=mR+tEY(`u{|_f8)#qEhT8m%1-n00!I%3o2L62P%!O3Fk!bY{&hE^NYu49FsG!X zKxgxtbpTNX`xprVNFerETtxC8@hK2y+V#IKyZIw|4}2Xr0-PH{4H_E(85vFp2f(bN zG*NN!LJnUAN=Ng0f z{|nYo40|aa)K@?W?R1$YeL1<(-$837=vc%jU8d=g#6jhnO3Tf59y 
zc&`2B26VNbQ;tKfyJt_MdCqHq0wA~#9y|a8*v*f(rs0DE1CcoP_xGPaDNvxD=Et7J zSwaZ133SGidI(T7`5t{4i0j&bis0>(&56ARhzIO_0eA#?2$uylgMu*sEe~J#9Ez$% zdfI_c59q*01HEq3LD2E@L88<299-YC?L&5G|2lKdiz0WLNF5p)qT>4hHCNBQ?Ikbd zRjMdU1FnDj)Wbuh(Ld=ov4wsVBNQ+t=@8H}Ydk)bnucGJebW8?Rh4 zcCj2?a~v4}l^t%j6oHZenP9gxGXy4TyeC|k<~;D(A8F4hToy?tOOdYf;B(j0L{AP^ zamNO7$GQu84x4%&NBV($Vz1j^O@z>ss)-4lip@$XZXr`~{m$PXQwpB*CU1I>Di78Ld(@b5gF_qqnh#7a`7kgKy zoGU>qCA^D^y3 z3v2IBUv@0o)%6^&cK&XJB|$X725h=PV4S+RepP)O<*NFznm~Ess-Oly4FkFliTEq= zw0SG5!nmPrg~S8wwU$0VjDZkwUo7VgX7pMtw;QgBSRAzR;K@v1E)_#Z>FI4ml<3F~ zBv#vLsnuNZ{)_(weDZ^JuY0*dXMFU?KtVcgz!{Mqu3NNlAzKa0G%2GS z1X=&(ZiZ>j`eMDyFHt~mp?zTBE?h6)eQ5^6l1S+m!mBRKn`+px#ZsNO;ucb>o-nlz*6#T%)g`GC|)CH0&-_ zdI9DDl}a!I5nA-qI zx$I_sRGkQ~2H2CFs~3Sv)%A&wHRnkf|$OJ^Ca4WQ--UWCP#i-wJq~P$sBM%ZF zRV^h&6Q&Oka=(ZmD7D0VLx#VHh7Q%y`)!;B7=rD@H$F6taWzp{98x>Jd$O$pHaNxy zQ>Ow9Q3VYgZ2|IW`kLV%DtmGfEP zyC%_km;(n778)!LnVf%mzCc$GZbqn5_7p@gl9+dRV|xu08u(q-TqFv7L`_~KY^Yts zf9#DWm|1;z!EhFbUszwfT?Bz8x=m*)e=v9+o1O|nmdBVeAG9=<80B6Nb_mJJ9_EMk zTCLyEyIdK;3U80}Uudr^v+78ZN&csfiUvASV=3_5iapQacRiYP1(q+#nF1&wNy+X! 
zT^a)rogn1lmY=z0ma1GvB>K*cKHs<>bq?C_97eBv>4c%ZY6Py&4&fd9vE!#tjp@YYubv;-CoRTo-S zq_^FT$WyS&Um|hTKnC(l0I8`6a5v7Y#vwU`WG?qdu>JO%8>Nfw0(GUI0apM(Da?Ti zLDfUA0fMN!3dv4!@y;&ajtszYtN!qy`^aVUbVZb0$Anjn0T69A^dr;;AWoxI+f#75JaR?e9YKlF`-Mp*&50Ym_lMI-%y3c z#K`FL#(V}POXUUaZ=k?NCKBeFl@V?UfB{UD0vKR4P`2k~Ybg4kbRo3%HY}cZG&RSy z_@G6L+RU$&ghh}f_1EEqU`i+Iee^xd$Dj>CngeBjGm$9Z875gs3!)Ip!pa%|z2zcd zP-<#%6#R`p9G+7KA!}mgk%u$SvoGkTAcN2W?A>+Wqx5YhOTZMia#TeGnpNONhaqx- zW~SlyQ-l%hd7fw5gHwFMK7pO9{BaS9|~32Uj^E& z_eHSgB`9-LUSyu52{VZQM5;M?YfT(f7LCuKvT}>Fv-g3G_)4H}=;2=x6clU<-nRTY!<6(ol|UUugs_SVunQ!{lll?2b^{`v8Zu0=8gLL~%>5PotRI)RwWpxx=FB)5Ufb?(; z31rciWI+Jv-#+warucB~23C$=iTl6v>|(1J=g8@32I=XRvcPpB^GHP$VIV|0sUB*%%uq z7%mzcq7wD~Qz(z_LTIPARyLFGy1uj_!t@`YaH|!lg4iCUSpPcg}QrF?JfU|GKIu<*I3hz_*%&LDn1-p%0}hg@2;rfr$`f zz)x5fHCj`{baR!wfF%I%Y+S7D;}L|h?%tEC#sg{lD-}SNKS#7q+#fAh`T3Km~9Q(&2$x0a3f5 zRBdQ*%^_yo)z#H_NF@ZmGe)H3ipAJX&#YuF^g-qE^z+%cq<1`}?GT&$#dl{w8#Br3 zxSeV#!U~Pduf8#uQ>J$?SnuV%EW4QvD%NZdep>RBIKaVC+CPV|&eFL}@;%XEjP!*M z?u(CCuDh*4SufJfsNqam8YO!DzCpB7X*I$(LBayWz|BLnLa{`|B-jTi5rPp~Yd6ge z8;q@%t1Fy<;pP>GM1#!`%E|p{ZouU}99c{EGHf_Q-*QvNbA3zSDuAJH|ScSm^Szolq6!UEW^Az}+iO1d{sB2EQ@in@--Gf6!MDLm2`(wy&Cl|#`- zBG~hRFO?##A=(L5#HGq>Fe9R9ok&jxN+~3EaJmiv4%9iVyy9Ya9>1KF7*QGuYXoeD zO8eC$%VTU?yB6idSgMVO1#<8}aV{m479-KTubOw12S1yW% zagkAKALwA5w$2;DPU}OlcM>v8;Qpn5VwS4)IAnfqk=dQ@)`K6oosPK;9BmOfcK{Wb zus8^_x!F3wh#IOtdBgG7_{#b}%)7xcibs_kZl^iiCNdDhe1K7E?gmidO->uype;Wo zVWj|i{-QP>h&~1yiLzC}WNz2M{&bg1MuHKGQ_fh{dYLtMcVbZ=6duoOdrhrPfN4S? 
z`uh11E&EVfQsl)!WG$6F&VB4_IZJwjZuaS{zs@SIYnTnsOtKEZ6afm3cB%V0Kpy~| zh4pyIG!nQh%pN`^vr#BrS{&@`LDwrn6w1i6Eh@^FOvU)biT5wmg6h=vz|8GV87R*i=mOc?p$fH1%KwcYSEI@S7nl_v1cW z=*^D1;2DD|#%8+U)S_E$T-N|=?fhr$pCk> zJx9vUnB|qS_mNAym}{q9FG>wZpd~*I?LO$-6r-9^d%7S zh}sl`8BboE>?2Pu4$6=ROBX)0HZ(NsD(8bXX3HdFoK}zf8m1rX$xRXAq?a2*D(cA- z^%YwfB@b=gQ2p4Y#oYj$vqU!pDn; zJS@@25fNT&)G+yhaj9nWLO{t%4I+J|d(Nj2zzJW%u`5H!tipZLLN8z>)~hs$R1g88 zpukF|-I2f@JIc=IKqvvZO^RqW&q3rXKm}-lG&D6I^{w{w1*NE`R|wTh?(JI{79D)RtlK`Ark4$s z7LhujV|exI6%+zCPgWo9J$q_RgrLh1NiDjgf9%M>(nxFwP-7h%oOw>J6;2t(K*(?y zIn@RuAb0%@bhQ1zw(wn!J9z0QyF;R$L}B5g8=)7wCR2$kB?krTdEps*_YB&&T`MNc zUnG>IySo-I-!AaXp<%o0HP_8qnHNh^I=vQdQ^CxfR-v9w9&zbcUUO`=h#^m|?KAD2 zm#Ii!X}V}PlWKGDM$!Z~M8^62XZ7zPdqN~r_*TDr7aGd^gj8ZKTB^|mq{U&$^KMO*R(U;4UgY|k(C-^^EX+}8SYy&hl}nfvx}F1rz3zOt}9wDBl9u!iSbyX;P=Uh`~Th6p#D z!bx+ad-Yh*v>fpHfkcn>Ld1OS+6;;CB*dMI!)t}0)mcyUoF-Q2Ez)@%20p<7dL|)W zW;vdaC|Yt7kj^HVmrXeOZ*E{%8BmwadXzTv^expnTOX9{Fl%_=a|TA;(y@Kn)=ttsk7X|W(5-lyew2-K_jkaaK z&v)3-bQ&?jiCjJ%l#!7)`K}E)v_w5Yw^wH7sLWpqp)Z8XWF2?LP{e{YOYbTdm|RvFzjGBT3yvg+1LnzSCysN@L1uI8;R zq~+e3MZk{(Gx|yl$hUgede0*Jxt~$Vao`1GpMa?~Idp1u9P47=PuQQ9i&ZSX?!4}fBVrs!1e8oRM1^>-0v7^xTgVW9!=rgV8x}A2uFP?Hr z$D0>LZ`BEY6kcL4*3`F*13pgMgFIz~yvj;Hm~M)&ks(GeeoEr%+|#GAltr6q4{5mevv19@_(yKnf_XSfheVnt z*~BkQdx?@q3#R2h2i*79TbA*ZA}5Vug}&E~(#GB?JKr=Wgilj(uH;*7;znlGU}9Up z)z*_PK%lbCT$AoUGmZ6FNcmF&)rIY3JS(LFv@2 z@`~C@JYy-JAfIa$N+O=q-DDH3LQ?(vrRC|$cpJB_7%vk53pkhwQ}&K*Ywr8cp!|5s zQO6XWoNt;@i}8XCraQ!WL*((ebh^U%y^)pD^SMpv>KR7etra_pdLm>|-YP zrihdPpbMmB(YEk4?FF}i^fftGKJ>N*AsNB~mJPeSsDfWRvc~C&54qbvw#G6jGxL0t zbA|3q#5Oj$yyb}bg*>cB^9%qio28b|A<+L|wkJ|DjM)?#UNT{3-wkhH9WsyTEm5HhrxY4tWpgNpkRcjK%Cq*)-}O-i_8;848oxr>s_5Tsl?N zr@8y_xgUr@8dE(@q~=>x+*%IoiS{`J7&)`FlIR3%;7OH{h!xRSw75U9Ym&>Q$xPzA zK5$~fD@KnK(u*G%F`OHrGw~@#a??uJnw?qm%vxn{Nu~UqKG!ubWx*X=4(NZX@{T)y z!{;&^znO`+io;FDAhoXc`6*ux7r6wUDJCW>l2*4BZsy+f11Z&l{!)wXONxfhidvdY zK4;ZV@0Bn+e(ck}8#KytLHvgvZ=zxO$~8+mYI2p~2P)$F1>JhDakjMj`6b=*!R4!~ 
zrRg^0ARO0#Fp7Uv+HUY{G!rW}cea)b?enVaAq(EO7Atc_H9xTGCe2bgqRLvdK<#%ZgZKWj=*;C5c=0Ybtm%_Po z=LG350wETpH@v~5MdWr7H2${lPHLNVWdb~iyuxHPj+OVDMwZNgRBg~Jq;%R6Y|E;?EwyrL3vWGx!*JSWCe^_*M zbez(^t%O2dDgK8dGoa|B=K@cDt0BMPI9+guBSaoXhmS7DmvA(u34tE`)@e)jBH zXkj189VuzsPUF*=Z5AeC)ybs*pb0kT4Zmicrv-C2!KfwtM%+?2ukUjA0V{lI>z|>S zS~%aP<%CboyBd1r$rVX{6)i2EbXo@#!$8j>01_%`{7VzarIX`TL3#jC~SFg^)uIK0HXIvzq>xVV^mA^|%Q*&~1Do!l# zzhPkTioX1Yu5QeGHH+7N8_b`&LfH`NCF27-ET5P6KQ8Zoh#eu4h6paIDk?`|edT_P zM9rgr`F|ZGz{fpk*#)C&!98IHECd~s?KRZPZ2arHlQI}ZWp+)%hh~9&0@umi+dpp+ zf(3rm|9~5eZ+z=69Sa|AX*xVv9q+rlyIu4v_~~Kf^@?SqxKXdBzmFpPpyHxlzGNHv z@#9`crg7*SP!E;vN5H?jAt9IJXGp8?p_ykL{bpfzFf5+d1U&th)H(xOUslgt{wus{*A zFY5VLRi!YxanfmhS>fx)vUoMs)Sh3xdUfD_#ElbNA(ip)U?_m)Sz+N52ZI(g<1;dD z0>?ka#}|SVrldG>z1mvExnD>aE?VigtF@pT{R};M}LNY{GOCbNvy42DO3pbm*hh~{vprV_2{=|!wK;LqU2~+1hxzzK$@NM}*H}d)pMAFGfBB-; zWS4`Px9Xh)R}1k~WI2rWR9OJ#ZgQLW~lkT$uhjM65U|#<;JUj`W z;JA8=3IgD8eg1ZfAh2&pRx-^EhERisVpI0hS<&k+g)1PHqp6FUv>gr1Rtg3`Sfd{o9}x| za)g*$SPac$THD*(@dVgEfShz}x{6{s|L@u*$M7fiJU+D5=&wNTct+0s`|BM^QZJRl zIb6P`z8&fPcA%|079vd={WXR zss>>@(E!DKOhQ5n8uwFY&OGDV=JOGNWRP%T_x8CvM|W?B&u`zqKdn34U3UFHrSgrf zMwc^+i@O(PoOcCiKPsHEv`r3SMupanIi8fk#&wapa$AY{T{X+|Ar@qJ35hBci!e-A zPMth?lalrOb~^2>SzIMcTL_DYyelr&h7=8b1IL;mN-DPq!QTDZ{)F1N*zI3sZxMfj z(!q>}?rK6{qABCr^-EubG_hoYQtTO>LOBT9-JzQv?-q$tgzP=V%lo{Ak}B^~Q=e51 z-n@9x=XK+~^s|u8s(0=rLvd4>Jj|BYbtb&}7}SHjuD#NK)-~k$Nd8lct6dT$XmK5d z@vW!^gQ`%{TCa)k##w-~UZpzlAi~%jxh~oh(%$zUU@Gv$Q35;=buDxsgC9i7(CiXY z4r1$*u5dM31nKj?Rg4lbYfe-oMEV+lhFmp?=HcU0HWBig{A17Fa)t?T`1;DhB9B)u zmDH1>YueJiBulI5QS`@BTHn2XePsXs{R&VIA3{ZdxE#mh;cojG{M=Ia?0lel`*s3A zjEiDkSL&xvpTNC$Gq~JO{}Cwsox71ky9)Dg<^#R+>y=3fuZ#eCZ=)=Y(@>gy4X%W~ zZhQ-J@ehK_fJDtd7hdUF{8MglTU|Y+uuxpLx!?BfhYvS=4Nk!7)l$wjUi>5Iy{}Qv z+oDEkvpP=ul^+ILcRNK*A#}ylOOk((;Id3#BX?wvt0V_}r0SMvnq=KUcmVxT*y8;I z`ua06S*JVBaMj-9*f@U!i|4A&4gVI}v)Oz0&>^9kA45YGYp!1&$!8l?trBpaYv{@D zF&splvv>tH6q}vhGTmK?+;g$h-6Q7r+IwQ#Zr>yX=31qMzp);T!${HIf0hLh2JxHHYh0`8zbG9pYa-`RaULa0@5t&T*9DeZ*FR7vny{RdC8o? 
zqz`2)z-yHBLaryA&!qRjI}X3U9JV)z7WMEJ^`s#{1*jpLRCb-8WnzBup75#IdTxjW zK&fw*B0Qp^q8u*-=r4*yS)K6R9H_xLA@g*CDbz$bAtr90u<6wDd>#n^&f^pC@k zq_O;uaP!{7eXL_woHMuy@_oVWm6bA`i-cHo8r`@7*`4Gnlk(Q(GGx<%8;d4mO~*7# zS3c5>54i_C-TMkl_p(T{(9!?YshBY$QKF)y&jDsr3$Cjx+>Yi&0 zp3wjJyuuQrUlsDjTExvN31ZkRYYvxv2O4n`CMz&NzBAdCr%YOyYz7@V$+-IYNp9{W zy|XKxSl}_)E`+;^vC=xT!?n$z+rNRDBfaIS~vGcB;Q@iPMZ0hP9LW$g7!--*p+g366cIuA*@zb{#JSw87;BeQ>w16-ZfGu zEH&2YV1s*&cd3(V8*?1HPh04By^h@ihhaAiYZ+Mt=V;H4bdkDu#pOM~!Osdq($n!c zmD>G09AjAjqdQGgI|lo-gD5hI&6QeLFHR*@W|SJ zg^iCa0W#(&VdJxg|pa{>z2DhnHgS zJfC1f&F$S3vBzkz;*nB-iNvQp)!{PjXC&>UOU@LsfBjn<>*bdBpoB+FvdK`t+*+G^2z~^i^z4i>ap_>`%(+7PR({N|0^v17+V-)s=Z zNC?;e3t99wFm4MptFrzB;uK_MO}2{l4YV*oZY2~K*9G+Q8aY3|4+jJ5==AEDgU^Nc zl-@uLVRm@#0q<6hM-TFVX!K26+{@!@f$-?rEL+0fr*bbT$+o6I#M0$dse^K5@KzlQ z^sX!OBbrXDLM>Wf&4m?iM%5&$v5VOrLbh3xxUi;pQ_G(9KtdsnAlH0;dHB;slD*Bc z#57k{WxyA<&Gd}FOCZ;Sas$^A-PV>C=s@0r8eWfJkt)o}Px0^!o|V`v1VzX?kBE`{ zm!narK)I7u0E!OcI-MW-lRYQV_Gth(dpp$flE#V70@Eu?(aeKRJ!j)&M6BPl{1hm^ ziPPBv$?!(c;1&2Ahion_p@ln#2dG z`O_#z^R}w$o3j>eZ`0D0VB*)%mZ~-1ZWvO&7wgR5@mN%j;_10bd6ky`Es{KRI#(tS*TqbUvcGEhD9AE<>Geo-zi^U(AL^*jm%C34 zSfD6|?(>&xX=vEpXIz1wKgGrX?HwBfltWE`qtMQaVhnyrOA99Lx1Hm0`IFI?@7NS! 
zkcJIVDe~p{tKdoweo7d!dx&T=O(ZBKrKorz;1%K^c805zMM@_KDy)B#pWQLx#VVXr5_c^XcyjbG-Ls6L-idn^mca9rWh$zxT~fw~a!f+q|f1r+cx zK6}y-|Cw!bppi+|IS%VPNKaceO{|7c%*|DUz5(fL4?ejf2NV}R-~qE|9hPd)4{dak zk84d>ANBusEqo=5-ShaPcTmN)EU0^*o$C&y4Ne~{{P@KpD1s}&e0grH%6i+5eL)Gw ziyqueI=b>e)-1k1w9s>T&FlLhC~*vqBf?po^q9UhLSq8M^*=$;oh-Mg^YzNw1WT zzX{DFJgTAuScvrgYz%%#Pke4;1W8@VC_MbY^{ja;w5OE6!|G*tA65rWJ+^$%Yk#?& zQSzsLrWT6-lU28dz$kH%dgJ2<9k%lqLnD3yxPA5WoaxtX?d>d#H7=4d(p!XXkt*yCKs&bxQD^{xwCd4z&(cyAejC({V+um1bx%QhqBQ}= z@Lz2gzkAaG`WMY(w{&!@R_5!X$=9rDBa&jLR$5l%WE9rA@vg6+CuPJaqF8u+mrLCb zU%n7BQqHzHP88`NczM@v7c?%P@QOE*}TtlI9Go8Q{NwI1s{ZQgMD^2a^a)AKHW zY;%+j7NFtj`SBRmHW$A*GfPL4Lk>Mgv{+rPq|F&lV7j&0+u_)9sU*w#o%Ih{if93a z6JvXh!{1g^45ErKAVj5~{rh7as9A4-(SL!i^Dwj$z_*KwB_nEAoHoQ~%H3tx61a*s z1xo6~3Syznogw9MCr7<%bgJ?H;J)OuN>1y;mQ-3yITrHfA4dgpU9{6H)j0a9(gDyX z|At!1ZBP+3vrK|=t$W@L51Ux^6yAn61$kjzGOq?QRW#PQYlGLNXQ8^`+M-H z33QDfG}eC@0G>l0K;Rfh#9+RyG%L%TZJ!Jj+oE%&`0kgZ(YiPwRy_B95N&YqeiNAD zVI60lQ5!{6{=T4pyenzf<-YSNT>`)AIUKz+BI?r#fVLzpcym%YWdFKG)$xLXVt0ZmmG2?MoNwYwLGIo zj(g6l(_~SbRzg(EvRDOL52As@dnU^*-onDfp@6f0VJ3DtZ1B*f?|tzH7dYgQ$tpYZ zV}#jJ^F`Ml`!Sclk-fZ2fl=)K5Jx(P_DQ-S$HGGP*zj=Hz5~X%fr}7ti0-A_J1wcDSXC+4H?n2Fu2xd2= z`84L3{K|mn9m0AQ*x0e8=GOFSbl}JZrfrOK2h%|5I(QIGJjgq)@x8l`T%TiScqxzF z2c<8@@{Tm~g=L4-!UG6w+s9#H@z5}hcbLYumU@t~ZTh68NE2Dmdn5yvE#EGmCZ@c9 ze?5SiuMHmRK5Nq}DK*z{5#+^bA=^Kyz5Wja+K9tHpr`5<7`hO9q;EcXepRQ%JdaQH zV!1UEq+CwodCg-@;Qe;fdO+KB;7v#+0rKnSe#=u{1|+C0nqpBCN#i!{KU-WYTLlsw z(i;m5Uf(}`dMe)+$qL}k3O!x{D=V9!jh!Z@NuWs=tlED*<@$nOa6H5JMg%pNAmh;c z;L4nbEB|n`+(P@=>`twR^A}3{4nJ}D`e22$6ZL1cIPptDT|)Gkt(!M(+q?-h&)@LY zGyVP7K}`f#y!)%*0O4I99bvpLpCe}V!V-~&v=8-|KlaS3QWEZ?4)zKRUUnjb7s27v zaXKgVJ)$*{)J^2H#aoI-4pJls4Ud!MTG@=s|ivEda*kB zzb^`3anVv0-?C$0ehalw(x9AxI1d<40r(5A9)pYmYRdlfF?7-3{Rk?!Kihbuk8(*C zUX5Cc_w(P}NmT>gv7Dbv0ay)18&sxhcYg}Dl;OhvBG)4N2IYz@w@))&ljP4bZ#{J> z{wB!Uzw_qRjkmaiFlIq$-SabA*>G6r&S@s9I7>C@e^6X0iVyI6a|$6tPK}&YKn4N7 zcn}B`IhVQqdQv$EAC2p%5gyJBsBz=QArQ*4I(+Yn{o9gdZz=C11d+f?RZG>+`g}na;*JnG 
zLSq6WjlE)jl%nrV7bxQnly`uz!|?TIPoKVpb}@7xJq?I!w_)rAGBdWX&oro_;xZJZ z6bJ|1=Kn#pad7$(k!wtvakVlR&Z%$X2-gDeSr;=jp~-w@62e-b72->k=l z?B8%)31UA5W)T}+zGg~1_#Oe_;_W1Xd&EnS~e8h*Bxz#}V zhZq0n_i8Cny;W6JxfEbn2P3;*F@Xmc{y!;-Wwx^G#^d_08sfnH0~-T7Te0_44Xe^Z zoQ(mARDOC8gsRMQj$=+x7PJ13?5#IkuSeh1?1@cl z1m0xj3cRDshu4jxk7GC9I)`G0@L*ViiIL*e+;T(zFlGMRL-vuED2egx_%Z9gH+&~D z00twVKyXa=TE%0?OCwI-%QhjCYGvuAH!vxG2JfXZf($FDO-8d6u^S>q5-JyrvQ+t5 zt}n8fo11efKm*bYhdQNBqc@%fr7WTB>(j^H*Xz1@#7jJ_h+0%LM7#CEkR3!35XLV) zI7e4@<;%-$aVaovJfoF+9iFLtzcAUA=G~` zPW9M}M?)qs-~K~jh*ySbe{2m_7sBwfo+u|BzI9p0Uu-|LVZPBeW@{gh5^#nUZ^le2 zgA^vVU%ZbP=kv>L-!H89PKa0FG4KS0$oCfC;ZaX}3T=w|I;7w&^hlAzwNXi9%}EyU zE_k5RG*9&u(YAyDm=acDesC#(DLFIKUUvT9$--ZFjO&o0l47e)o`ggRv71l#=Cy0W zdy=+z2efszC7OE9+)5%^^VFzRh+PX6|8&EkZZm%qzv(8H(^RntNtSHGN(RUs{1;}u z7SIwvA}|t(BE8>3Pjee4CP<8mcH8n%LYq)gVU=M57eEU7U&CR z(v@4xHtClgM3*LTu1+{d_mWbIc0P*|l2Bkbc#ACXBrNdXlNj^9)-NNoP-+lb3(`iv zZ9b(ugE9mM3W9NtVr-ET&S@83-M}uxbWIgbYKYkM!bz9+0j&G}y)fQ8JG#za4keRq zrdqR4<|<7~5$5>jxYv5+Vwh0NwQCg;nxyipIk`Ds`l6nmJHM7a!=o^2f4qV zZGrW8U(>#@MG!)Kv|x>-{EfxbAL}e`uhyrxCi8CP+5aKyE1;s_wnh~LQ9&d`Iwho} zq(LQzl#+&zE&=HVMFm0189JqVkVaDJj-gv5hm>Z>cLu!ozkF}4S-Mc>#O^wKGknRs z`Bvm)fiUS5CHmM=!2aoJB0rBBsn4G-o99?MhiKrK0h`Z{_B{K8PW>Q3k!WdbTwE!j zGGWkl4LI{d;Y=zwcRhgz0FH3jW$VqA<@CQk$r}r4^JkJ!n`-Y6??01uYdd;Y+Hr|T zP0U)SJ))|Ud%Sp)7*`yNn7=7@oT2>Px4shz2!GUvp6&|XTZW;&LzeHhHKW0DPVB%@ zLVHPZytYG9lwN7V9(zlmvwwvd6)+B&%IZ2bsx=VZ6X3KSaIFD|wh{*!2`kd#swxsz;u=LcgS0&nnCOiFcs zjAHOZ{%i7})4XVg+wfcz zB+}s`30_9WJ*R3sN(~zPep>#GD=g8_w+%FmbF>Zg9|`-^^w=*}PpSv}>|G^bLzT_Hss%fCR~>7;Y{z9(cX}4-AS2Y z{(~6h{+sFa7l8Bot|3>rq$wev9he>+5eaR5761v9BoOw}IZ{mT2siE9fjo$jr_U~x zu>KN;lPIGmZp33$Bw^%!E5?y^m;`G@bty|B-K<^6xtczBZK4hW)#_QA>kD;cF;u1l z#He85#XBRL>y5*w2P8u~Cw@O3PDKw|DP# z5Lul^{ckkf1&EsQ1A;s~B_qSM*Ag90UEr8CsX}BA>;yf}y{PniJCn_0uRN1c?r1#W zSZZR*n&Ul5M_M%W)Hk^HllsASuLuD&nja6#8mvZRD_QZ8jqbAX*{B(0>+;Q9~ zrU<*))i571lRtJi5vO%3mV4?vY34Y+Sj;2OFj#@q-|deb-aX)%?h~D`7#5u{CJ*8| 
zw|JkV`5oKcD>jwuKSP}k&yQ9sxJ<4OL~{tu6aMJs%*XwT0|FEZ|KZ+**PtnUeWjdlao`8>BvXFd;F>D z`GCE$SzO%C!jb>(!cP@Tq-))7-dKoNmX|!{ZnHmwQ)Ows&%DCNf#WGXFWlEd?_kO- zL#XYgqk`qqq%&z!Zyy~GZWUDb_t+SVFDBob@nHja%8U;|w?2PIDo4pRkMT?k&pnK; z4ldPrF8S@($1Y0f^ZaP#P7`d6w_0EADBZ-IMzpEiuQ@n!ulZA?Sat2laY?$kJKh3W z1sY%VM{`)jKIK*mzBMA*k59m=UlB%1XU!F2w4GN8A=GHl& z#+=NLyyBb9f|M$oj-xVrW0j3%C>d7xKGTPAFqb9;r=?{u3ma88Ya)1tSWf1%iUUiz zEn1k_m!jhI)vUD&*Q={E&K%D?yAV_&Cqr~hk8~{}3zY*UjQYm~&0!M#!38-WgGvE2 zmRYO0DCTNVL#T%t2Nf>?vUb|g{kXZWkQlgRzT}Em1!PaOaopD+yZ3uSE_@UZS3Qon+weir9 zCgbIYNjzfsd4R)_B!V{ac}8hFKl$Ys9dPurMWP1^-^uM9#EtFhPxsfI?WL8K5w4J) z?L6{YhdHn@lJ#4iWrT10m`8SZH94-@aD|ELiMgzu@3^FixW*~m_R!40j_S%k;{S95 zI$o+fJUrABE!SWHHFKc0Yx9}ht5-v~f8RYlsg9=ESm~{zM5^l*+mmU=CRrhn#csJB;hQT zBhdl&vS#BrF5UIGOs2glwl$9v-4j%ekpLc#)kKMA<>BECE!R?cSc|9UKbhCXE(Kpd1uEzy-)T6 ztxC*i2<^LLMvgF|mn=#@>{(48$&c;-loD|L^C4DId=Sd{YABnFn|YB$*K1c!m&k9AZs|;V=#Gu#JDb{*?Z?hNgV@AG8tT@F(a@bKxb9dLLss)YpS!gf z7`J0}x@z_w6PBc#>m2IWT0@NynmikO$oKDC2&#`wyAn8*)a5@xZZlU{N5vjvhTC^o zx0fVFnoK9DZiuGrHy}y4yfWZ5OzQj&3{1>woC(;=2erIvRn|#U!XqigkruITHM+by zqPoKh^0Y$dzL<1{0r7cnxhq$q65)caaxt9gDi7&F(aD25Ge#Gr2tfqsy1V2m-t!ig z_c_VZ?KpGJ!-!)pLG5EVV z6h>g}tuC*f)ayq{IlSQ<8lh}>dth~t;w&l6w}A23<<397C2?9zgi#i22%Dp7ZCpELMk}BjhXZ`b0=6Ndb!X&yZc8hoMB4)#8( z^V}$u1{tbzd{fPkkdm5|YL&ho8^XkBx*0v+<(Y%a9$KM!TqWGp`;I-jHTToqv| znT!sHla*`u!;BsZW$>ON8CJ@rv^uTnv%P|O?2x_#?b;luM-k7kv!q!_Excw*)O>h* zymV(0vt%ccbTqEFOZmgj?Nx61 zwZl6SHJ&|-J2irAFjgK3WmRRE-kC(YH!5{H`*Cb8)83=QDoit^%(g|s@h_*E+FVId z)Rf5J0xKfWr&^X*F0xq`s!YJ|t<@Ll*j0htKJ>LJqStO=q}>gw$2@l5-zCpEjoNi@ zQA#t)t;gQNQ;gI16r1*0udrcpUUATWJTQ^nU-Bz9o5W$tYJ|;w zFNlUgfze9~Zd0*sI(axKCr`$Z9xsx$Q0~&Y-;%V_v{tZuFfE!RIHCU7N5;^*Z=~f+ z(DXN7di>!tfA5nd_mpF7HLoX9`X`bBr|x~Fm1B;{RU;)3%OSx!6lKi;jA&QiAFdR+ z$$PkvlgrIKTcS{XRypN?96uZ#96g-X#;1cUxV}No!eZqnyAz;^@K+K$r|jN&8F4lP zK}dbXnY&@$y5MPu@Az3((rM4O`|_YQr>JMm-ukgLW-Pp)b^16yP zmlEM$sn4tnbDBRGxI=PLFm-ZKs|

I?m2zz{ieQeR?4wByo$Cm9{(($UqhdishI%f{UwDLlqK+rrt*(+ufY!C!-nEyB`LNawu+em$W-C zFS?tySRKyBw2hVi*{-yZ3~ZKA%qi_l1;kQ{?7Ayo(~_*c!8RZq(h*_JpIa=?wUe1` zefY<})bh|F&bw++rD&2*X}FZZI6RSGC2Bre?VpapInDkEVLh2IX)uS~q1;#GUDVoVAJURW8-y1+;fcu{QS(b< zolKRMFUsKEWtPf0bO}nbvn;yadzv-Bymad(cWaxw4=Ynm#D&&XES-*;!PtV7Gso=8NqRIx1=}A zd%kUz)ul?9aJfE@D(`&J!|6dEQOXFW6|MO>-Au*FnA}{3_~&x&P4gdytVA_|g0Jwx$5p3j-#Nomr^p1Dm`tCctqaFkJ5M;aae*d`tH zHtyGJ;q=9^rA`qDk-nH0d7N zk5aX-u#p9I=tX{d|m+-2|kl50y>UnWSkMcU1#VUVNc0Po%W<0~|2(B`9OzI>n zMCJgua-2WD_E={p&)eE^fYLc}Kv#u}TZD(6NxpB%snXCLE|MUssbZ~*!y_MQI+^y- ztqzfF4s+(E2&Wy+nFyz~96L&;my2bdg(WF_g9X|3=1Rj0r+I0xW3T@g?W~lERxs)TLL^}t3eNgf7Q4k~cH<&T4ZM5$Fb?PJ=FKwRJ z!jqtzc6=FPIm!rl?=G)yt`(^q!aF;^G{2`Z-`ZM6R$gsAJ|>onE43qYHiBO%)-_)( z)-Dwu?G*#(-#2ookWvWco8>c55GDwI-&eA?&vd+_N**np+6@M}2EtEEJFV2ZR zmO5sf;B$IT>Dprx`D&fvp5>96IURZkSA?)_9-FAUGCP^&&^vr;i`UYadbWKrLnoFo zdr~KRnMT|W)sW1TwaFM|1c8+>^L7<50&M%*4)>jw-e`0|o0&4o7Rq-{t0nui6%EPHrEj~(Z9w%Za7)jzZ<3@$HYY670;~z%byfggaot+ndCXn%bA#ltQ;G# z(8sXK@RYVWKFxz=jybyjFgiD(zn)z@RQeCU+?C~kuv%JMtEnztH#X!zHgb>;UR?{* z7xeyPrgoe(Kv7InX#UwSP=+_Oe>MDRX;Ymsl=~GE6H=xhWlwJr8x{jEX+y*@^enyE zNppOfGV8(Op1G;!rtc=t9yj2HNer_ zt@YS#W6rP|O2dlfaDZkcWvTtzFMWI{85`oSH6Uruzsn-s5hjHRXYxR|V8_+`k;MP)BiN)#!CH;ong%T8a7 z*@GK#HffnG@9jp987k)u(n+M7mt$KPDr?83J!q1uD z#}SiuH<#Wr$DUi!p>|HWTRw`J*;$h5umguP5JC@wWLsDScE@HShZP3XIG{AvP`N-R zHF}jCWvA257gknr%GzlbM)D*?q_pXwUv_hfD-C0HlXP7DZDJze*|Ty2{bP@Wzd&hV z^p>y32oD<>*oRF7sBc*9+V5hW4Ang=KlafdpfL;4>9-~kbPl4|>XK29<8H`c!tZ%J zMAuY#gYD*H`fP~&sqW!mXnkzuS84s)ZLZ4lUz}Yfnmjgn60@vOdWF7D;@OBucS-Af z>yyA>9Rd{(vy5JI+eMsCP05G0k) zY~kREJHW!_Z~`LlAw+TUjJx+qgP#dV&?g*zwG;iF^X7or(b))P^UzhedVUq9^vhpgIlsz6J0c!Co6asN2pqnb07o zK9ffs@_8K-Op@1JziJ!fi-(6njB(3P@WoFL9^QJ~!+k_qX2jxCb&f^e$Sq`52Ipe_ zN@W$2&kpJ+5K3iVYuNthr|)*eGv8k|4%MtgACO^=kEs&PO^=~>&rEyV$2O*$@b{-% z#8}fCAMkL}p4GVUuq4GFD=!zwY{^x|(Rz&W2^Yt1l`jO}<#hdn+j6fI!(`(Pd@=aw zzWR*@{dWyU=KvuZ!bDYI{U3CobSApj4R=+!KLnB21hD#RYnCs1y>^Bt;4<908PKBl z&;AUL(2nQ5wHjO9i92hg8ruVqp}O#%l0(bo_7vQLuinap3?3euyT{Hrdv~krT9MrE 
zg)@=)a!>fOqbEkF`pV&LWM<=)!z0BeXKyy=f^UT2-V)<{){4mpJd%cgL>*sB3Gph- zpd!g8dO}J`A1j&uDThuYNPeayh7k`9!-)0zA z5zLOoqmsG#>3PYQrOj7|`iD68Bn!;EJ5)1;aQ8)f`Z)L$wOqLbfj3F_tYRr7HDtIrM?tSV47hK@0Pz;@t zde6wasO*uEp9QbvQ-1knvoL=AS{G5araiLPuBB)7@gzVCgKDceo6haV+#-DpRN#5h zKKGg#)dNf#YGU7QpBJxjpFzl*O8Mm;d-x@gwrqcnvSNx;gXq$9^#;_eP9EGI*YgNA zQwVnuAg85po23iC8`F8^{aWTjp;37$QB?+NDmr`|&VkW3YF3wp&j5R0O^bAqeMi{>fnOHm~+n7!`WFw(rMRVMx)z z@Ia;6qH;gqsDn9obR_a0{dMsxG{z#1hUZw2q`AzhvESl*$rx0xo^_|ieYYD@@_Q=W z8k*>ps4|(^R>J!vIqz+}DrLf&6#sWyX0IEmOs#QJY8KDj^saJu{_}GLy97SkTq%`A zKjsja7~nRyw~=YR!nS8CBA)Xt-dv3*rk|KIhBw~nIeB`ilMe5)#F$q39|U_GV^+7( z?=<_z%eUL!Z$}v$8f7O4`OV87x&CSG5ZZ*%Nibw^)V_K9)$5+e)4XIs;b%T=50x8R z(^SkTy{KZOQYwO!#PZ4bz6H1N%F((w69#{W{7xqvcoQ^{s_kkib|-zCG+OC!9bdEl z37uRf#EjGU#nhHyuu8Azwc%Ici1KZp#i<(hH)oUh(TRfoR+*8|BR$2ai5Jj*n_q{> zd@`RkiA3*XEM;Nc59&NGlU9=*dTu&fkBQ|ubEAy54ZQs14u@~VJb_We6ky$dGREYh zAuvdKoAFfaex6*c96dHM-*hp-3r-}r!LsKf3mUKi$#%ifz2ot5xZ!BLl4?R#Jsn0k zm)$%OQyh1KUyJ^)Y@&^YDpAqKTBBa#wnt{FJlm$MzYbRzI5Qu#J6zqO_Mrgl;;N~5 z|8%MK;T84zgINg~(4&~isn2weaUSw{){*0rvKWc0AESlb7pLSw<#Ao(iEjfd-hKx#w z+yeZVtd8S^F^R7zkucJuKkAUOE`0Yk%#t?E$TK z!KR6f!rHudN<6XWi5nplg3Za@+w<&kLxBeuF{hZ=U6Vuq&sw%#cz(DUn8YNj{;sjf z+t_JsX%9)reBV9yLlBz{oeNtw#((j+{7<@O7RFyaXWaF3>8aVjcI`XjZtyp$c1E`x zL-DpAH}`TUzkknglMnwhlqEl<%t8oe7Hp#9G&Np!kLzmT1gA_#28Xi&!;M1AK-0b6 zrv#~AorU`0T8;Hc!U2Y!sDi{mh^Pmw5gR`^nGMJ*?M3|hMOX0{xyCA)gdmMJ+h20vX+C|xbOK3+sl8dyYyFF6S%Fd!JQsNGiBg@w?QI`s=)6!(nM!m{h8hgn<=>rOorekzcU$r zL9ER7l6xTOV^r}OH2L*bv_4S)O%t8vg8hlEe#SpjS4iziWAyH0LSH^j$hAeW6c)Bb zKS=2G_49S<{8O32Q5?OSE}Hz+knN*9u1CjBYTU-aWIO6RwxRgBQ&K&vN^*?Y-Boqv zpYg(jQ)ia1^Jb=~D)QBrrG~MwC0j1P&gf3;%qn^)E5e_67M}FaX5Q_Zp%O&`zwH=ujT;EGrcP%HQq?&E< z5};i8hZ@)O6T^(QdbQW*&+g9W(&V=c$_L3jFEriiPJSC z>qmYKy61G7($>1M?FceFzQ148J!H9LdYFv<`h^RoLoELI`Kmj9-E4g#oBQdIe)Ri! 
z#SG8qPUKXuiCq`U5%tdb`MFW+sw;Sg&1mEk^E^fixY-)vuM6 zzAKSki&CQQ(ZW@Ge~OPNGOLcbnIqYM$6A>xzk0P*<|R!eqd$E`N^4xB%JfbF+$}hN ze9!)HhuD{jDh5V%3&U6XnZa|B?ZxtMZ+^0vc1D9>e5-J!;b~Y@RNQsl0X!O!g3+3f5u*eX7vT%7Sw0xh3$>K$+U-iqV3HZ*5!8YxT#@2ktG&XLys_+PO8 zQMTscHuG_}`T%hglo56f<$Elxul`~XJ$W=ZIM1TV8RdFB9Az^wpo$wT*?mGb608(? z{rQ`9R8&$_2hVV)@j#a4tErOkt~i$>)3paf@W~e|GbaQO7K(qxYJy{%@Rp#Fj0`lp zL~2MQs7yM)Q%+oh;Qr>^a+KTLmrOU*d@yd}O&8*}rI0-93N`iXyU>f0VQ+Xrz& z!@~)bl$FJ0JLuqv)Aiv@vle86f#BO*F6-PGib)3YMk*Gcuk zZ^=lun2u^URsceFd&_eYkN9z!#QnqCJ~%qc_Sv=C1w7fgUZ-WgI@cYnq5SdYL}5(i^0G;L zxZS{@8Z;>>YAx%W$U8n=Ch~hTjcqs-ZgWhvCkaX4%zB;+2ztD|{zROUJydbZnY?_e zGAMVk=W9txYc_GEA&brv7`qShsrOdDev!`xRTyX&hU0w<)BkgT&;HGPhD^j^(SE(! zf&Yti*jmNKh9Ca5;a9-I*res#jri@pkb zG?kGFEF#ye%%WEy>sS==z?YX#)hssUDu9o6W?;t=FImlM=u;@dDSi)Rvz6J5cVQ2+ z|Fh6(nu%^n4dMcvm-&hEnVH!=t6`g&BT?74sZ!1uO^tXa05+U}N^9Q?cj@??U<7`? zQ$()T9tefffN3x7@%$=o)<0iUxV!Rv_~tO|4La45GmLBQHp23}mkC42EM~dPUOh3} zQA=pSvfCu3rUBH?F^cw$TB~tNDs}l#!jCXzO{x z#iGX@WqWcm3A&eZ?KmfW_~u_`v^FFZ7&ois*_0W{paI3kaakej5!GkN3!*-s9q$Y69}5TwDqvXXxjAw5Cuue?*j9xU z75#pl8O}z^o3GK?N!%rLu+4y6T{YF|H>Z~~ry>cC*T(mw26hM?9`1Dot8-K&BwgY& zy06=+_wI?51SU=H^RZV~oa*q?<8>Gv?gXGU2avz7Tf*4{2L}l~w_w9xNMvAfabh(0 zefm$pGdZ)4p~=qpzRqZly2RgT5J3>6C>y1}7)Squ?j{boKr@)T$+CZXz>(;v^^>1iLk9=hqd(%4mK=j>Ma!N3w30~z%;8} zakdvqUm9!5qu1Vfa0y5V!nJ{Hn^*bnU(U7_&oHGzOA=~$WVX{JZE8%2rw^LVVxAbt zz2cII%B8wBwOI>jP|wxw8bH|HduPY$U_r zWzuu8N^~L7gK~bU5C0j_^+zn4_Sh~{-{a9LrLa&0uqLxIJ!}T>gn(exJB4~jAalF} zy5SVHo4Y;V07rrsh*uh5=2Q8v&lP#)@n!y3PiycGEnIGksyIrN8maKc4imHSW-T^T zoY_vQJGUval+;E;m~;z&Z$8}J<>`zOjyO3A?HzOXUGGj6+BL@fc*E`P-Meqr5N<)l zDpK4eZO%Gqq$o%JYomBLoPxq@Rr9n!^n{X3&>5rkDrcTX?I+Opx1XiTS>(nIH}pPz z&^9vocX#>-LX#j$QNOeO0-qmKlh<$?U6&RWK^;BF?~XTboUBJhKS`z@w>!7^H7{Gj zx=LUgLX)W1h6q{{*zdG*e2p{K;{lf6!e{;JkB?;GMP#(WRPyq>z$y!O73!|p7inH* zNW$pbyFE2zW^OLrXb~mskW71{eG@_#H?*7bfnpIF>u=$%m2wu?b%#DU6LgKPHsOt>uAlHRao@120 z=jVwD9TA5X`}InzM-dU}qapN2jOeG<7(OWd(=fE^q`Fn%Hm{tM+fhs|i>4;W`E1i0P%n+k~asvN|lu!#j)~;k1n-_U<@l%Arq@1%vDGd_qwJun1%*O 
zr1xJ$zGH`6U!NbJu8m?)ufw>DQ6py?)5%+ha*HKdDJEA$NJJ`_jg+4~@j2lY{pI4b z!OrKI#yWms&JcaV|6i{XVM3jtQ0(2{jJ8z4+jVR@#>Z@n_(>IhVARgL<>f7aBF8V} z{D|gB7t#8AK0CP9)@vmt+}lV+T}9o=afYsofRVMrC37k2qq_cN=lgqb!7Od}U`hE{F3h1Yo8yM}reYtpf>-DNu^A>jron}xKe)YpS)SHoRs&lmKB`_z2L zT+sdP(fSeL%8*TZ`Mw<38hu6)`pZo6$a;VM{qdH1PoZrOb;_n^v%I-!pYLdAJO`M_ zEskEf$e8mdFsFaz>VHqz+ES*HP&d1Gc2ILvv2n^!M6QTlbcZ9rMDXo71TNr!@CPs9 zt|s~z2m=%1ngAo4rD4;SgpSjoqhX;rSd@f1Lq6ddJ${jCm+33f!G>r0qPgnAikcQ2 zcL1P_W*JkI^tsUMA(a=d|F0wN*ycXhXI8OM+k)kLeSN#Iz9%^O1{p;UOi zBdR@SqE)^(hC8n-|DcNF-Izm1B*gqfO$1ZU7o;t^%uM`9M?bHld)vQdNk>ZT1Y=YK zeZd3hS7xo9&Zy7DKwgtC2hwJ5h z1`_IU*h##%F5H+bCiPB%s$p>3>`h0PEE6khJ0LJxReAXvvXPx8KzT7fW}+4z_}PrD z?K<-jdv(--5h#FjV)|?LUVs$ph{W=*Bb-~raJ#~y#T}>Uy@jt3`eg^6p%~XX?6a-# zm|6B6fw&=f?Pm!hLuzHI7w7WTD7$vfTz}Ey^^%_-E0#oRyR=~P=jy16h3$s%W(xn@ z;-aZwY}$QzNy#U-?F@(A;}|m0sppC=>;*Mt?=zF!B1qwNehUkdcvzp&5jm)1_GPWW z2DJ$T^$NPC0cE?drZx}gGCD*>S7>jk-x!fOQe?ubD134o zL}$2zvOE5=6DC2M^Of^cUz{~9l;#$OWGFm6i&1o3iB6Zsu#mBQqB=-X$A#wub#Kq+ z?Aq(eO8;(^$dXc9NS6l?)3f zTy*jMaQU*%d0Vnt`ZBI-D8V$y)sezbM**>Dg+l8iy-ER(B63-T3FBR{`X|>}TF|FP zq}OBpZ`vFQeKRLb%3fRYB@vDjDBBU!$N2?Ol=8X->KMb4eg5i5Z4#@tOEHK| z)c!%mX_meA{!4FDn3r5_3-LP*VN?BHtK&W&xvktOxG1Cdh1$SqYAo9$mq`a+PrY zQKxx~xSi7T^P`0m5OG>97z2O)-OP*55b`58RKzeyakSg;q49(8xGYE{OB@9nfFn0r zXe4_5YN0rBs?@Nunp)zTIh2hyKs>&!OI|K^M4Cnut8t{riML0%JcP^a5a?Mnu6O?b zlePT;C*ybXr2JwYPRtb>!|6t5@DR;7HVIre~v3SxSs~M16wC58_A0Q-Ezv2=F6=oKR^zB2tMA~ zw%7ey>fA%BxpNtnjRM{&@2$u8Ir+G+i(~n|1twsS^*1V7WIfuC?_Oo1gYiGZdH?>s z(8*3NFq_%O`_9RDqy7?4kbpT0?tG4a7_VIuyK?<1H&1Xzp&bFRHu)3MI+QFR_hE4@ zN@{91=gE;y5$@~3I3P-Y;MEd-Ti*MoX}olJ?|oc7AqRSq2pN z=ioIDq|s=W0MCyirH+OEF?h&1cGNw2s}*j=kT64YCCLHjm5$zF&B3C zO-$_P<{2~-j7(#G`8Z-hV;l>32@nd;Kah~5rC$pXvgU~sjnRFjjk9{b7r=kF(fxna z2T>kM04bW5Y_?m}6rp|n4zaXSW4GRIZQ*Tg!Qw^wd)9?91H@_W;@rJ|Jxo7L%1-Gj z2yR?|9=6sW=1AOb%;^Z4SG6-2Y_4+EU4PO1bf9m@89Ekw`{K;pFAmnhu+aa~y2+&I zX}{O;N%_Vjo^Tp`zbhcx!ujM$YwydzCwKXQ)Y_joxpn;sUo;I#@HfLI!}W@jn~~EM zVH%wsRCHd)K$4|b 
zJ6Ey%*sVS#p7{Pe)vHt}O`N2;F{iAl)Sea)3>tmBZj5xi0q`;~tgfP^<+vAltHvx- z2fIX0Ks)~2A2W%&M6TgZpC5E3=jSKXot`Q5j@seT@Goi--ms^F(UU0lf}G1dFPri4 z0SA#FJdGJ-*~B1_`#=Fs77!WVyjgr9YaJsglfbyd{~ox=W^7myCJIhuMcxmSNe7aR4(3)qd~VYqnl8K3!MBV^mn+;)@8(!p zQOVWFLg>F{TW}|x?(6~={CgJhBi-*H_MVjm$G`)av)@^=yNQ`_Vb;-pymU=&8af!n zrq+$AKU48~eL0aaCpwi__bnXRPY+UvueY`xilJ(Vh|ILKutOy^@mVZ^DtF+-2A^L2 zi+tsO$=w%rc2~=%ih}ryGhg8PQ@Mh}VSfPDw*QSgrY6i8Y0Rn*{fs%OadGNIu0D4@ z5Ai{#s4hE!$kaPr)X(R-qAc&>;Ped+ih(36H7>u$bC-uW1;H(bfH=?rRxpQPG&u@|kZ1D*Z@ld$V7pfQBBv&zL zo<9TAV{hf1CzcV?2LG$?)9%~2ev)xrKUyi5pt*W-a*YhIU`h%G2zX6@lb2TPu^6%) z0h6@ys9|lu&Fz^a3YG*)Gu1$jTusd+P1yns{G7*`m6h=z1{EWB^DPhw;xt4%WYey} zDBz%0%rB&ul$G^)r(k}1PA|rt*bvQ8iuOo2Qmxp{DIo5-@y5PD-=*~3Ldy9$K9)bo zU{XJ4fY+CV4dT{te}Bi%_fq_TAAt=IEM-1_6yrNSIECx4AD=M|EpydD-B^ zZ%yu@sGm}5&U6_)UceJKdwdY*a#$U~5)c%4_ZB#7HdRiearE=jh$d)UTv}S%Ck>XH z@iQRxeAC|#_yK6FI7VLLDalXC$plr-AQ4kSH50v%p5NFxEq=pBaN9Zvp5{$*6g_v^UNq&yftOoBMw zOHIwaGPV5t7L>owF|kqeK#^-7S%w_36dj&h&$cIOUjBG!VFwDafB~eD{G=f&z>%M*-pEL)j0mLWjxp7U$xM?e2fH zQB>G4``g<3yzNT9TsifoCP_WWsoUC^i8z|z%r$=)A>B-_(18yC&?UQ!Ln29?~3t4 zO~pO=^Y1X#@B2}M@ojX2Jo7*Hpf@Y0G z4+_v=&mRO*_sZ8q+1cCL<06YJ`V|troLPa;H&~yrpq#$O4xxPc&<|whm9x3SKzV`u zQqm$?l3O$ad;J_r+`d4oM-#I(0-;$iC+*kyhQApa1adU4V*K=IhPP$wJjgBq6cUwg z&fSjAQ0bo?aE*nmW8Qqrhx?H#6k4Qpd&{A%qvsJjJA@lAxuhgI0KY|9w9;?6oikAO zwuOyNN>78MG9bkg&ENKB(QG06hU~Mg-#KjTsvm`(8CqTBElP~)Bub;1QCPDT5UOlVOFMLXY(e++bmcX^rVwlXXYnAAhW2$%KG~9ARSYC zl=N#;AAa+9_0E>$?b}?|DB8m5!2Cse2n0i1`nYmG927|;+BjO^f0$0k`4}p$zrgI z(Tf);eT;>!LAM_i#Q&!jz*R^A2<1;2P@s`3C+_Zl6$6pNYxcx!E!Up~`4#SVCJoom zG0DrtOo(r%(OX(ExApYU-r`fwqkn5ocUX3(bXu{k5&#h-oPDn}2W1QLe&@#b`w(*% z7x%NoblK#eqe1FhhPd=SrlD_kmax!CmmCIt8j4WhEOkR4Ieg#VBaPuO!qZI<{&7Xz z{vpZIVtN$srq9{D3m7$dNb;Ge$F0GLGcUU^aoo1dSQ{iUAL3lOUZemIEPgeuYD{TNNz!yI)2u8YfrH!U`H>PYFz70i3&>AS6V z&TAsi6o)yK(;hCppITSh+1Wu1jy8Y zkj$K%_`cmi$_|ta!k;Gx9y{Mb#9=)q?(U%oOG=VD$%+MW4m2GknEchM7T4XpEIWVw zpn8Ap{fC%86D7CU{i5JHF{*ytNQd_Y{S8gr9r%$&mki;B!sPOW27TTjeWC~ 
zw&&$5z0NM?C?K5s<8E`p7(!=1>sNd{Inv68GcD%iW^j2=M6)FR^vJ!)!%KW#KnFyC z^66fm^w$6`k#_m|0!V&>qOV?{w*ty$K_Gf@6oe3@LKCmp`?=r+yrA#O-h+WGr^*#o zwFoYoi6w`YY&cNiIb?4^35+8#q%sSp1nUe(xBWg zfS~F9g$piu7aQklJadb5W&I6Y+`DxOcd_r|L0jph=je(O(5CXnuY*mP(Dy?^3fF%? zblEn&be9i|tR9HGx@GSGMw=ei#|?$T3z9Pq&&)QuQ|I`$8oGUGX}EH8auvUln!V@4 z!X>(jLFf8zpF_ycK?%_cWoe7N2iZFmJ{rv}KRP~c8M7UpDjn9!zo)5XF-P1+(skFL z4BBAj3cKh?W zYR}yOSx3>R6W>!gj4a2ZXCbsc8TEBL{|t z6=XJ?8)DZD`BsJd`wsu2bbmOV!YBLt?XA%lXN}J*b#&hQ27*T3r|llcODzSPZ6P(! zuo!XY{4cbHhGlaiWFx-gML4 z-X`}`0wx0cQ~`d|ESdE7x?^>pMu(o7tL<;kd{McGU)y>5RUfCOyn*xq{`FDxliJ-e z9*zGT3ipLh6y0;l|Ndwg?Ix5h59#MVdQVC#B6Dj~4fQ(H-U=j0TSu-;skRl4LJ zhq!eIi)3`aJ|5>TpxyW!ZkluAx9Ut=1NV%j5M3r@|63PXuY?uilOlY?BNep$JX3{u zyVx+l)2Gpws)xy!%7d~K%jg!`S2jm^eP8*We{e>_xVA<9#{hL@oMCSuESN(``>tuO zZQ>xp6iT`U{Yp}b&{k!O6kGr&6jh9c?OpRh%c6U((;fsEZcmDMEL}HfB7T<++Q1cA z85y1j_CkD)vN##Cwj8IRV>RfxSmx7HEMXqLL4Acv%_75E)yjTKIq~fH{O_`**lO5~ zp6l9ymoaHjVs`PcNLpG{lrK&$QU+3s4Dv#5B%(js910x)v%Mtq~_-L>2RG+9;5EN7h0+{>6c$9 z4W5%cl-USBcY!}is!0lyecpIq9|i>MS*5&U1C07E>|j9QF9JhJYuK!TUBpCdnV z0hQjd?FbCc)AJZ;FS7^|K)iy^a&Fj;cUx4*P-;4{JkN;;QZ)k;l^w|l-$t9oQHTyJgif&|)(>bZFf zv4z2Z+l~iZDP?qN@`eJ7RUsp6u4Lq)_)*N9I#AVu{tHscjPMH5hGxwkRBKFe2o7tHuc;%#-EM=?eY~R)NY#vwFE*g_tHpE2K0iX4-no zm4vD2cBmVzoLXX6hr5497o6h_O_w;!JYRZNbI(p(#1ib+qhK}wA(X{rv7$zF_~_)_ z=kqH6gTA4u$%^&i-rhvVIsl+E|3y3&4a`?H@9msB262P-xX&&uK({RK`i|T=I67__ zZ2E`JSMxLlzHLZRm27NmN=i#@cym1EVyFGDiO@M5 zjwlD?h{j^GPN7BqeUuacG)&zbD*O6A(Pm6iOQ0!56UU{h(*o7-6r+QiM%eXWLdG9! 
zYbfOo4-d=Lqih@;%1TR!BcJO(BEKwULm>U zKQhwL=yT>it%}(V)hOrxcREpPuuL4KCZf}xl>4e&R98o4$0V0Lv$iJ7%;U==+pqvl zn2jCI=4n>5+3@>TNh4osp^mRt&u2CO46Ge%Gbjnj`*GNOb%g#O3D)9u-z7j}HvG9z zVCv^@8-Bii&xikpJvH6IfY@}?NHykX-+9wQ=Th|nz6f<_78j|{S(7>@bqohuhqC*) z!;SKdG>%LYk=zy>e{aQLRALjY9!(ofi+vT+5sKadEqOFJtdgp-mIlC};I8?DH(8+h zK%Z;NnVFe~(mD$uaKfp*QhWEBekj%w%$G|PMzBt?HuWf(Oxf=e%SH$ouUM~K=aVrp z(dH}b>2a6Hvl5S--i7*3#!O);J1Km4Y~)8ZpUKQJIw7F|DC|-N4th^UW?@A$SQVbr zr#ZyWZki1GC9s8mwT5QNz1B%}Atxs%MJ+94A4FMH2@{jbI~vz~By#Btd*NwlZBEDx zKQ>$Rxkg@D9NKbFs!ovDHWT-~F#1CDN>)O`Z0^S42jvJoMcNq7%$)pA28PMk4xP${ zT94)SA5hh2CJ_67>;d{J79VO;>+8_`Uhz?IYpbAqrT#a$|G*X_qmKY-nI73>M%{LO zB{0%Bu6QI-k~%U3IzO*YpEd%ah#rWk1G`dfkTq5rS1;P{lxA$~w-t2%i2@+}z|fRr z##5{8FRAP*Sya~eKY<9elE#N)9oo9k*p^lbbvd-iGFm?siD_=`t)hnhS9EiLKaht7 z7%}6i>*(O0SsHy=(1(`f$I~25<>{bqN|Rm#I9OceD%oZTby?@h=TL2BqnqLMLjzvl8K?D=XbXRPA^->#4K}5myom`C zBzN4QzkUp+MhFTY2#I#L9ld1_c;`MY0_U-$R)}pr^=T`gOvXhFMcxO5Bfn>mp;vosI>>`kn)4Jm5c*! zVTCt-!{*$;q~GOOUY~f8?luganT;erW@tG8Om%KJL73Py(y^2c{VqR2j+&Z~u__fU zXE3^#MkvNk)yRQ-zH6I2h~GDvxT#J3lbuDaJ75fEcWl4L_rh8>(FLpq9PD?2`D!O3 z?jOad#=I)Z5D$>c#TNIc`fpANH7;|>$lXv|u3911^oT)o(FMF6UO!%bLSBe$aw;bX zWF%SIgF@a9?Gckr-ph_naLILkxIhZL*HQ(kPq2C~Xg7bMg^cV|(p46)Z?imzep@a8bY;o8_%Y6Egp z*j}W|G`VG*S?4w;>sm{`QKa= z3XIskmDunN56MSX+~F#B5cof981IT4{#9MMJiTxg;Fk7;JL&HWkd3)%abiue6G z7crkJrZcSubk-#2AtdPX-ung0&Kw*b<!q6_WODxrEJ-d4z=!mO2m$7Vf5h3x15507x8T4_q0+3% zu2yR3$fY0+)Y#LHgVGuQ+uY4H>le&`pD62!5k08$dLGNxbq+9gs5lQe@j+KQ;P{pg?{k8XoR zWYRYjaUp@0_6?tNX(?79R7S*Hx-RFwDyYPn41aEe3Rt@N&t+!0gcQtEKxE|1X)uD| z$Cd0}=N1`g*h&7MkrIPK9Ji2%|L0Bre9|Wc!*>A$G`JFE^3VrC*xkLfJZxeSm+Y3! 
zw@8tvVzPRRW5>p!eXYUf=4%ktjZD5BIyHG;GU=SW!h1^KnN#u$khQ%DRNimp!s(!p zI}#UDjj-h~7(vLy4aEZIZldg8qxCUYom-?MkEj3PqlEsyz~TRLRoo}KRR*P0$FsMA zCO&yQCWchSl_s%eli&J5$y)8izIESdlA$!|e=y$9a(SPpuHFcl*s4#0Ol|_@^MFXs z6DuBc*z!IXc>3vNu8NPH1q6EVA@5+mg`;mF_sr%zCqtT?G)KdOAblWi> zV=jfc%i z5#s4YRqm5mvPUkGx6zhlqBp`i7ZqHhSut@dMPez~var*Q`lTYVs*ybQn)xeMrY7?zy#EY2 z^^n!HOS!_$=}#R)I+UW}uu)8vny?A z$nQAsjthG2dBuWuk;)b^Px9pZ$R-2+%kc53{yVp`1OMyy9|48;^7F<=XY`zQ%f_t* zr12bW6;+URt@}LIC*I#ovtC#Dd3hu^oa1E;m05S~*x26g=@-tv2!8hgqa6l*k&f9z zH9l^wAT2dKasKfvczK_ld#tuyy_MY<0b1dW+0?r+c*Ea^XIkaH*T>=XU zqyNf~Kbn&`0J3dJM3OGCRx$;-Wl98X3>6U72o2@Coya=wNVh|n%H>k9Kk1^w3XK;J z^UTf8tw^u-(CDU?GVMt8;bE1m(;Q5pFfJk7uL^KC3`}M)$ zI8pW1FuW(R;NQ_&y75e|ECI_FjMB3h2I{CfFswY_iadBa^IHD?`?B7KUnfWxQeTiV z=P^$gHbbt4D9u-uh7Ec6Tf$7(_u3nK6$Vr(QS$ z2F6qsmSV3+!mR-O<+-|WA*JFrVO3Oq`PZJKg>mF~^qozVF4+O;)$1iOmr_GIbW`MN z6^N-6l?%yI5x@o$QJbHvk_-_lnCb||M2h&BJ~3W&^K&NB=I)-P|DglL9;f$RLv8xs zYCS-F!?X|_dRDl$4|!#+oK$VvZmJ|0_)?~gd_^SWuTy+7PEI0FVheeB?2iHp-VLua z8nL4%SdFGe_09cND0E*~1d+}x+FcVvF2QDY0fGIoCKT$;pBvcVfp&|^M$q)`DX-=3 zV8(h%q?#guVg(XRST(iovBhh8RBB|BFt}i9S*)2kOo0nyv%ep(2;8%` z)F1N)a0+dYg<tf?-}UAW@>tyIZfO=Sa(;+0dM7}GJ-q>W298W-_hP^y6VdP< zyU@Dhf5BHO6tMy>KiXf{1>60DjMJo|v6{&i{;8PEa`O)IJpF4lddr(bBU$v9R>tu4 z3dC54DrPzyMrcb^NR`oQS)nM4k$KXlll-5Yp`AO%ueiTUp*PZc5zrKg^?&0imp)bN zV!2u4__~*mA^|QmL9OR{f$u>9KZD%eICcRscZSOdee%VI@~V6T+e+t+uIsqUn|BGi z^`?Rv(RBL3T6!~RLko?!kjZO=cMG!5Dlg2Nug^SgZcRmeb+QqsbY0>$2{UY61&-gZ z+eu#PpV7{Y_#+Ic7tKg?Nw#L`iC+>E0)sDI51MI@#KTC~)Zpp; z{`J|k3mg5pet!~a@?3l6dZiMs|w z;P98ckP^vC7%+n(Yb;W8SO+0;+xc-@UlY|-js7pS=+X%-Imm&mV9cs@qtL3wEOk*$Z{h_ z&yTJ6U7Z69XRu(f3Bp0G)wy5YNWhO4Z-n?2{bQ}bjEUtnBDSK<6i`?R_eZFe69g;6z zLwc#9xO%1f*?l3GKG9EwPUepf)&F-d07m=D!|M4krZY&IAp)2#+c|Xnf$6?fJo1a^ zf1_bXd_thF+Pu(-Pfx;nUEYr^rMq*ZUT`0ZwOuS*a8KjB@^7(Hq_$dz8Y58r`^`?^ zS+=I-w7J`zRVRWVx2?s?&7=QwPKvWzIV%th!0zh|DV08?r{?c#xaV#>VBxtqgT2nG zfc*_qKKxTdt)$ImjT55XeGSBXY}>V-v_e;qR&+#N4#8CvJ*PQdetRq+%@ev_oP0F zn~%4j810?D($lc+fouyt{17E6eeeIs-IMBU3(?*oV6tBiAZyYWAH>~ZS^iVo-1Oat 
zC_)GF*w>TWfBrrL^uKP#)pUYcDCpn-s>6l(&J^46U2UL0#4O%lHRUba!DXKml{Ebx zH|bvs8S>eBuKs5@J$mgm2|3@yoL1Mop`xh3ilxu~YErb~mZ|i9_*q!LC*hVnwFV(0 zjE^6Jbj5cx1Y)xQo8vO;_XorbPHWF$3@zFH?rTSOeb}JT^z4~H4|H5!5J=m6fhe?= zcI7{*XH#(*hImG*i4&x;$y=DS z`;wQLIKxaCbQ(9URHk58<-7`ZznhTk4u*5i5v<`EzPM*n9EuEN&A;b)DYw+OZQH%P zS^zRPac4}r_*akV)3mA%`IluXcW9Lz$?*up(K9hT%qb#7-V)!BoiN8^4IcM50}>x6 zqAe{V*hXBo9z3UKo%p5=WL7g12hBbGwG#X8Ns8OAwYu+{W@`lblT4nTy(4kIZ&?V(8OV}7q1|uh0>&>V?0xoq0&$ro*8O?juTPa?nea#P^!Qc7 zJm6~4H=ANUlhs<@%5*HXvmCg~efb?*8gS{KzR?!B3Yq+_ZzNwqJZ}e*df-aavPX^S zA~tHs_XxToMMxL=^MG)+cdXI${dCXPVZ91gRv~mqWT(r+sI$2#t?qqP9CwmPK{NU> zITuoG`~F4&U8PWWsq?-IS!?m9+kCq#7JTR>u?_(KHLVL}Y!lK!z3eLgx7?BkGVrXo z86&?tKit-wPsYCDjCTTzjxUcJ@fNH)=1eFleY|Z}ocFCE`x9)pn5=HyQDbES2mOyE%Xobf1#^3 zSsRJ=^2@xv&)-QONNI~=w_RGKas06IJ-A#(r_te*^ljA+p!XCPCit315DU~7zn`yu zq&>Zq6W_O{N%Kb&sST6ty z;A>2kq`8NVlh+C2>=nB4tP6;2^>krOuk)sCzF5U%X_Ki}tW;~#$p^c_sihTHZ(0mR zR6n|J*Rgq=`IC~7g^1I~8h*3*=3k1^aZY0V>P#bIl ziRVGHt-ozl3W}!gyL{XTrhq#r#kO=jYLxc~`3rz(jjxTgPub#rupQcp-dIH8Pz+L2 zE4Xi+%nunY3`olr7?okMH+z*aQhZxgVRM?6ETfd+yNH44Hu&P5lg-^|lfcs5f_?t_ z=RaLK#OAQZWi>(S_i7C?xtJ$)^Hk-C7p%Pgih*Yv+=Ny)y|Z|{QXls?^?n_h`U)=@ z$~JfiHaY73O}0SM3N;Oy5N|K%NXTR2nB|7_{F;&7%R2u%5*38 zmDBC_YiJSbL)^x$TwX$?qD#!^0S136PKt)6b|YpoMXdzvKA#~TT!MBX%&LJK3~_AX zhM8L8avBOpP3PqJYp+wgK#~_U@H-)wd@26s$Oc!=^2LF0{O_O~Dh0>=;7T9D+;1NA z0^#~<_B~On1uyVp<>88SgZ&FLK}T>jXe4Im!GM#M#xxbQ+Jn9VswIEP>pgDLZ2TGh zAH3w%q$QmHA}>+WU5ARjF2*^PggHy(V(C3x9;U>aoi<0x=5v<(GRHR~JIsb6? 
zckYGG8$yj$@s+`~KaIE*C^+-MvREWPTPn^}mRinWjh{y3H`bP9vWPL%B2nQHl)otI ziax)fkrv#-iwJb)@fdIR0aC2k%G%JiWglSdQtYoZ3d z_(Z4@$%&}CL z(eN4G2seyvYt~rVetBTSsGu?6Tn#Pnuy9u~D$tJgOGR&Tdiy`Y-!Vp|j>>03qp^Im znL>VoDMY9~eLnZNJj5i0{_?8A{YgfCOKmoIISKhY|FTG*eAR1^np{NAir@Df?`qDA zcBkm#8MB67_)@IvCSeTf{z+q+ORDq#;4Ss_XAoGZ+uPTovfOkbstq8X9h{%};&gL; zNUuTL2{Rv*s^Qn%U^{{Bic)^1T0x)zGGW_RvADM=rEn+V?-e-LL@aM<+8SY-^P5^Q zS&kmfm|_NC&bT5VXxne4ae~Q{26Efs_Kk{PpXtxY zg}b47-vhpR?MV*sEUERJ*2FLELR4}b_wz5@f%PF8uM=x;d)|Z+s%OXujR~ImAqviU ze#>1hG&$VbHu?PVmq4Fib9r7OhchSe6sWbDps$~-H8C(Gza(+Pfl|~Xw4TsT0q5BX zOTveNx``m_>RQ^(##UK*tK*{)g7$c_@4Ml7_Cn67LgoC2r}P>PglQa-S}~_06P2$x z+;=ywSF?J=hRz&r<-b|Gh2i|d2DwVrgvClph}SOIp6|Uh@}IfNHZ^5(M(NuUV$XnV zsd7)s_19fo#0o=O5Fv3Rwb>G|%1=$|ey#R@t3Ao59SQYEx(t)LQo`!3wqdnGWKAB& z#tR-3mTOD=D@3SK*QOe(Z1Wo&xs}?o?aD4Y|Hu!RbNha_L5IPAbdIFw{0_EVREu{o z9(aCU;q8;eRg6S*cUUKsQ50YEhy~<`?1ga9JVFnXk?L}%96ESl&qJK5N?;{cW|i*2 zX*E9gykzd<=2seS*47opdRF(_kT>JzZ5>^qfkW6;y{0F?63VMh@7zKuW3Fo9h^V~? zv9HFIi|Hg;!1#SMsN z(}jEUu3g7%Dpch@V(a^*vbbDykzG9{1pV#t3H)5WVMD>=gXY#f4{jY4)PxtV{e&GB z<(B2UDzO&CFeei&&!o;ijF770|M#unmHhqwUgw&pV`CQtN?`N(`~6_^*TvhKBv~of zhF&skfvfvD^F~5p>RhmrZkHv)*g^9}oR0?E5ejK1s<4*r2b1u;x{HM|riZ_hWdQ(~ zS%Yt~O-?d)A;6m%(BY?zfLoHcSvwpsYGo3%I{N)Ruia&D#qr?I%E0Hit*<8Nkb+y7 zBMM1_FiTc{b=Anwh4iav#774#y!w~(SJIxwlC;$on9(SSXZrbWj$Fa|j0v*NfvV5P zk1WUW*Z*2b56Ip|9TeH}JFj#^oaa)6w$QHU<_)4Mv#*M%hk^alZsjHr296b#I!C3YH^b8$OI0DPSrkV0xXH9fv1UJWm#))qX$T zZOUSIAigff2iDrNwxC^U`X#u0-!HgcyYnH&fA%=jJ;=4D@vmJ0y(js|=T=?x=1aNw2+5Ud1?wDVcr6JHQ97n#W=(xf27^fF+a+zJh ztWubqgoRosN~GlRUQTS#l6+5u%i#!C&(Fk}(uVMfiR$X8NH9l4H}37n`04>KxyydP z!!EqO2)#YjsU&qge>r;?gca=a76d+F3qA8(k3=wdzX44yGU}YlJI=_iZmzP{rZc++ zBl)Ct67P{o4u%imkB+`wg;y7Z9x`TPh*f`b7)x15{#4%U&_o)7R%U&L%e zSiCgW-#?PQ{0j(txKiWCT_5>rfqpgTz>y`~lonnu@9Hl?a|Z;RMi_YCs%)6;!cMwQ zHmO`5E5!W9`Fz*uP(EN?97`Qlox9N;*GJ;V>ljVE8TbQ=FHUw)`=j!P`e-~8i`EnxQbC+?p1FrOl zl~ZwXo}||9dJ(SPc*;zRATfDAFnZZY(V*Q?^~8p6@=efz$HA>|8dj@BN3tD}*11&r z!!T1aTw;K$p{iy4c;CUd23&4@A;9G%8l>fky(d{@_$2m&TVpEh@a0s*J2K)dyLT!7 
zMEOQMf+OAr=FvEpQB$e5Zgp5pIS z%!$dqGGTwYk=u+@z47(_~}5MWJf5b>>a#xFX>uciKYUFUH*C&&9bZ6fy(@BK&l zdEOrqrseAh6rc%y%FOEQ$X%SkA)j03oUgXOe>B0w#CV?CD%E!GX0_1d)JHWV3V!Rq zdB%OUEm(oxv)^ylPZ>O;PcI>v} zdu<+$3Hlu!a2rmda0XD`qOGcH|NjOw>8=x-x)H5^ygrFcWcJ&#pghwj%5H8qqVw~W z(Wv{C<(G*zPHb3xE#Hl&HXV2%*KHen9)f3s$AC=DYTyqbLOb6Ima28ZU=3A_Uf{XW z+JL#%KdP56LaX~ewu1hcz;X^rnfE)?W_vsB*@m*p*E9Ld&=K;Py%(}_0?d)5?h_e)J&We4~i zR(A$%^g`?vI5;s;e7KVDA1n14ej{i3-uDSskjS9Fd}6WR99Sr@H$652WIAi0Fcq8z zuM{eY;)`R@!ZenA1GsYjRx|})J-7Pp+i*Jy4>tGFEV+~s0SPZ5p&pj=;;gSHo9%fH zWMrk~`<|BL8+~)(${Tt;;aw~4wUI5zM$yW2wIb(r}E>G)us*y^#A5Q0~{A&DGIHDcd~wHFHKGut5*E6OZfu_`YHyg4}nAu_}D_K z$r%^KB!b&n2N`5TU}905;{AYbjLSL~)0cK6)5OT+{H+Ad<=N zhTrU8#PHQvk+_xUbecSA0aIe4rl?|xT0SGlJwCZj%WTRTn8%mkkkJEFHF6d3{)j=0 zvQU+(ip#}AdL?x_9PA_RU@Y4nj7(*ncrNc6-*YvCJBB$7hqtnATGygg!t$2dy`EHF zy>P-Mt`<<;gV1)@+_aHxa@pP6O<#3K+>W$rN zbm61eE#+m>#LLU%#&(wxn}0Z>&$;0*;?vlLL#= zZnkb`O%w(*+dmEgQZal?ILqOs6X!}QC9Guke|*USfh71pzxHO5_eh*BmE-167x1+R zKR5H|#Dsk;s=Ou9s^-@f;K4c;e%CK&~%)q<85OlI{91XF^>ki zmg_fn8HjskO;v^sVP#$;!>J790CNgmq`U1vL)W5GTqN`jPt?~KLG2$aB_hnP-a;;` zNyu;QN_G-`t=SN?X)#&0^UKOo!{&;w-y>i5B@gykXH$E||S8yHO)Iy%kWslta7C z_3#Kr?hbpwuBk)JzhL86;1 zi@T=A2asK_1^GmHSY=y3EjV3IJXY^06vmxeB`M%Oy7-1P#)gn1{EVyP!Tp8zZewF!rwYG^3RPPst~pB0WCjZNsX=K2@cmytm#SUjp>ev(O2-bgA$Q zwBW(DI&_Da$KRrpXp}gOA#~h4Q+pb}x{(7)IR%{Ndvyrs&Yl&ulXQa# z*&`H^vF%2i_`{l^ICG<&PqUTLjlS-^);B0NEEIfU#RD-&Y5Obhipy>-gq1!FS+CcW{xz$#$1zYY^`p#w>K41$sN1N7k&aiACUndW3 zTJe&uikkujQ(5-wV_4tm+Q9F)(x4Dgt9@$wI2>9o^Uci6(k%P;Uzb~$~|0HM6hjwift%Bdrv@kIDu0m^n zDqzxcKd*mfoT~WddITMUlR0{?30{ak|@Pa&Gi7`G`CFvh@;{*vr}Q-AYb5 z!EM(y+kGawqT)p;5Fi4Dmi1w=>@wGY{nJWLAL|4ypC=kgoXGL5rvSDI2Y zrS=G=+dHQ$ZfNDUqUS!nAmsn<1(@}D{KgVkolpY$VHy=z`iP^*A;OYi^9$kV5*Xvw zdiTu>AhvP1Ebft#_TxGza66W3K^O2@mt(l5VUW=7|MlnP{lmwyU@C#hmnB-fRY=sz z{p|0iL`Y6bzf8uz!{hx?=iJdBDe9B9McpNtfKt0Lwg-VxXs3LY5jRVhj0W&-t}ig4#elMl$kpE zP89?YVIoZo4aMM({$k})U+_k>==}+E{HnV`%Pu8)}m+uEW4 zTkYAi9nD9^8wCAB@QnH6EH;8^Kda{pPIeiz@cLQA=1*g|P({AqVW)f%+e>1~T{8T! 
z_-zBLP(E6jzu~XD6?(`Rbqv)lr^AIBiRK7F+l%>Nh`>R>Pk$e+t=qSEoTZ;0bM{Kt zd--0i0)j&ZLL{gYr=@(QnBLw26H{zAowSlz=(v)cpTBGT(b7|}FhjyQAP$M5j5*|l zVIk9n!NP1z`m3lkvnnJH5ygFeA|YPhFP=Zy1BZ!GKM%ID=NB~+zIbl5OrCFMbczk? zERG+fyV5h7ZM3t>K9@Fi8iKwQ`2U#cD#fl~`&GQY=6_6L9OxHmK*#bH=OB1%ojA86 zv{JV|hVnz3&7EY~e5lC`#p91Ky*Xg*r~q(1D369c3{89{aRjjpC;!$ryC^4$ks7eo zx`u%}O&PIK66JwwH!D0A#8?{hB>sr^2bo1UBW?Jwx|5i6eK2GYg`tS(O*@Tp?YKU&YCr+FSU7Sy_Sj&n@y2uTU*wokxDZ}{Iw7R*Atcy5Sz&NC)KbLCm zQ_Vxo%yTHn2b7a-@Qt%z>#0Pt@UMhT7D|lcS8~=cQEfP1c13UBsK+$kZOQal=3-rv zR)66aF_DiM(MFGE)RAP)aY~7lNP2JH9x3ld-$HwBL8c&mRjn|?GYZ1$2oVao%w^Qe zD9?7syUV-CxWAL9yTbqhpU&y9dCn{t|GqBCvo+?=Gw=}T6XOy?ir2YBbL0iq-TTU&jqQc5f_=k~hNXt;P zNJ=JM*f3a2mR;XI+}S(WkB!AMAz_7`23N1Je77G@LbBN9R|9HPiSoh+Y@D3gD1oh^ z^ZTt96l^K!Mb5c7)RZQ-8#L6Jys1nsbxOw;H>UlsdfS8Qno3!l(xMaQ)VR?5e&%$w zs_or~J!3yV!(s&|eu9A&Y8K_iwj?K}!CaIj!c3m{IkZc&b^ql?pnmu(0e?6hMs#?7 zRHHYvec_~^mZA7>c5h@Der6no^5I_Sr;f3>9$MMn^S{OTvYmB}m6RdrgStn6iw)uo zY8d+``!52E9bcqjc8B7fzaVPB3Mnz}_Nf{6A54<&Tc;#gCWtW>12VO1Hjqc<$CiXP zWgIWL%OM8@c~QwY!m0PMBqF{Bh)7a1Y)xO%tY?z7Gv@iH@wa_>E$9g)!bd2<_%kP8@~&ppU=Zyl?wnLld{(sbwpXQ;|v0 z+c7I_fK|Z-*2rb&>Y0uiY0FKw9*JZY`|EKgbn5m@?3diImZrwZu(?VEewMx^Rc%Vj zsW&nT9jY(w#aXevCxBNI_ELbP62ye$l6%+! zO=!n7H-n?^#mOrms?cdC3Rw_y<6DwX!)5j9M21S4E)+W zkY1ED5wVk3>sC@>&!#kBf9D47ACocUdLVyI*Uw-sgkm8Ev`2p2L_u~iNsxpXBm?Ni zHq>tdOX5`KNk!1|?#TJQisr$votk3w+X!|7LDLX zOWK|+#l1QIcr43Fp{E5dhS5xyZ|cwiDvD8roIQ%v#|sXM^A=|H=n-V-0BtoXB$+F- z$;ammM$5$dwC~56TLH~yW$u5o$m5Tp;XmA%JB(coC5I2I{sa`-io0YImII&Hyv4WW~CO}(;Po4vXks&%7utLWA4jdfOJgH#O zcU;!$B6YXUNd+ab$1{Y?2b9dK%uqe~s_yHo|9lDgPzge!N=eKPY2|7Wqeh@enM#=p zQLhno52#ko|F*Veb{9pGyP{l@!>kaEQ6_clY zo{teklnF8P2sc~;Wm?ToWz}!S_D8*(J;3aQf1M z^2))m_i&lP-_d+zw+5lE$Ot(A2@aM*M}anL9)8(CzwoWI42>tk$+7VAPyJJk8ddt6 z8DlI0D;1r>k^rl~f?rIMDWh#u$j3aWZM*2N_pg2s9(yFS{KC94b0M1Af`!6-^MU9o z+Ir-f#1+zz&A>$hLc|;86LrPIWuOjG{)v71*YHUn zL4!D@>Rz?6bRtr3d5qsTXUeyqP7gqUI+=LM=_k)S%!Znlrf7go-r$>?0Rylk_!rg7 zE2%dA!EWS+Y7}Hug-$*b5w*CWjbit=9Zux<6xf+AFM8eI5q5! 
z6;07VH6a#uSbTgjuDEev{rPeGihmr=F7*=uO$lrJl+ATj6@EBWiI6&7pOWUtRyDK7 zD0-W>TXrd+n&hPUc2_FuKWSr3Z-D=+1^0he|9AVpmzP~hX5ra_nP-+wfq}K3Fe4^x z7radwqX4N5zkN0EPI0bnw(%KgiZ>8;(I-DO4>GVQehJq*Y=&7;UiKsTQS@N>9w1OF3E-2e3ceK7~R$4$r5lXS#kS>f*AA=1nr=b4+dFUDSEgQuBsm^&^)>Q{j>7W4 zLV`?b3rBrUx)Jx>4cIQcgYY_S*3@zb6H^HJ6xIYFUjE+J3Hbu`)qzuoX?(;+33*0u)@hTE9luD;MZL7Galj~=*-YSyutIG4)G zyvps1((?(Se_yv6FF6!8oVpG|i;v4L`$Ok9`{*pD5HmLh2Hhc)RK*8FC`;3>jPpEV z8V)?Qv9wG)?IFis?;xE~Xwb#DT)#}Yq^bF30*-NIGNg5J*p<|)cAAeFkt+eOCJ6=) zVnqGrt2P8%yEe!;uALzLs1%%JIrb(YHFT zkF<-;C#4Uka=84Oa_f;3GPxY5IoP`w(^~ip&;@O({0*d?86E*-OWYuGcD`N`s8Ph} zo+jD(*S=W*TJ-lrg`3bIeD7KPonGJSV4MWdSUL&Lcbnb;maHpAXM02H zqDksiL`w54ra>8z!i~&w;!W*XwVQ;XYlW!16l_~ykc+_4{00VZZRFSX8@`GN2dlCb zL|Ae9za00CU@Y%r;SlFT%O~8CJ4(EOcQoh1PBtkdc>EUAd^%LIKQTYlzuaA&~u?e-j zoM*$TccvVswSAMojQVE=2*R?P&T6|4hlISXh02_;;Hx8>JW<9;Ib&n%jN{>g+tz8e zq2Yok)vDlmJ_#>y1Bo5N{7)Vb;aw}>7s=-+Fra5a(j_oyYJyf|`vav1rKYIzR>t?y zlj?24R`goJToM@*d}bBTKKuMQ?~4K^?(@jL=N)G{nbesQ95Y=~Irc`Cp!-<q~dj?hfo2StZA#O}T#0 zNz>Wu*&I)$@Oxj8rq4i66%e{*xi@^x*~j8rm5dPW*ON0bFFt%88r(nWaBxzsk2xYt zA$`77#VRd}&-d)V&ug?aZ1ae;UGsijAy_c3I2>~LUqd}%%&=^K!Ux=kAO8`!w_z1{ zrkn+wt&;M`laho8Dw`n}rnR{pk$qa8Fyef}bPWR9bCD|v*#rhp9${SFHV@UWX@8km zr8bZc92T33(8<~PtTp@KNU*n4IaXq}v&N>t(|3DjY=4d9EZ^e)^8 zf=NbSXz-$daKAp7+w!LVK)*PF1g+o`@wSS83V3s~|A#a7dB<=&SuA4NtpR-UaB#Gv zm6Ci?(Fcsp%;N6v^h{sSqzD-Lq9jJ=AO3-Nb@HGRu^1ntzw#;JBH;J6QE$|r7Y z^hf79_gMK&k6T95|3EsBa#3Dj4l~k4_d~T2yZu7w1hl1l({1iKa zx!y3S9J!1(-`Hel{V?h1wcdli@R2RA2#3~}u=rtEb zV`&AMpuKc*0ylr4T1JyH{_TJ)5*Bw&mz$k^*bJDxUH*E|h>nE~J^&H1c)n8qyuAKm zVC8Ulsc-qRx|RCpOGJ}ij`?~6X5XZ6-`$U%`_&!H>u<{!>WxeSHjcpl;#HqhlmF9d zI2IA4)rj{Y77cQIXlhuq%_ZU~O2g}X1kvK?3`nvBY?x_sin>#Det$Mc=~T! 
zp%8GbHuuEsF9!d29`<{lFAciW8IJbXe{A~Q_#{`9#U<4-`?CTg2_wj7cv{Z+of~`s zu^d~c2eJlXMV*;&JRbKi$FU8)_0e$fctoU%EST+9SMS3Ch207!w|SrNv-XfGEG`+n zA1hw;PotVGYI84RO!C>Q!}U38s9J0dFu!2){Ihzw`1+P_L7^n#V|?(BSLeHZ?Hv$% z)y7L5q_A0jb#HX6O@3F|arqmvO0&>(&0{s+^HqQ74bJVK2!jFKTliEY{g`7qXF?}S zdNumBQ{f(5|D=41eNW*7-m{Jlp=AqB9x%0otGzubpYU z6Z$pwCCR?fdaKp*&EhqC!QS?3%xcW&w%zIs54eSS1XTs2!jTxF!0K?0bfMsBTR=18 zwKDV$TDtf*kQ?l?+S{0M^fy}P<6va}y&D)IaEFlkEMSuN(k9CPlZl0U%oPA9&w!=^ zl%z3u3590ge3KDZ9@M>tTQq)FQK96Ypo$_4Pu4THx*^9)`^UA;Gb#}7?m58(*RsY~ zCP7fWDt*!;!mWEC@4Y?L>)PD^*(Ti5RjZEFrLs1pi%7nif#r2YM)%DfhuVfKQJAmQg_&l+%>CXt(JQfh5S`rxms9*Ko)1m9&8gV*3-->dc?{eNZ<65n8?!0Cc}9Mkf1?Ppv!4(-2i zQ5vO$vA}eMe&4}8vO3u9&jpfepGDpn{?!JH7>0WNwl#U#DyEu|=N1&a5Xbo14TDqJ z9;undJ;?jmwO^@Zrl0N;B;gYnk_5PI@%bd_c%)(>U=8ufOIzctwG_sX#5K<=9~NtB zPaDRg%F_KpIsT^GL4JK6QujRg@Nm@|nA}}rc)gh>TQ%oWd@WMvG6vdwH#PeB2=s<-aZ!(oScYv~pWg(S}?WcUh`Vl}CT%#E?x>!0mxq!86?$RM#-m!X+`lOnT%-&z_2Jm`km1LYuH z1*I8~urBxSLbEo`aLf&jGwQ{JM$U$`^`#d(rZ0Hvf3hrm+gzpw(XxTY>d82 zet@w9yC=R%%!xCg58<+I5#s87Yr^}xLv8Kh6oa0ACJl^sEC`Ihj|(vRH|%Tj^;UjV z6L60b-i=cj*Js_rqyf_m7>Z8U_}-7c)T0`oaRIZshr>9FuyQhE{nFS^4*Llx5^IZH zvDiBBVvw@qqKPiFx&bNo$2Iqlk+nAuTh;I9>~9E6V}I0Kq6FtnhK|5bc>QvpYJ@r@ zQnB<{6L^bN+3d}azm;6u?YY%#^xq9$1ky!GLQr$xFyJRn*ZnJ4jeg$YR$`=V|h<6>*`v8wS1>{Vc9_1fZKMV~J|js<>fyFD_Kyo*z4bp7&SRePs?)YH5&3tS;K zCH-lxD=xyyl<{}Gvbd57j=@U`;)lUy@%4rnJ)XUo)4x?pbi*s+?YCD(hE4bT6oGeQ ziI#(7I9ZgdIBZlL+DF&9#{rIZ?Rzt555AdNJMg!6xYsSd@oRsJAVt<+iL%$NpDd-1 zsPG0*ai6tcVWs$b5-=tk?{NV_K5c(i9p2e{Tew6WdvXAuPp}$1sp-+$@if$0+bwaM z5bk$FPsEWq)WRHh5s?vC$iZpfHs0Cf+D#DwcM!7;g^Kx>ZgwOFq|>5fTIRL>c{5Ku zu6AD-xEswG7SbrDVcSR5X*KuDfa7}$RqT>ug--|lMdM_LtS-LJAqnF3x#Sy8OMQ$jpd;JSQa1vr)DX!NEO zmR!JG)ZQx*ou`kLwPO{qj`_B+WC9u?K>Zx)# z^snGpJl)xW=h7CJO^N;FM(}s}PcFP40B-}H@xS@-e{G zjYZsb{qa<5jGvw2o3;;k`z03e~z+*BwSTAZ>l?*GV#p0DO3;np0V@e zYp0rH)J?ueTZeywS(AO(x(X4S!zfwilxJBKJ}jaV%jhe3mb>kj{tYECoq+JoAJNOh zchA(phXJNpLcU&^pBg>0Iu5Q2_)jX&8}}F6fU!V0`UzgcBK36a_Q__9-qGRDb6x*_ 
z1)H^571fp$Yp$M-Sn7DRi1I5eU1uVmJfMW_wz7<0GmgX)Ax$fT)BP0H=fe^rUw2CO zIo}Ak&rdTjWUfKMnNUfF85U2P$?OA(Z^G47$qvVTf#fZe28HaVsFE#e8eos14MsaM zqL>pVVWUDwoVSd>tc(&JhdnD|npkjBLtV{|eM&Rg(E+1-KllMUcp~VB8~$u~$X8-P zVk85T%NuO#Xko}QdhS1EsY`R1ty8oo2k)rUaV2ksC+0Q}4l*-$+6-HN0|DzVIm8;| z4Y;`b)kZ0GW1}8-qu3b_g&?L`V=V#_4SOdiCXt#?CwN{UYOi-&Hn>ZC!m`8rPC?Pn zGE8f({zrdnYiCE3$P~IsT3)r@w2+PeY5}C`%?;-TiVQwf>(*!=p5X4cdx6aPiXq>8 z7Hu8uIv(3R&d|f~25ly_joLOK74alDG4^0p0NA6(-^WkEb@sdiq*nBNyw}Gt#qxF;`83NQ}l5~Ce`-NaD`?(v&s;J zkLNv)Mq6V63fxlt?hSp5j%l)OkfnF*VZvng>50Y5s*?NF_E5hxni`U6z}yzp&k8iZ z`m{A34B$*BN_3bR$g(1zWNW0x$*C)Fo0o+p8-caXtMyKML$Vp-cFGjN-hK5;VtyhJoGblq^oEtAT9@>X0 zIPHRb2;Pfbjtmz^r^_MFWtA*)i+&?Us1#@Bo$jM?aC$w01)x_a*R0Rh+rej7L`YIZ z&~6r9clyc7@sFf#u6uQ<_tV2L6VO|{Byi@q+iznXe0`j?LP;2-f`2sG+6y+9Ayjbs zLt};oTBQS}yYifwlZYox8@RZ+?HlXrgoXMhZ&BXLYAcD7fI=P-A*2PCw^mnrIwN!E ztNulyuf_!am*pi_Cn2+W$C~cy?6{F6T=hiZyrE4y%8pN%M_yspo8zhs23@>dKnq3;0D#C0~kIH;E0e9rep{;{M zCx*UKT{&cM{C5?;{q1_h7fUt0dpaipjh+r;p{^&$&aPOMc4i^-%-DBq{4mfCwaNw2 z@y|1L<#(*7Ma;9<-<`$b`8Y+|m>E*+P4UJK?xZP8C{52ly3yPUoWu#n_4j7rX+cvF zB|67{gN5k+iMx-%)vGEyHQ%?t9{dtnd6SZ=WUYa;Mr%$l$C@Y_S3I5xB1bi>g8H|F ze~8BA8S+uqrGWj9p3)si?6S=-?27h2-)+#3(!m#P>9QL7ev?efgE4dGp+V@>0`D_u z4wZ1c;jut;vRr-hqZO~2Cs?G91&g&!WuYWJ^5dFPr`{bv3gKYwh9SKj37{ps1epe> zV)AymlhW;X2@WKk2y-qWlCq?Y`PLq>V7-WMRaSjv=}RXKQOTk_HFn%xh90|Vf{0Am z80`a#t9kl)tGnaL4Sa0tGGaWd?Z6N#8*@Er`zqyl1pF4B95N%#>Vqd-^El-EuvV)1 zXxse}jx7AlU>bgY@qkodc0Z#~BAl~#PBGI&p$ z2AP~aD#YkcD8KVZJ~gDs-%>c{#Ay0s6()XkVKHXqvihI)dX~p~#Xqmiy3>o-t!$k4 zRKOAwh;w5boS(q9&ZWl(OW1;LXJl=bUKE0wq@%)4}Od+@@QQz zI8rlb*5uP}?>sfb>w9;{Sw&$?j)>rHqxG4O8=H!drrNulH;cXU=y*LHyEl{%gzC&1 z8XKp}hbT+iHKu*$q8|H@r*G^KsvQPz44TIf#7>(V+1h4m=yOej1 zE7A(;V9d-S{UU33%Pu%3j;*1+J`w~hIve=w?GX5!8rjZUx%cG#(5H8mOA+YWZib zd$Nlur$koG$)0=41PpG819ew$AQ#`H;GCSI*CS=!Dka2&W<3|qafmp68Z+VncF|(% zfae&3>nVz2bxTvUJ=@$(^OXcim{1Cth2>tiV`F?e>%+6#J~Qk5PO2cUfO=We?~3$v zQGrttEmV?q=5qa)tOA2!qZXI1CB-+3^83-9{XaEe!p!k>Oh*zUSnXUQU7wtxI<57^ 
z17d6X4EH%SO}%be{S$Oc$ry&yhwAH1%dTojB+OiWF@UYZGtXiB6x^Ct6(X$kHPHG$;-aSWj7$VqoiKu!mgT7Bo~U$%No>Q`+jB!>hmxhl zMH014b6AJ9)3{Fy-MF6;>#Oa&!xu}O_Zs?#I*~afXhz!6_*=Lhh&8-JDv#KwH=r&_ zGO|67Aln=S7EgWZv1#L)sDb4+2gpjs-ieatY+|P*|Fp)(5~YgAicFQ;<4hot(!+$< z@rlF1e1PiO(xxqwAZio&X2?V6X_{A_5orZT-s?{{!#Vs_!a{DSO7+iL_>Zo;oh;+) z0K6CW{V4dwk1@Q$g-g6Jk5To02EV4 zGBTwX+jWI(BJv*1&`F!}WO3~?e@EY_?s-UY!t$VN1)PvIWcC>cMM@D{5S3RVY)S~c zYePbXh*2egUZx6TF0AQCG8PpZ zp{8t8>l%>_OsF8VNn@$0Gc$-1AQP}JcaAc|xQ|_YPv;+PYu!mKSVI&O9x#<%(`8C) zoSRLuok)IbcZLVA%axnGAVyVV*qXQORlr4ea874QqwcuSN$4q9rmE4W2M z%bH_Ec0#|>JGk@Y7>whc++C4=N|ruwW!c7;W?ngiQw2Y=$~hY#!q0IPA${tC5TOuMvTB7 zUAr@T?t&Nkkz>u$KLxBzT&*%k3{wfSq?JFUd z<4R47LI%G4q!>qKhNe2Wpxt!H+rLZsX>a=87U@ynxi&+BX_)~!Z<)O2p}*6~m(|ws z1*vdFT|jRc+5|rkWGsp}|8u6-GY|&wT{2Bku5+NN{z*We6^4#u{Ar4?y{$a`-VNIR z9e%2lbpC6T7LV2CmSRpJ&0U$}Hx3xKW-8QW7N^c4O<-}m1vu~y~VBP!;QMPFFJtPma zQdZHE7a8nO=#DEm2Qo!IJjV|dRY2#bKwbhIlBUhe%c+iEZv8-Ed`%nM<6cC2EQk(; zNzsomuVRrbsm&H!Vo-VLG?^qn<1aJ91mQt%?d{EF>e3EQV1KC)Wm5C9diBq-knT$% z??T*zU$0-B8OXuX$&lj{B~0v1V5Niq5yVqlcYd$nntDv)la(vE#pRV3%gBNgf;ne@ zrEIleOu|03Ow=_ElPwGPFvuFhfyq}R*b}6a%%IGlgH?H^cee9RKU{Ue-P9hxVuqSV zg;W_`S`a0HB7TC^+V8$74v$0U%OXztgFJg;>dY;yo*>g&zyBwxwk_<`w==vvFt+S%69C~j^bU$fE4f8jMG*qaWKDCWA=@CXo zUK}EtJ@3H#&1b+6cnB#;CA_TB?_@#@S}{+$RatyyxJk((Mwi9t76lgf(IU%r@f8RC zQE>pF4}0%K#t9|`!?wvfOED;~y5z@Sr8q%IG>=p66~0&0xX>+eSN+4#v|y*D4U3N&Z})rd02B=*dk2eS&Xw!npNlk3WgqN|bH(nW?p}z#0nmZVa< zyT;vKO~ZR%+y7-~3A{ws3<-${GZsz1>K+W`aB$M8EPNTWco{bM0*R|_RWe_cIu>*q zGi_mW`8zU6=Es-tJ32ag*+4pDFmRDc)~XL$c!x?);;}?L6FNzW;DZ<2#pQbC$Wyao2r3K_3KdH~1 zNhd9XC4xnF>#Q1Q_20&@!0PhP7T0X5+ONr)p#f76jfg{_oKncBE#C%?Uu0#7mW!CG z*`vKVrhBaDw|b`9i@9o|Lp&S$W0VhmOpD5gO48BK3D_ZPUFR45QxHt3I|r|S3xyT< zSz<*fgDkIrn6(LWgq&}hsY(94<*{k-4Jt9(n`D`qfzGZVEUZ>Y;hNhiE*c6?)*1b5F|L`Sbvzn@ zvzOygQPC6Z)~qfADnb0UV3bXX?o22VfdJaFi(YS_$;Y|6TwaG|&}5Tk{!ZBucjpI^ zD)4t?k)VN0RtZr*C#2%Yn#wl*P)NyIh{29+J(h|NOJ4@{2VGHcj}%}>kD3@7qW!k7 z+y*xfbbh*6kDWa9G^A(Re%l`%AZ4&l3K9Gs&8~3%H05RdN7)cva<1F88d!76`}smt 
zQ6XJkT`joy5JzM?S&2)kC9aODil*wKMrxjEXn7#qI28x)?59Y6N^iRV>g(ntHv0p) zV)5rckoj??5!~ig8;-`~KJYVXUQk@>l4KHQ@9=q>W3RUb-13TPTQ2uMg*@zYnzq9! zTBdvY6gd>NPQ{(Um8F}aAvv!W?26V&@%#%Kq>ykia{29K67NmzV@)x9=84)}LE#irb1#~_jW zZ~6%ip7#{0#G&Q>>QG9&#C*(~Zm7{+SDlLio#P%HTC3TpS-YMZxxv$O-q~$eWlXXn zlDblJSq-Ts9x+3#K!q>c>k|4n)lXC>&gp&39ljM+>1ZN82e0sYL2RbSC7~utOq4}m z8|J+Hrd+a84Kx7tJE@qTr~16G+3ue^LUR7LNU<9tBDu%gbN11B!* z2Bp!6U698iW02&6)#_O}=VH&X(p|ePb3Aj-C0`?(El`>7jU8i`Sp`x~LOG``>a1*C^LJrE z@`A7cm@_;J_u#>c-Aur-UX5GKz5S>=YHeJYIZUR-ub4OK)j8dPahs0~%_n4@JJQ)J z`jTo%;>>xZ1}S;m zDYzl!b+)D5GIE7>`pL5s&YBEYL{YIQT>WaJGuZ3xRF`(rCu%%W){aUu& z8Qjg|v$}y!b~PD~jkCKjAIIr+3a(*+th&TASFaz^r*nH6%GI#A{1Vg*Ykz63;=!e6 zol%Sv&h@%W6!y8VH1j4?w#6+dRw+*Vw+@CXl^*wKBfQk}T=@42##8-yF(2=`noJCX zjQtb0hJydtxj^SkaTZJ$0Pv&V+!n6E!I-I~6ZQ_lqCVA2CRgbL{;cIpD3; zfw-6=M2qA2{Aug@`MXk=VGDoiU;nhl3hUh7C2`?m|FhV3{om+Hzdr8hEi%Z)A`ysO zqf<6$62L+#N_(%9$CC0HNd*kq!(v*)ur{`_t7XJicrfMsv+J1Wyd=(03#>SSk*p+2 zGLFgnlT_eiUP^w{U9r;#L4mI~f{uieo%mKAmAk{I6JzUsdp}r&q%qqISz03p`n>o z!u$YIg8wC60Qd%IV2|d+Dnsd~G5QU;pm) z*KdvyhMTJbaj$(g-&^gA61|JfWyt_qDf0(bhmX$d`_CAauFp@x<^;-*Lufj#nIY5E zU7+);j?HOB1Tpk!p2CzebaL?SGf@b_p>*<`Ale)=OLR8RIFA5de}u4rT=4qp{T@8K zTQcIJa?z-R$@l6N`4|cSL~~OYH3@iq-mSEB)0KQ48Hs*$v}^35l-9k49WRzCj+noH57r zJ9a#k+qo(hJ%FaR_Tkukii?%-sZBcP5UImcvrHdGU@`0|D{`;@7Pf|B=gEz?kWWb1 z*Ezp{0t{@6DoYdPZCL{)=+Sm)w+~i|3*QgAX`;i{%l>ty=ykqgUi?u)no7vPJUojS z%;xk7oMfpKwZT)RI3Y(>Ki1?6h!2LSb+nvPI1ls(e|l)_E=^qdGo+tk__F5(-}^*_ z(&!YXLCRz6@E&5ci=YYm{`#8I?*}gI+0%I{fr;I5^Q2e|nrUTZ{qt_Jv8ZGMO8xn6 z>FFmp&p-%@M%(L*g6G$b+t$ksXXpf}`v>!V{;}2e%Iu_{e~=BJ{o#t|{Y8M_U}+v* z`4jxfw>UArhKLnz^KL&4{pTPL%Z3AOG9x1`3hOTw49h&O&B!zV0I2#7WOg<-Wi>T< zz4p8|0SR-X)cS9Gg2ycdugeRtqeZ5tXIl!)x*ISWOV7PDOIzp#Ousl0i^=lS)j>c& z0D_$~GIblLveBy45k(m1{n4ARD_Nhe?{9}I|Hy8gOsfc(hapUtGzFK}F)n&M-l2M2 z%R{+j*szEYSjr!n2#ck24`Ar#a`w~gN{h`&beC|h>JpGT$RZkTU=SAW*=ua!Mgceq zDG=ql?<#3w>SL#}D(dQr(+({t$0!-0#wq zmz2~dwOD4aWGbIc(l!YP4M&`ByjTVT;Nx!Q72uiY^riLeqPcNhRg5T(G|c6E<|wsuZ}s6n82qO@ 
z$()g!dmJ9{OkXOW3qC!nqHxv63RjE#cxiRxX|XTZ7hZF2%G}QIZflDTo?#n17(V+u z8)Ra&)8co3e{T5qj36sX`1usG^DT1e16M(LkVb~n2c2?3)sN&G80~4ei^tp#O2-~|S zKvoS5ktL0b(x#>`XI+OahUo_HClPo&+`Uy*sLkH4xY9gqLk*p1WS_{0{cD$_7q%L` zq^T)#$Lb%MW{OLp`603P+9#QzvrE@myn^2%f7i*lh|ppf)?GDX(PFJ0*JKz1{C$gK z-z9?8ah6$ueiuHsC7{c}v4i8sQh3+x3Zq5%kymdiJ`#LG=~SZ?f|{I?rDp2*yvZ@p zBLH~(V`%Tvc5?;h4CYQLvOunP`(DYpJ;-Ag2sYy|$*QHRyE}Ed_ zT4Zhrq-DsD1pnvvDo=!%8M_N(n896gq$Z?V504E&1I(#$mZm8E1{XwxS@aBjc3)n^ zmOARq`BF7t!I7W}>qsWzw@)myrM)(IvtMq`3i^aN*Y{mKvf%pc^TlTK%=WixLs^%2 z95I3lTS88Ercu3gd-()ZQqIAm3?4sUF{J$Jh){1#xm;#*Z50RJ?*lOJm*+l~9qx=v z#e6s=W>P5+i8nzqc}6!(Q$xoY^=j4UG||BUD5d<8Cr^TuflYKl#Y%f>Mnj50OSXw< zKZSNLTesF(LuyRY!AtUuXsTaWp~+x4$Jt84$=lhxf|m8=#cZ;b%NY;!{QjOAQq*D4 zy8xYIZ-LV64t9U*^Apmu%ndET2=KLbHs)QTui}&p@QL61Cg`9!@+2~igmd1!j!vmm zuNVV-?@VG-GFjCf%o{QH?lPjfeCR{Z`~v$eJZ^P0{;bj-z&vLpO^9Kk7ZfD4IE&@+ z&n&41m6L_{gCiPb`@8(YS?vSIhsFWPfU`CHzZ>?t!2enH&LVPmWJ=WN?+qxR=;GG~ zV?50*>PeSwiXbj8q?|-?cITf*NQql^-lHC z_;}U#lNB^n6-Qz%!TgcKsG ziwfyr2mAuP;@I(b(cWM2EfgH`;Q;mY)+pBCx9hn-c)(>E(7H8_o@QLTLK1owXD7@u z;7%(SciQn{Oo<*L?tb^tsz;6Bm)E;c#1`% zvrMk5yVFW=dHMVQxI~=;cXzY0J6Q)p&!cE@K9;Utks;hQ<^VJD2w zs|tn{RdExg5SUfyss+2+DYk1If|KoWilwp6uX})#T8E-i{Gu0Cao~E)eS=Y@Ft}1< z*3k#i=i8}O6TtrFb(m~~9Jg#udihfWxM^^<`>F!o|6eV@a6e(FZRU>OFq6K+U)JOY zl-O6wN++fmvIQWijLeRx&^Dm5>z0I2_WZ#lqeeA{5vHYkP)KX5nsR;a+4z2SPj{E*V^v=bZvSBYSsdYhiEq z0XD14{DMui(HT;I#Rc|Occpqf>|hLl7TbHF79Fdo?)9vJ(CkW`g_+ydy#1jFBfuFC zLvHBiX$oA(DEUU2^g_8P4PkP8h_iWG1`-AVZS8c@4ibM9XT&VQ zGrL3hJreh?V{dNTp<6z79qy)P#GlluL&y6vd|FwY+`OHYFcceK?+ zCF=XD<|n5KX?94WN~0i5C0=sF_j|(Gt>>DoiVmyVslPR zqCYClP;_i$#^gVJ;B&QiURP;<$r-t)fgvKZs(jE z5>Pjazy4ErIT^I+@~_FfoNc}rRUqR(_53{f)w`U9SdwBy#x#*Y5HNLUij$zxJ8|vX z9@^8vxS*y__Q b+8(%x<|da4ee0Dk~pfDjzk3~TzB!38;tAX5TlzbYm;GgWKSs+ zz#UQQ9cddU=b-d>#H*kshE9zPl45cxBRHGeC1voj_AaK@{+J`rj-0%|6pB!`tv)a3 zaQQ)?al*1AR*^HZMz=`Sau~Kb&7vR#LI*hdJ+efCKXgnDYNqNC5czSd?5y!c5X(4t zy33VYUOhz35KkNJbT&-`8!?qUqP%%hTJ7l1k4J`QJ~ES%D9LcHzWtAvD7gZXxFicRvMKAHdd(*)o!6fL6V1ztE#IU-(P?b 
zGrzut9w5UAB1CD&-}|nm2TzH|$5Ti5^@S}tKF#jH@VP--w?TleL+*$mjA1(W`CZjp zgo)taQz~7bgmN2b83ZOpE#aXhrz>ih0xNRqcGBAS-w4~ESOItXf0mp2j-B2e?Xn!z z@Vd7@M|~grelVz6jO+0Y0bx|Sa{MxDzqVky?u03-Gly!SP@SazdE8H2i=Oa$Ckni! zU9;e`a&Bx-pr|1NSo_4dNib7*r~>v{x#lqPY2a&Mk}qq^@sqRSpAa=<_Kx11auR)i=V|T&>2(vC`%+9fdSB1vd;I=p zC_|;EVaW7FXkj*=Q&_MX!_5W^5AcJbRVl2G0kn*;ze69~ zprYhBq7-X-$q-zcDO8jNrD|$(=f)&UkCw^VHDp|a8uC1dfght6;*cA1kaVsog`LcDUKUnl zl$A9E5~=3O_&$(O&yIiPv(-hH(<{@a@mI1atF|nLMimSMs5DeaBIGGCTAO7XC#VL4 z%lQUb&vS{;VWq**vIFkUFt|{8@2bhXIQBD}@=ANo%-vU3FF(gboB_|^(QEb6N+=Tx z;hZA#PiriLV~4rV2bivnD$2)mNwTOtj6hdC^&|WGs=EUm8^}?=5II1n$s4gQ`Qci< z&t+~HB0Kq{F28%ShrYnL=sgW@dw1Stb0TEqEVVd|>#r33p>7}(K|t43ILi9VtH2l9 z({))X#Yw?A@{Ni5ThxpVmSqJ&Hkx5oE!(E9G=MH9LS-n zTS%D$L;nHXdw_d#d;X@b?R8NNG;Kg4u${=^GQG3>b0q3LNdvo_;eG`kq^g0v!yT}& zwK|GSq;4WK@vbf&jOD<^<4b zd0khQEY+Za;IcbF>QJ1wSK0f)YXDT{uru}(h*4T-NlFW#k=3~V?kE`e4}}!Fl=X)C z9Y!*%C@X@LOF*e7D=wQj&r30K^+sC5_|@>B7o&#dqd zZVF3DCBtCUB#a;U6ZIs9b9P`2Fg}E5zkx8`0FQ|RLa#rg;$E^ILGeBQA6w@XooUpx z>5gsNcJjt{@+RrnwmP;u?2c_)9ox2T+qNd(x7N&>`PZE6wGW@?VDDXZ*IieUCFf30 zLoj{teBHs>yh}t!CxGREaRV3~WWGi0TbIx65!_d+faO{7{J2qusc*=v&fPNQlk%8$AruwFD;G8S(?Th(y-xhvOF{`W;5ZPdG?POC|+)f_B z#?K{&=<@2|fF${l&iJTbp{c8qf?R4GQimN|)2O zE&jJJIO}rl^Epv^hyL%!n%=*GV)WjRb1{hR30?ViR0w#rr8w{R8?2-JW~IC{o{Njf z-$w54pi2*^!u+tdZ`5v?--F!;Mw2mP@m$E)F)T8CYC+`Uf-qDH`#GQFc_1Lg{~X!Kz37?2IRXZ6a4B_MOT zsmII!f@}@aHhs-^D|~cY_tmj9JAGrGe7)nI5=EjW4~Yv~#7+@o()5*~#WAJGM6^ND{83=GB}sMu1eAvrcwfdBj(Q!`8S}!bzpw(--Ib$qEr?}Qg_Wj#u*rVn9{&88% z#K`0&XDPs)VV7HV7TzM|?#4TM>R=Rj1zpsh>3UvJSOGt1DdOp%%TVMW>ZNV0ah^D+ ziq+0H@RDXB%|@1b#5yg|ZQZM*pq7?9S01s+u2*|6#gNO&Sd^6+Yaet$QE`mH+UnRX z4zW*Y0cqwOcZEl;GnyEr=WIExA#ufs*Y~+Fj9D#%1M!?(D{2#L3@p*euTG&)s5GEK zWm+rmr-Iw%BSh7;&S`yg6)JXXLw28|0Nflt8N~M%NVy0P-m4?646u?qD2gQqNclH!o)BK!NnSO}0<#<|9Mo~lm zg*Bu~ZD@e&>)jytJRuC3Ub)K{;b7^QIC%;!_u7FbWC1xlQpe{Vv6F})Vbyf4;)fP^ zp_oS-9*3&!X=1lQRFIb&JAFA(ub*nXGplOLa6h9_p?q18+U6U2(=KXzXZz}Tx~zmp 
z9wXxg3TbEn;S-d`Yk8hk)bz;5Suh6He2|R>n|mNFE6>U@_R?B^y)nEh+`3M2CBHPP|p<=L77}C>jY)$CMpbXc4 zLDcPh=ou@&bI1`hC+ejh-;5|!Q2CqW!+15dZS(*WR!yCy4oUjQlPlz(AgNPqOkhC~ z#J<_ZneBH8s1z;<^o3S$u8r0QK|YWWazhFp0P`qoqKX>k8d*lVnhkhX?!M_43Z-Dm z#;dKd!%kMMGU8Z=cA+M-JUmQ2-Rt2>0!v&y=cl zyrWmIrBrVCXm4#v`YeJ*aYWDf_8ri}_Z8$VN=oy=CS7ry#6N_=o|&u~r7yuWBCSv9 zV!b>3YZc~j!8d*wq$b)*DXttZXcO*h#0ZHc$;{k2ZCldNw8?S3Qd^}ZRk313%t;8v zVTOLAI4N}~s)}IYW0~5$rKO`uKZpqJaE=UKQB@nl9C<2`wg`x!Z@jtmAP=GfMa>yiSOJ-!PjiXw-1AOpD{;GmA!eFH?wNDh%@EtcuI1O=qyu93>USm{8jAC)g zi8AX*Inf2a&p;S@=9=mJLu2$3pwIYWkX)FeJeuxM66nR zxaUjNj?ZYNwr|BHqWZs)`@J?vS zanme!!c|eIY5K;~ejl1zspb@La^|UGsBkXLU?VA>F4fc_Dp{oXvjTS;>NzblbjBR` z6UnH0#;FwU_A&??QdqQB+Wfw|hEYz*5J%d-^|nO>=dnDa>OZ}+`oP-Bs5bww0 zj_qfd!E8@>SG(jX+4`i3vVQ)L0$?W1cio`Tzbyk*xT4-xa6*H_<2YKlL#bIpAp@|} zK0`yx6V!F5^lL&NZ6Ni-wM<}fi%4bw>4oJ~8aio|iDPwY_KK_ny^)4RtcWicYb&hN zJ!lmeB`TN{7^nofCqZX#C@bK+r-J)dzF{&=alfMfOtViF$JsJa6izt1CMjb;b?Fi3 z1Dy4Pr09Q&W{RnVjEGxV*w7Squ15=I52*393jSUv~@?FD{#+tU07iP+KBX7SlnGsqG)FP6JGKqq%_J4;VRgMbmqb@*HC&K1%L(D8*Y z)C5pJw+=vT#sG$KB-q&5U)JSEm`ql>3-8Q&nn`vuo5%LqlIV+&iv zb4|)gc@~F}+>dUv#`~rjCDh*;foH>c8;dpnU;}29A~O6{a6v;GIVM)00aQ#l&yP*h zUFGSmz(73&}TtJln9{Y>$-;=Vm9Q(uf5grmr&;sSHib)f% z{uiMk9#N|kr{WTmw#aC;3j(BS4XVOqOeFn&#)|O*KwVNk1{Elqm*j zAC02@tE6akc4}!Cj)G)T9kZ{i`ENQWw?M|xCJDf!l7xJN6BOoqZ;Zv-%*2N@2aCfM zic=KTF(@A$btBxExlsjwdq2rzlqTjR$lLU%NR*^)a6Pdu;xBVKSRlNs`kX#NfA-X_ z)WL*HpBPe5Nqt^|m{W=XSPi4ly<{~&yqWo%C=0G`yJ=+NkvI@cpSl2A$uu_KVlBIh zV>AM4V>uNKh-dp3CFgV-7y5OA$#Hd>*Vs#JCDM3((qO!s9FT(ijBv1`*zH0G4Vi9X zU7piQDm6VE>w<29Y1SMxJIG_zXK7Rl-V#B6Mw>8R&CEAmp_29fc<~GO*yOADQ5leF zuPfM^X(ajaN2j*q!`XUXJ9X@Aq!RXp6E|O^=ws&J{QY*86z?<|=q;fYy zg@{Y?94-q@^I5s|MpLMgo}Q$g8=MP<0I9DuEmf?DXcLgdM*y9WS{6JXcA%!E(3ME!Z#iT zSM2U(iTkyWJEi(o6Ib^yLf&aNE%Yl@t?~RauTqxEh?dnO6qQFJCqkoeENN41Qa|mm zPc!(Pboe+eDoeii9s5Yah0=tqEqi&~z%SzUuN>_aDT>~&?xTF89H<6BZ?>;pY-yaw zq5i*o;O`ganjh^inPV8g$qjOJgnK&P@J}cZ-SLUUfypl~;^+ogzpff29~Hw8hxi3x zfB)pUP))jj+<^L!P~l_<`0}mHUETNmp}Vta8=%k-qDC 
znc1Ss(#z`FDhsXPo96z@Q5#NLtZzi8?EV`5E=Ex)2bS zEBHC-=H=IQ*L%_7_wm)+fHHz%R?B1Dze30X{dU=3O^5)yQOyi5sI@45sFT54Mxqe5 zebBd#ow=XwciuNIej}je-Yc3)6`y6Q%C7XOLnVJ@V^X~-GQ9h-?wP2dAF@A6-!~+Y z3P_J7sBlh9O*mnkg+)F+FK+S-C^x6#=H!zxcH&N7ZRcHr(y}X5P!g(63u|O(zdi+T z*&}Ord*t^$K2@1|SQ2lfbd%rA%UJjIKe0^8G}z`)@%n z-$0bO?*A_7{h#XI^%I7kPT2i9Al`h^i6or_WzJ%5mVOI=Z!|-6Qt`PL$E?;+>s)qda^M1@$QjT@4u;2d%#Wvey|1=D z6P`k5*ICD)A!I?5qALAEdR@VViIweBMm!2|FLsRQkbeL6&JqzAE1)bze6yg2$x-od z_fppo@wi|Amez3oDg=fkO2uo+#44S-qD#{LeSxAWus8x$5ec&RS2XJTd>W2kN37d(AScJ`{>LJCTm+J~zz+Td~se?W^$ z^TmS9b{yZwxfyLsO;1xZ=dEG0w~tH|F^9bo?|A) zk4l(j{7Hco-$|ym$_jf~A!O=)T(Q@cCP&S)Q$~`JDF+YqZ%f;U=rb-<937Pj4=9tg zl#9L8i#UKKG>xs+V zL30}k3&8W$!3q)m?rC0vKFjjk5m(>ae-I0UDS*n+0ZpT{?w!~zscDlIc^;xQUAPc4 zH9HW0T0z6?b&!_I7Gt;yPd1ueN}(u4hds1HjyMS`Rd?o?+KC?#+Rep@W$c>jR~9lc zV7<^G$xd(YUIn_Y#@~mZJpqz@R1rHQ+v14WMh*_(XCHi%Ysu*9HZz0hD@qkVR?X}Z zzhVIQlE%173+})s&vCK&LU6E)&X*gD0qu1`&1hTY(}ZQExbIn9ffN!>7r@oy+1faW z<0j$pbk3abB3v7pNR8ggCTZr7(l<0`f@^f{k(b&U6UxNo@IyFD#3;0IMpMFW-N4-< zDBalU`~70>$}r7tHa0gAb;1jc!#9&dDoLXs~ zCClHjXdadSCb}xnND+@z#M^-z@RsOg)yHrpg-o$d_nSYw7z??Z7%OU+nw|rakqCCo zsdVNiumCr3WN;zzl9{aqLh2-gD({=YpMYt2!tX9@hdx+&ZjvR~e2y|d7%}4;y zB#O{gabfX>mcIVMr4E%F?UTdP_-OfKvJ4D0F^*CP?0_{A6u3NdIBJN-QDN7#(pdPB z0c76RGm9_?;> zzAJ;t>bk*!Az5QNNryNSP$^23p9?UQA@EAQopj@ooRQOj*%TlVu$?AHYU_)$ZkQLdemKO@0HHOQZqYLu8E`3;saQ5T5yaa&6pflew|&(0T#@T~FkdIk&py65{{%0=zsr{Dg9b-w+?6 zA9I44{RVa?lba9X@UstDeO@*~kLQRQQ3euD)c^xtR@iSlKmfTv2-F*qVdke*ERAsk zynE`3D;Z)d#hwCZl*Fssv(jS73t;tT!F6=h`(i;v5;0wSO!;Cdb?t4l)pbg*p%~KE z=t}=SGuLuJocLg##7Tlp6wKc%Zz1P5@=4{!2>no!ieU_4ikKqmwO1Q1aw4zgsbrE0 zB`LL^Lx+uP2Q$jFc(xCO`(4D%5C!tx##6)pp%NaSY|iJhG}qk&AdTujmomh;PW^ihZ0EXj9&U&7YB zth;qhEf?fXY+&75vQ|umix2p>`uH-GVEwu!Dl^}sZ#kT*)eh7lFQ!#;NO9~lp;m%!71B(632qY z!xnd|6z;ZAi!ZY@Yi|{B{2+k62*P}0Q?)tqXl|?`qdxZTqB3tyFC;AqjDE3kh z2>!;OX^h;pww|NoHmLSW+bbLv+_SxBo1wck@*I3vprJLS??yoly@8V5d>$p2?K-&o=6wA%by{sWQbe1z438bW^V6DJkh;~pA)Pt?W4m9qNEkf z2*Z>~C1L8y)W6R#A*@atwN=|gG@oh=blqEcoZ?&7VN2!oi)y(gl!^buyI%1;wQ#lI 
zrv(`}aUv8t)iWZ*YYa1I&ss1GZ|A9CTNIQ5MCACQGhaN{(4W40wjV+auoON)4?rv}g(poSBZ(Uy zuZ%m3$hQ_xOk$l5iZT;Yq6xUY3(xz$84w{X?W_w7*8WveGDoN;Kw`laSF%RRh4wx z(0+us{=pIK){PpLLDxasES11&Zvr=g@$jAG8{ke7khD8Gz=9SCw-*M6gaCSoCS$YY zn3bog7X+1W+dsE|96$1%3{XheNl-7`9J25<`b=o3{`G?nmhhm>AY3<~x$ ztQCLKA-~XDS^C$sXKOb`Q>jz6umj|OqF?ebx%d3t)`|+&!C5_~4L(#ggAGusbJu$w zCwdAZU}wje=X^9i9;ABors#lmNmMlr>hmAERWzw#S;e}{M~B!RMH`mv6+X41V}aj0 z8n?S@Dzr4zf<$4paP^l3N$}9Ne!xM96$0TUi`DeEJl1G^?jIt2;i_D;32%La5J**l zPuSaKidO6>&n_DM?nX>nSTo+=1}5uJN9nEf3S}GcHH{{gnzDY7+~+iJ3{LS&F8O`U zJ@E~|mGM@=1SS0S4pCJ%7e+GH03Fll67(tAY_{{l=auh~CL=36CP6d)n5@(Q9nK)4 zYH95D65aGxrbb5PtpWZ#RhW_68smK)RDZnl4wqg(ilbeB@eMO-OFWVOnXNZaQCegw zNrym^35^(qAyj^0s63=;Hu>}VgRW1No>yP~KRI_U$IxM6_Nz&z!K8fZtO6ow} zyR5)~!4zX>OH~N)VB6$8TRHOZ(wB2O7_rqz4mdl|mZzWK)%@Qq1F1Je$n5{)Bt-bN zeMQl3?K%OhvG5*ac@+pT`y#79>-_&9O)RXLMZX6>!M7%J7*wB@IE@S8$2 zgfp-MYOyEfetsSodw=m>-Q;@+_FTw+kA3uvGH*VFlQzeL(XzkDo*((Dgwdmlv$ZrlA_i7Z!M_b9KdFZsiT|f-b2L6L04&EB>CC2zx#1B7 zR^d~R;K$Z-xW11H?iY4) z`8Z^@?@hP{Bc_QHA5)>JmszDE|UtWViQ;L-O)4B$*7*2a|W zEXEnhsN~>e`#Q~hqN91|!313xP$s1UkGRXDL>L%?ZRL_mtFbhi76KG^i;F0*G; z`*LF@;%dfk zn`lGyd7WC$;zztVsM+ZpRp@Xouy4Ev>Jg#HD8&#m(B&Lr^SrQB`8_d2h4a)G*=DT^ z2-#6fQ6E$35t1Zzjz7OxzcX#!#M7AZ`JTJd`CnKiXbNx)k%1!w_WGnsjqWj@@U*6i zhk}$oP<4j)%W2|oAZM4RdUY0T6!zmBCBb)umum0Qm zAYk{|b#?Q)10G7%IXR}kI?Cjx9mFm^=&{TJ4LC87Nw#DN(ywCV|?@>`*GY5QQ*pv4wvMrpPmE3#8PghI&szK#9X zgM!F=XsAc;RWKbPu>);6f2#JNw4wXfyW7b`ToVHhVnpG({;@gEH-zK?5UEh74%$6&us`C|c}4&A^&}0~rx(;h=Y7pdbo-eqear84a=tmi_EzRGNmRgtQaq|I zg7Ep52%*)R2`mb9r&WPIPBgX&Jc)$4fA8|N#+##1-Le`UE!)r|AoGF95{$&>nG95a zwCVRO!WPvOCXp8+%aDc8T90k`VhinBcL3%fEARJ6!~yfPEGUu*Y0Z; z?2EU5R+j7MMAr)%e4h}oiag2EVRxV6v(4|Ji_Oh%Re3XJ=O9^c$M1c~Ps+=StjEB> z#`}i+J@2BUd>$-LZ>5GiYHW8GwyqA29F^tQ+RMM$oOR+5rk{PjfxRMWWASzW&L7E4 zVov+mqLpO2d6Vfp@o z_Ku$aE3~_v0Q(K7cdm|1jOca|e$9Hf(6auuR@hMyx|xOec_#SYxHQF~5@CpY25zno ze@1kL1+wI_1YOacMr>ndmi3)78O<<%yEaC^&m2AF%bA6JZ!wSL+qeh_@ZWE38&!u& zn`B{y!7N`bFU}?K*Cak7y<~N}CKtS*q@97NNkuC3aZ8Mv%CiPivLu9Lwi@P5E}HIz 
zK=ec-C5hu0$|8kwNn6KPFn#5s4FMNky{l-VR+AU+Y(qnvT{bnOS$AvXtP=b9sRk-! z3=vNp$R$hkY>mOG1!nLNeFdFV^LO(ve@^+I*L!aW=e+AK_UNz7>=Sz*(vhQ3?%w|LrrlE(C?QbOutd=Nvd9SNld8vTYgYu;{z=Tv zv@~KTV=_f~VnPrhMW}X8o7ofG{%DH6BdT1iJt$0mvFmb5Adrq%D7~L|p$H0BVhqmc zcaRG*CAklg1wE_Is;4gi5mCf`NukTm+Me2?6E?=vej?Z&{`?VY!S1gRaq1o$EOW(X z|IP(QlB~3X094J(>FAtekdwW^@%@VJtb28va>aIxcleE7qVP9oN>T}{y1kFz&kC}p zsIo6raBu5F==Lc3uGrOI$~Dyh?n==Y zGpt|Ja?@u_F*U@~*w@?3dUq$IqB<~gB*@0!1L^+ptAS8`>_y5U((!Ay9L*F0J0~9e z-4InVc*kn~6;S~b?E53^$)%!oNcLOzWk3nuGhFiU>mTzYIC^)WZ~k6T%&(klFbDw) zV9aQve_{-SXeCn9hSSW9u+>mEz(B>W7??MRz*Unu0^c33XFTrQ~8MCl24b8rWewM?ua9>G_tAM0?yMUs#u z!seY(+_YwWhUF8tbkGnhCp4GnFgO;kv_cxD-tGx?Dng$bTH{%IlNv@eH1jupC`tTX z#6`zqS8nWGeC>HQi#Mw9o0Rl>1*L|v+PQ#7ef@!T-FvE7mYLebB)` zGjiz@%-}P1+a-{Gy=YO5LOO7f?VVg_es%5eVfc8yI!fo`>s?7|S4Ec&=vGZpZb~KW z?E9mPSt04=ele|P)fbi{Z=kJMfK&V?g!M*Qb$2LV#iFJ=6nV6^j2l$4&*syvz!=3m z0;P4rd&|SV@y3aF-+c>#`<2Ney;~M zzic8E(2$H_uw*cX_5YE=q?Aw9Yob69?f{{)Y^$UKr=q8b3RIFg@mE+ zlG9^wI9jDvb`vT=OtTW5CtrMwS;~Wevc|PspB>85HF%MuUi`*uma7jnb2`N~F=g$q zE?QMymE_M=UBg9IfEXIAjvWWD>9p|3d=-eRx^co}Q!?7XUV`=F23;?NOb>v@i#m-* zMBH9Za6d|r)d%^YXD6&mS=ZAs8#ByH;Bu+q_1XdH4|Zz@4LyxI{!t++Q7ZiNL+XNW z2BJg;l~ziB&R*Ui1s4@*nVDlr7uF^KIV#@(X2i;FT-kGPKuGLEjP!c62^ zDJe=M4%ISxVsDXN^=XdPG9+VHwtatagGK|>i&Q{|eBfuqsACXaXM#!lWkcmF<0c4~ z5CP}}=vh9lkwbj-i&8^_uJEv<(Ex~$+-a?y)i9M2(?vCEz#?VxbQVa_I^WWU?(Z0l zq(SB-8v}uN18ebxN=WrYHV5~$ED>%4Y_17izTjBbcMB`nu@5+ zX6ccQpkGZe_(O$7l(Jr4bMZ)@?A^35)Y(kCt(Qf+96#!N9|oAqPfsjqYD$ip9lf%Q z zTCgO*^JtULWVV-lmd7TDC_?IjLMAvS^@WZ#IUcJ?XudrT<(-_~j~`qroX;G9NY@v* z-l1>e`bzoPN@?3M`;*zi0M@@-#1gHLKV&Dr|NNF=jC@5uK|#X$;N@tM0AMHW!1Yek z5-d>p^P;d~#LUz?s3yIM+O983s`73}gy3Q7@^CwLapwdD02SY?(WFMNX%K-nj^E=WNk2=nw*jdPwG03( zEo35_wl&8B=zfGUgFEgOs&iji&zNu`@A&y6hlBzsj1CadKCqtmE0A9Hqoi%uSoFR| zef8GAtqDKE8V^eY757$TV89atk(lCp_wcgr2b;7==*1ljL*9-A4|i3PX7&*~9qozQ zUcWsCkto+t(*4F_;)Ko&$yiC#mhynG7y~JIh!C*y*hWq}q`Y@qzQM~uU%D}|1G%X_ zvf)ZUJ1uLbynh5DA-(7Bjp?*q-TIV?0!o|`$l0>9Z?bf9Md2enU9^PANMjS*PNFp8 
z)aV8I`IZd0ue4c$t=UyR58}Sh`AC{t8b(*2<}ZhwdOOQCfmsS?>c}tNv7w%oKfvbvboKECiHj z+$??-4qIy2jkOhBqC9>pg>{2r^^f`!Hiy+lsHb zAFRK*T|4!9I%h^hm+fjbVFpD#=!vBGs--2;+@^E-SQBN5b=85C&ktbO5DzQK_Z+h& zVGArvTvLW%b8V-n36Yw6_cg6Y(Ms$5Uwms;Fa3{eUCw;)H{|9zqPDEK?ENj7W(&Qig{1ev0YrGFC*oY=@aeopF;P3 zC{Fgrr(&CUAJeNJ-K{%?jI`hJf9{qEuOG3a6$GbdQwp*TJom$wiXdF+X6Ch(MKVm7 zwpH{vKdfqaIr(?<9sD={ka4EmP#H#7O6mEaOmrP?CAU?aX(V(F=oa`^RKeg+8RF5bLJ(Ln;e_fsSCNCe=bP@K7BpasGrV)%`q>YfcHE} zMJ0=LC^~BV!ygNKn{@^7)~32TMltnsw)axg-Fln+A}WbqrX|xj-)(se`FO=WMe0K6 zKLO6p9AE3CDf$oq&IJ}0k8JPC-*=i$dvZ0aQ-nuLOQm2OLU%&H-j4}7l8h~d=#moU z6!|HY`jit;E^;;ig-W4D(n8xTcLKS>Gzkb%mO1obXfdM$4l)V$G(51Tf}b5S7Zq%6 z*3r73jPUe^G2kN6WEF4_y@eV5b{oeNb#|+Wq@WDH1nYxMrn{>)B+L5KnZT>X;uzl6 zEN^J&vnngRjH$(ad=N%V-bbk3!}=Nt4iA&QOC7l?wM2N!77MAZJyxXb(w^zH8M-Y~ zD{FV&r6duCK>5F)T-XBeUJxvTquXM!GqMqzA+%OxzB+|7y~O0;Fn~^%&%<1GQXP5l zgWsQda+$Zh^W|ija#^ZY$fGju`3cDc%GUKU#Ii;QPUCWKg^bC{xf=@|f2Q=6aywqm zrik*yeyd=2(x1Gb*SW!!q+!prLaXAA*T!IXhc2OB9Gfjw;^qki1}G8V*6&L|w^G<< zw8>f3XWqzC`S_y>)SN!?>7SM1&vDv0zP%zF7vi6U7eCMI8jz~=$8GARAY)FxvlrPh z)YU5qMcM7lOC>GZ60s|y9kHoa_+EkycaMMm)tK zzQd*-L3CQsR9M=QoPhtaY>7$16rxV{W7uwC@7${d4JRYr_U76~&AX%2E# zab)Xv$P7j$Iali?f2jOpj6Y*XJlM;fS?#W|E%zMI=9N(Igi+BMy-+qPt~PX;CW*~s z_V*o^p=`^$b;0$m*`j%pK3Fc-PMOxD1pMUW#?k8p%Hiv32^>^xUVA7tMYA{%Eq`2A z)?OL!InCPArj$YrbNHX_lXr~`UZnNQ&6}t*bkBoM)`>HuWN=LH`#&mbH2lSQo6-8S zS@>`^unZplfy@)D-Uld)DkLR5m!(>(e_4B&cj&B^N$SMAXy~!S{Q|Cp8{JKZ5EDGEI-k7rV6_FP|>?mZL7>$NaDYeI+GR(Dqjz_x} zGfxayKfO548a_S(zyfi2qMd84CCMHt!z+YvAhl?cu5`91@A2JM{i5wU9AX6Ba|y|G z>Cx69sFS@rg*dV$Eseow9Km9(ofa4K#UXB<=v4w*(~0;?e1bqd+rUwtDF<#Hd>{Y zUkKrkt*R)9GG=JYEkjErKEwfdyv4??sMbZs{8O>JRd@|D>I1%^9xYxeJ;nZ+ya z){7PA)^TVY4No-k_L9^(kTeF{w`EfPY(e`o?iy|!vqxsWJtBJGD^qtafvt8KF zI3SfkTN+r!NQ9Fe>iDGukwkSBKehI(tsVOEjvXQmASCx;nwrj(#zd5;8K7)b4KT)% znwV0a_kF+7D$9?HX;37r-`QNnb25TpDHOGFB9q&C5$$ZJ>$wz*&3m)h5IoP;4KmOr zj+hET)=PL4$(7>5_ZEwIJPSEE0G!BP(p*GR?dQ<_Z!Z8fTIroE5(2Be%kL5Q0B_f* zmw3TFIjnh6V|*|Y1n(q?VN=G;j*-kw>_4ld+tIOqA^*PI($kx3j3iFq$a#ye*zz(G 
z-VjFK{(%X8cYaf6hQq;}Lo6G`yQ|c$2H|6A1w}^{wU(Xhhn0lJrjTtCD&4;^6Zb&O z9kbCyuSGvcrPBy1G?*i~Zynbc6l{OMv(ysXrR)@k`j2?2r?$B85=sqc7yiO0-Ui)C zJ&PPYQlO|89fRs*8q(X(Omr`KbLAD~cf-`#QwNi5rI}YE^c^`K)yQzyHi{xb3rxhr zEb^oXqLckQWVwu}Yn~pB0HTT-Toc0uCQm5sFfvqU#E(O)En^@dBiW3D7I40Q9!Da~ zi5TS&1iv+(+v{T=G#(w&eD^9pjaqJge)6^_TlFlzuzvLswU72RLQBUEEy^=hgH5k? zGyC+H1qM;dFgM9Isz?flv#;GxbLm>I$AvR{W0OV?N3yy*glN0%oxdDy?LLS`;!E$- zG~=qnfKxb+P-eSGnS)2&85EqqlId;TFc_oP>TOB?1_-oW@sYZ&sIW9X{6?0dCC87t1qg)`dFg2E!M`+;z#^uZV zJ%ns;lAawaA;TD@oD3exzu)ATX=V@eQ{u!%p)TV$jvVf+`^wyyX5eZ7iFQg6 zE>GL`JR!94<+T5V;uD3AB{a(We9!Ze9 zcTu7~8CR5c7L*A!dRR180cbhJiOy*RZ0+crK)3~gY#zJcwkI2YwNS62pBr%wb@1Tcb{ z6i=mo58BQ@oiBsTA!1Hr0yI&|(tSMiN8+q>Q9hCkrvYDOzJ`$#YO_$i49T|imAVK*%ZG{l*7Qd2umpr(*%(DIxvK^n=+QL*6if$4)$8&t`i<7hlisH2pSiMU2&My zN!gcinW$HN*$&+R20B(p{=gPCYPMLPVi&m1SzL^LS2Fkrn_Qs#jw-jj-#x1w|0CD^ z50MT-rJ4qQt{{Pqb=EKAbH|X<@<4k?@9381EAc4(u$qX8%)28{Ia+L|WFd?Gim zUS7POdi#J15^b?ISc2;vby&mji81}CghPpRm?k$T3#|3#YntS*E@Z*eH?)6cxR3q7 z1Rei+p@Sow&wH>aKj)A;p7BnGi}v~k6dE{d-b{T9I>SVLJlMT(u~PN8JMr`yBN%mS zj&Q20SZj)J%+%D;(A6CE35uryTkg_u)4LS<=Ir0WTPxrx73rqZfjk~{k z=M972Rpc}pwW7{mo2T6>7JW6VL;UoGFQy>Q-YgO$+r1XAcGIHb(>zAWSpUIP+Z|c^ zu^X$5S%#b9`Xh2Nx%+Vw!r5?Z4l2)WEbo)3wNH4!f_BeTyNBbJ@) z>})s(GtbiOjP@7;HtmEDh+14Zs?!?|x?5 z>+fTodP%EiS}t$f@wx4u{jxYs^m_!LIsjx{6<6h|DOE_b z!dONxf;=mRi>b&hefY_~`F4qZ62chO*&4W5P1F^$j}}>P$AX=$y)$}@KjfJfIvDWx zUxzC}kQgv}pV7e{AImFpVVA|PzFZiFsVyTxDi=kB44%l9acp*)yK+m0WlykM{cFMc z{3K$oAWZLmq`@4(?4y~n&OiBnqOGGzc$AM9e{O|XTh1tPm!#|bbq~46Iq6_}PRu^? 
zqYW%AV9zrM#?s)~J3)zV|Ii#|gj|g};-IV+Z;ATA)4viSl%yvH)Gc%Q!NCP-dle+_ z#`6*Xau2GQ72e0uzmze&91K$3*3NW`h@-B9H)1b|kd%EjD$)jYggN)OIJR6*xkiDZ z(X*_YnRyC6c0c1IaU_DCrq@GxVmC8+$n~i@Y~*VFH10uKidjY_fwl#HR?zn}rZ;=4 z_>M?Gs-*(1He3(@I*5d~Yk1=>SrhRI#iulRGDSSnQFHgGI!7x%5X6M^Cj(?^yC|i4 zufxO;ax%YQClLFUel}(iDe6g_K8#IN*7sKQ^RO+M+fNYKcl9Z=>23+dboCdt4!p5L zK;%l|ngO6OEA6DOri^wS zG~o&pkjw1v9+5*5Q~rbrsrKU+)kH_nn(T%H@jJYTMLoQ5PGDt+Vn{fe_t!e}1%oi6 zo4TYJTaVBi(D48mo&0@#&krTU=sj~46hL+f*YnUtMdKIN#TPb!$Npmwsd?eH}OBs zPfo3J%V-}Vh(6VkR$XEQ1ZQ-ImIX5m-(R_-{H8^Epl`O?SARB?G84QBbF|lHA_h^k zBzo9-UlV?`Sj|dC)ikh6c?k�Cl~-3(%!#SPCS# z8KXYV&htB7(jDQ`UR`V59tAqSF2VPDwF8^6pM)DWbLB>T(ufuZ87 zn~u6<9D((T>r2Yk4+7^4AancwV(S~iG*NvCcB z`nTd)jAORDCmtOc1PCbEtwTC0Bv^QNddqB;H7!wvB$x~l0`Z?s-lSW+_1f@CLmrc% zf?X?$bu=q=5!H@XmZ3xIh)qdI=kLh8@6^@m&Od)_3}5>eSQn9SCgZU&%-*nV8s2C= znj#B9=NM-pQm$j`)w2St-~trz2@hw*lT{B(Zz`I~sAFc})?74Z5fi%TYr}e&8_`AZ z{2xGTBA`oa39pJ$TWp0ne!(yC1c?KM&*dgH86wilDy{cvA>@S9{nIofzwJW+*ZH_xBHPIUn)y~8` zH2x+M(~Gj(a${to<|1=ql!!hJzu@Czmdu2}Ebi*eIq2k=&c(YVK!rj^ciU_Xs&Pn;)(SfM8w-f5mR77*zoiQSPtO=Io4S7Btn2VcAcgS=tz9*h z-SYOY^a=GboBPQ30K%xvdG5ZlM8 zWEx4vv72b73Q0x_SrC5Y0ehdm>-=_)!0zp5#^D*Omq{SYjo!}~dE-12^1x!Ohmkox z6^DLe!9uYKu7nLj0LdnGtiOlZY;bs^-XqKI1VC$S(?~jChHxpNOGhavUdpX^XY*5j>v@BtZ0Zxzf=sK|-SBc4!dIhebv$ z11wgroY5*yP&$(S?v=3)z3JkCARt@fMF<&q2qnxdW_FK|N*!irNH#X}f^OGLY$?A< z2L{rSomYZ!Lpqbo+V(eroKHZlID#0@B1v3-;JJHK@`~H))v}TDtyV z(p^u{S5r*fMnIH&`(RJUG?n`T>Vx0O`c(sQK?*47MxriznQN%L{!u zJ78F<&;tQgpa%Xlf+#H4t1DyUb5M-Vp^;6VhiyJ-DJ7Q6c2~!X3a?k=16HA7zcIbf z^xlJB*Qdr{i8rZ(`N$~U&c5X4ZL=aeX{Qg0Dj=o5H2klh12TLL?IV{?=&ud3^9{?P zH!KL@Bo-9`!$D9!zf6dYqJ_Dh>${Jc`-zFOaw&gDzi>6(?ro7ws-RUV!37titT8)S zVVg?@S=5)LVWk^eSK+o3OUqj-+lD8X5%T&z!(97#W|CI+#=)bDKsP)Yk;!<+GaK$u z6PHkLRc?unG?i9W71T29n0T&2RMv(gmR%p6u;m`EU4W9=dQZTAmq)yEak=NS$ts~Z z8^qtP2WEYKS;WMOM`hF9essy^*gZjn@##S**cpw$l0hsi%xn^cwO}eTQO?;ClL`@x zdA0mScag6THeh~C?E zd9gh!Xy+8i*~vg0)&qkj44VoJj{Vx;m&Qb#dV=qj&Laf|i7@ZcO27fOiG0zoWd 
zOFP`xB9RY>k(bKSlrs9%M27K`$z%JxMYq=pPT+OU3$}T07oXfZu9o#3Ih|u{v?SMw zmayyx<0T&-`+yMo{7JFj!(rR-^yYf5>E__KkuQt{^Owi4ppI`=Q|?xLe7O^xrTEH%10KCT0KhKvU2>>K-(c(dC!y6K}HY#F-g0~G!)Mwx?>Uu=)8 z`tr@F%H4Y?QNgeT2gmMT2!Rp4;=SJ&%JTWfG7G(9zP!vUERHCxM5rQ|SrZ$t0*%pb z6M4$mYrm#|b6c9%z$Qg{LM#f(9Y^3>hcMkHq3*voTG7?D@QfPCMPt5gZ?~t~FgAHEuds z0%Hw75duG6cpJbgnU!F$f1wRWf3kzOng0t{#D(2Sg(6QSOU^Oj&@$FtR7!=IPiW{U zJzL`!$M5BgZjUoEwGIAFE4zDdMhUobI$qE*-2@{Fg}?oX_u-yLEgYk%j9jJu z6>fm)zz8c`r9t_^_>88`v>G^l73xnk_ZF;T-0t7iMN=SgNUNHeh`(~B3%7lA%l1j`-BtiLl zOK$p*5K!$i;AIDjnIV&Kx;j0T!k(?*pQ(3iP1H2`GxWMD3D=D=Btud zLqFR0fRPAHtewq7*>+yo=Jgo))7YopO-xkB)e=+m_f7I;`I#AMA&SPGED2p)aP(eD zrE+{buvCrRy*+_toCSasM|U!gU)<2M(dq4dM$!gvndbQAXSc-#AFlD44R6fIgiPt}%s+>;R%%*! zeqrjqpBd<$NX=lnjwYd0&ACe;f@pg5^-rVSrG!f`3bX*S&Gatx&(=uPwm%`SF~QT; zXC;MP(lYY?&Jo=|mtb-^#Zz)gI&52UV@E0ALLf?o*prc`b-70W*=$$Pap>a>&eSm6 zy+WCH1H;InqQ4YfW*ng~DToHSe0Qs7Q6h3Y1E%$@kxuBmuq_2JoRXU-QP6IhIU4`1 zy_Ep_0Jpe4$;%(>dl7`fgw@;~qaa!vWg6BT)yp`3$h5m1y@Qi{*DQ+;mJp?m#mE}{ zNz9YuR3pK@DxyLI5Ed*!h-(U&n&dcB0<+)>8~PRiL{a#|P;i($0QPQRmTt7^7;+)AyVmO>&p+dzfnzB~z8=XOj?^)hLHF7!0G-AhX)v>6B0 z=;^nDI+meGjdNU0|1o@oJkum>V3Wjn~06e7mvX3Y?0QSdfSKBkBEKe{uPhl-~=D<5wLb zXiHaZTTzlv_E#^7;y+~$`#_@;`pXRzhS!?=m-wGWU*LJ4gufz%H9h49=k@JrD*f7A z_pBuYy56h08CFKh!$2T1_D-0NryeQw8b?{0<%SAtR@B#2$pVYWghil()WN4#+TGc^ zrsdI

TJ7<2>BUM?$UGAdGW@ts*BKau%e+gUHS4EnL0Dx0MaeA{Oh-i8kZoBr*8M zZ=sZ5Tg|zf(Txoxz(GGe;$#khSZE8IXKLn>@0nJ+d*@gzYlQ@}oBctt>JXSM(?@;o zUChSx(-zrTB4L7WbY`xRt!2RMd}`V)Oa4>WL_JN!nLJKmQ!0>`EAOvgU3tNkDH3M)MNBh!EPeQW zVWOuH8-^B7aAtJT2(uSTs~M7gXi#e6sWZVIDu*SrH;9){c1{t_ zToVo&b0*xPC6pin`R>f7EfhMpkD#yf68C#>ur6X7Yl|u(v|k$9#vYa-wfTfG-CSdT z=LchqyiUp^;}GEB#E)lfI>FUmp zG;FizU%J-%^Or~-MMhVx>o;`ydN5UsFUi6Ab!9<3A>N z$U^9DujFhUUB~2>XD_WU|K&railtgMruvJ3))w#vJhF}lqd<>iCPC~Wk!Ws@<)}T~ zopLE@zaNx*)ZCK7U7Hs-lL;F(PRefY&n3&f-fY0I5x>7U${|lTsEb=mngTEWK%F<% zao^k?!AyKGcU2q1Iagq3eP}85eRGwBL)S1&%!iCB5ihYM$X&tVGaW2 zwK0O`16ea)*A$xG!QJ9>N>CEbIV!AVRuhB7CcQY62my~7f0P%rnS-K zndL-Y?I>%LU(%UcnEjiGS07q4M2cerj64RkJr^O^fV5`#?L<3(1!TV7vkPl8=s=u4QFAqy0;Qk zy>6!ocq!LfeJvKc$ZG4$9w&b}@x;$}Pg6Ssk<36rzh?4!2l^cMRdi{0KP~>mS=3t!9 z2oXXo@Ar4)u82VVkG%3_pny2?f2|olL`ZGyVGa;wPrTtrg@ zs|JP!?)z8;Udk*jimmjB!(;6jSe#O!KQ(e-L07v>(BHdn*S#O_wiEom$x6Nqqt3~j z>pQDsd=@zXcAJ3jBj)C#kB27V6=eARRHQOgO{|u9%{5EQRl1OuhnMyb0+;N)POuM| z>UuM+tR*VoB^yOMtYEo6))i0+oxz*n1={W8?0gu1Gx`^v@$}8WNdjnPySyVD6;ywf zl%)ppWOpbqO~O~}?0UOpp|5+E-a;Sr(K9Xc?p|-V_mQKU-B{scIO;f4$L5u9Qp{G7 zJaRFI-{U*oht_{+yb&nO4mgRq-ZcW?&Y~8>02SLEbj<{iLz*k^mERF;0MKVM-rX1Y zuLOUmZ0{B;XfaLH;&&tMPUpycN{9#C{a3$_^$yRx@XL7a*S^5iM*^g^{VhL^3_k1_ zaP5i+ej8w)b{}S#5{!12+|@laMZrB}+Vkfy*m)uzzR4bPJ`tF-1u$G|lX^yn*juhb zn5YTHA0-05dBIsv>g&>Z=<6_eqB3M?!e?Gh#j{^S3s!?vuX)dQLm-_`d!|*{{Kgeu zZzz7Zw3rGpmuny&-@wEUjjW$#g6+1zaTjl!kqD4vA`N>i0&5X=p9-R;n{B?$xef;Q z&d`ky*cBT4LOYhOi;sz~19wKY@Lzib&-exBkX8$1hngI9JyZ!XqqAJ4W944tJhsla!)o?= zTzy;Bmu6zgWC78vK0y`d;cAma!^5pSEz)u-NpG6?wt1;r{rlcDOg-PXPmnRwQ^<;R zj_zWADrbqV`sk65f1GtbDEL~m2J`J~57&PzI z==CqU>yuY81o3;!Ol;M6Uzm}tBBN8s`sNC8iaGpn%K~Y<0Xow)6<#UCW^#^Kg(Tx; zNPEF>P0KHF(ETvEezCdU+=DH%{rBC=<5No{0>k+y0oEsl3gW(Q>n^?vej#k%ta5K7 zx=)wpCB?LY$&x>5-6r0iAwMzcOsfMGxQj2qXesB)P2#yAmwCkk3?0!qMzp);{vY2y zA@zJ%hA&@E9U2O=yZ^%laMXtef72|jl0ZKRbX;W|S6`BCya zhmJ)~U44E4Iajl!9{!wBEuko9bc@<@2dUp!l^S#Ph1#6i!#3B($LC%vs-&fgpb@0c z@ouz9_%hI))6ca&wBxZ`9RrnST&=Eo$v)w@7k-auB0T-w-#a?IIhdr0o73@?TIP{d 
z>aF81$*~^a+7U|gI^JHxv>Bc-<+TFs7+napZ8RhoC{bGm?#|HuibL#7E-9kNL@90Q_d52#A&peXrv9?6kOMi;NNc-qV=exzm;62#|y zJyc=y&z|=`;4r|=hrilmC+=ebRd?UouW`+$sSw_upikB^iu>d7-ENV)^Vd#yW9;|< zx4YsP9d9Q!7R20C)W}dVw%-;{Oyj!`YXuCedwEibWAhb+jP?vaVRuJVeuHLvSk>=3*L z#w1}0;>j51r=jVq$;->B_4M}jRyMPz&CB-n%+B<6h78=v5g1Yj5`}_93P}?201)xM z%#i1*-woB}%!toos=%;$?X$3oA z&Z2xDslTJ|64|~z%HIwV|P!{KT1;i|`i|l&w|5qst8Wf} zgggD6FpVW6q7f*vIA+`D)wMRmzo#Am8p;1aQTfve3(CmA@Vin!(RKIsnvP$as2q%C z;H^CG8-_@h7LlVawnpK*x}w9+k{g!n3~T1VyD+F$r*?*@rc!0^m-*bm3h8C};bvwL zh&WvW>baWlFWjDL#!pCp{4qGsjTS&{Ci@Bu+ zn|M=A_Re7bRTT5&8|1DJZdKCVKKxNjE4MxFdcnt1ZL?YPGzw_#otKX~;y+sU%T75n>jWSW#h4Rb)w z{n!Pml}<5!KnSi)#MyAl5e9J_{iQ5CTPSnjHS{vFzs~V-A*=hngyG?XrZ3XV<}p)UW4JNsK?~rFR`kJA44VPWT93v_U zQ#ss*da{_!{MwTFG5jc`xgkRA|NeptJZO+Olcc*pTi?gw+n)5%NiJ#}kHT}$wxR3j zY2HRjItzUXZs)=*l|rDWvA!IlBza!95a*I5$tB=AG4j3uA&<#A5R}7C&LGJeo3Fc7 zxLUuLf#O^1W7>Xjx>e+ZuliTOai*`!F*t&&nq)XBX2{6rS;WUK**w zviQ0`(E4@Vjr}7~iW}3dg?)Cq?G1zBn7U-!H;|_t&atf)TQNYFMNp8U$lj~nMg^hT z)@bL4kab&Gc;Q|<06qKO!nrBvC8{5FdAO58({Z<^0H?A!`Ia0A!wO+I*30~vr!xOJ z0tQ%!U3p5~-fb+q&-(MvKt>AHCK+)rWbkD$Qk%OeH1fw%WGxNQ&LZm~O@!hOK1%)D zgs_pGkz*udxFj5)dvN`I2ikw@Ztc69|E^4RxY_`;g4^RBhp61ujd!E zm+y1I?hrEjzC5#tQz-cJyi_f_zqZH37IJwjS@j6@8fK)!>dK6Q>(Oxv1x<)eBn>O3@vl}Sk)G;om<2@wht$+$3%l|?e) zU}Ns8Gl~{aMI0K73-{vRb}9XO@6xJLkAU+g#+<(gkZEj5FdLabg{q_M&Ou?`mfYduZ& zra83oJfti7lvCJNo8AlO-?B;OGLE>+VdHXXXF)7k7?ne5|K%Jf%`D1^$6vPBt{Z!M zb919ks~Zp%`oGUGI3yuLo51b>#Bftr51VZEp0f&YtdCb3K< z^8-xnC}$6o*7IHC5u1CB5olv8w|`r-_uIDVNt+uNpQi~|H7<9L|FzxmWkQU&$*9}r zIC8fu9sK6v^qXyV7`$W(Y+r0TPLb0-M-ID3QY0Ve)K)6|&0x05!{?pk*&9r==v*J| zqP^|$5!hH6(8pf)^rxHjQb7bUMFWPG9a zC}*y3Fpbx={Y~S(cNxhHFd`DmCd~Jk=~b5%3w(8XNg2^!lazkcq+pvVdfhaB>12wP zLp%3MhafGr;R1Sw!2x%W2)LB~#v+7M5{RHbW#_U9D@?r;RZ3Zy&8<`{cgqJoq8UsR zC=ny$p&qBBe>$#^F*1bnDP%OvyMCcoPQt*!&#<)w!1iZ9G#Z!9Svwb3h4NED*Y7k9 z4@8MLyVzKcJZE_7`dYsd2y}aAq%lHMwvm5>(jXpIpbks#k z${^U|On40Fak6Gi%a!|)`ZR{B8`$kv9yi68Rqy7sW(T{zkL#$QYE<`(cjBoYfIk9;gnzZ1UL{3|9qSV0grsZNH)E<=E5P;QG-@ 
zHnqj}Ze!{%o}sm6AcF+;)LIA8!7OgVJYOI0)$(|se}C{6>#d5vv9*^rkjA$qJ9DQT zZZCHAU-^iweD9&ZR_irlHj@2qMj(G!@F5t1HWQzxnbe zjUa8-1>zF__*D9N#`sZ~2PJY~adDn(HH7SrfC70n-wBORCp%}QRUsz9fx<`^<~+6I z?V(fM9W>(^UQd<046Dfi%s~3C$unXT+n*uvu21)`@-Y@6!!>w62U&ey3SnVB`s@%^ zI#Z8u!Py_5l)PWP(9p?1`U1>}ghiBc%0mb}Gp!^b{PnhM*<50+D1x1-AKZCfQSlG& z{TBM)goamKK7QKw^}%hQl7V7OU=_unKHfPONUAlq`qOwkRk+3(MO4wrtFS5@1gt@E zu1h!2*UJd3rf#Pw;P>3a_I0vWZpyS2-mu_OX)_CJ%VH0k1aH=`q$@Tz9N@{)Jflxh z+2p3%DJi{!YCYGN<1Lp5zlVBn&gRr`p7xiLdk!qMX8Y#|ZiQ~)RcLRg)0UDdXPCJq zp0N4$TT#@X$O{2tNskuXw(5(gOpoSUENj@tP& zh5A2HFZMYH+FN7uBoM8-p+8@~;uWLk97lfds{$-l=bBuBzu&?^S`qY9(=sS_CeqXo ze+8af@)O+>KVWIL-DhD)+%EnEeFpu};F6c2E|q`UI6%V=0&_V(?dBW0E=zU)++p>7 zQLDbw85X0+A>5wfciIlzP;rU>nkVWL!N#EI8r7ovBKG^P zo1D)Udf@xw;H_iqU274PNZgY~G$Z04rCc{4wU_*zV1upx{K`Fh$+haV1LF0=;yv!x zXEK0pF#lU_opYpe{?h;T`)$3#98&Y;S4q~0rL2s?4Lap&^VwkCv40&@+Mq?zG~l50 z&VquV7=ANMVL4%tkA~g#1qXe(gc5_0f+OtCOLGu&AqU^ybDZ~Uk42;KaJk*b(fZ*sc4=E5s9VKg54WCyt5m_ z0&)STJU^JDvh5pnZhZCp#~R+&>(};8?zcu>6vXHn^<;LZv#su^{H~WYZieZyMdV&% z5sApCnn=?%E$?F}s`C}k#8tf8%G)CT%_U@H0t|wLa-5Ah;B+?hWriQzrjf8nkzJCg z5{Z;!P`bD1V1s(N1^*{Wj$=}~jK7SemNcwRmY_Z33jEK@}(MZ(1;W&8DVy2keMSGu$#CTXXh0s(0gUVrNK{?_zh89whsj*l3-+mTC? z_pS(S6jj8SC^Tl!yu@C*X*rwCk({dY?0Q0>B)iGBwm;HJF+%#20qcin$j;Nx6ty~h zb#1+${wLwqLg1GGcP_#5inz@@Rb4Wsp<)4~T8Tt`&w((4&Cn2H-ar;T#cs~bK@uR1 z5+#j#g}oU9KaWJ#AtW6WSR-LYG5IlKAR|4W-Gb35#U#NhIQ814CIZy-A7um}uz2j7 zSB*{ryF_`w6>1_0$9m(IWDbltOqFfoEmloolc_I8h@8XoER?%`pG!L}2$OOqkTYF6 z!TV7X@1x(gtv%DiP7?9nVx>u#(%%TI?4&a{dK3(4-L39<2f~i$m36zzkckphlv>=T zDFp!gd~5gqhG{-Mk3nBeKK>_MC(X1?-WcFLwiwM@0s=B9kL*$b{BhJhdixYpc)@c= zM0b_3Gb%Bdv@a$as9eOzyvQ=&z$zi5G6MZi7wJlsFvKqEPvP*&Q+B5|WF^3~9}Dd6 zceyv*oN*Tve~u_#Xwy_j`s! 
zeyPDgVbR$gGw&PS%FKK(f2aU031tch0zvn#5x4xT=tHp^yZxR>*XK9)xVb>ytM@=6 zB~oSV?aG<2YNZW;^m3RoIXXA35i@ZD#TKhNHCT~AAx6T$FrrU7Jb^fFgC2Hw4Sv|z zv%Cj{3;-@M^51YKrhHcyH7>RcKR*} zjsF0NdC_H-rzIiQ4k2r;Hr0t3Ijw0x^-yP{I57Pm>Bj6Zo$X5hpKI>P)UQp~uc;HG z{=$ma4Lf)Q#RPp3CRq3{v~Io5L>@1=IhxvwdNz|CkF zWyn3=aME!zTMp6*!QaZ^1&4V@*twG7b1oNWoOh#$ZvQ8dRu}WC1HLuiLvhutE=^rWjnetf8?TgT5eH?vZNUo>9o# z@VoQ;lRS%(l=jw8t+b55*ss^TtD_Nb>?oI7tSjmB4DyNum-MJY@bzT{$tzr5?B4c4 zK5|{7^J<{T8KM-JfZDp?lt!J8W}(XmX0mXZK*u^~?i&hG*4VdRTR7hC)b1C6fAUS@ z#NzD2%21EdXONtx_L^hdJZNIyzbr}U;auSbo4xqyV+$>pzcySk_9v~E)_!wE?v>Zd zvkxoBD=X_${Otm?MfBu0NhcL1%gG2zDaT?khDS^34D~<1S z$^g4d!|UF_3$G)Zoc4<~rEVmWJBK#qx$99b~>EDxkh+-KwRvv=O9wn_M#QQ7QJEo&fk)HI6l@k2C^~0^nLyGbnRZhEvi8=rj?B zBwHjO$E_y)!$)CMu{Ifk9Kzhe|!1KBq?Wyho(cy@VI+2ucPcQ8g;yPTZxPk zp*|1|S0&4ryrbZsUFO!e1-)p4R2}VqS)|SoC6k2m4x0H@#^+pex$pNlaet1vy_Q2I zimS;lZ;39S$=#ejArY&Uyl$O?_Vp6n#;w!NQSC}C$v2)leF&b=cy4V?_f~!nq;~Dw zug)PVL|aoy8EuEYZn9XNT>m4x7KbU`P!j&7QX3HSKL4S*XXA>;G1&WqL!EC^GBVKA zvf%rx2StqGz*bg1nD#4|1d*?g3sXLOy;6Bvbd&dAAA+yC9e}*cU70=CSKohDuSNYt zLzIP)1Qwk$D%-w*#5On1ZmP&RnM6T6J(AE;DvS{=w4_||gLdQk1p2(kFx)NB`m&Ry zAqD6&^2@X(#!Pm4yFViQ(M%YSLfe@NfjAwmJu_cj_?dEnxSIWfzu!7W>s45+Up}sH zdo~xHf98qk%Zk4roLASpa(8pQz7L(j= zm(?h$i`nOfICz29zcRkhUkUW!tX3rRlLAf6UkVWo<55ssAGz;BX`G)MekN@)%wO}Z zv=4JlHKu^HhfL|z<7iuFXc8#=7+s;|F~WbfksU9GcDJVaBW#v@gP|F6^li_-w7xvD zcHU&)mLBIWkcE>5(%Fu;1T1_ngy|+fxqh6W!2Dj1xK6@xo}F0{QQ_*8g-3yy0QD0C zjdigUHW!M&4^ZYsBCWi#%&{^|cDp@u=8X)V#^LZPp_G!3mW1)(;o)O*fBj!GEnN6Q zLXaa5j349h+R)=hGpCr0aNa*qKu0nV36-``lM`~RA3)f?iy^nb`2FUGL{}vl zS3ahop(OGdgXoIi+1Dz8pT7pC849*q<4g0$s(&Gf8Xz6KFCGLiz_xJI5wiX`KrvDo zcK0$)pu{XGB$l!o((zXHw{*ks@Xy|ym&+b3IcUY<*>tFe~o zO@a7rN+1WYp~-y}hT-&5S1Lwa*wv)2N>~Y+*^`0selg%>Ks#3S%Lqw}QPIUoiHYG$ zE2^qUZksW1#o3y!trwwXwOV-Tjc27wB3*0qhxh4-&mIOqm?Mc%7C8bN6h9zJkXoEr z6jio|=ESIiWf%!N2Xhz^Qqt`~!v4{gyPwhVKKOx9msn;}9pE7|-g%OZ3Qq-q7XPSw ze3Q)YFDlC}&dDG4BLEGT5^b|h1d@s@6Z>o|=MYFE9#Yq25#i4GggLYEVB;S#l~k0K zmliYqnL$VjXk2)**)hVeB&w|n&d0$3I?~|FEr}IJ0Q#dXrSmO{_ifHn=0XEwDuBKN 
zfoqbURswJTu)!RfZ5gswaZ(j+AU;}YM(W<-wV~6*LM*e{nMr}Vhhq6z1nl%SlrTn* z;2F~P&T4>`_}`r^WxtMUQzOP#1O<;4W^Dw*oMs2CNTXqxgKM7vy+gMqXFvy6!t^k;<{!;G$& z*My^+2^~IvF?H}vA<0_EN%Vk(2+#$)J<@l$!$%>~_)@q@!UF5^{L&)7m{Ue+RldYw zc2xsTQSh7~$lRnejM*rgYW#X*>L$1xgQI2COj?1lr7#j?IoFxRxt@TV*`H5TBq4Fr z39As`UsA&1iw$#nh~x<2iQ-ZAHxm^33Xu-Ou_>~OiFQSm#mPBu?8ywc8nWds`=>$! zTB>tgCO0Dww63{ng2j8rM4WASm0`3RkH|33Bfn5_t$vuprP~#e{qog*XIs~mw4^{J*;){L z_6ZQfiGFjr{5SRvd3F?itpZn+oG)2t6ClGI9=|GQzRe-4t8#Kny$B22;QTzV$s$=p zG_;SecmPHfwhkpDiBskefnAj_*@93T&O|q zB^-*4U3hgT@@Ul6bQJ|IUTzrTg4_Qp*I*s>5*|^d2(#al2KrW8d(bxXPh72 z*y2azgj0ysz9zO&T~nNPAAuwfQVwQ*B~h<2J(tWnCgEh;Q)CJ<%F33w?)LjzE3}n) zZK70301MhAkWW`+@@$eDAyfaGVny*Me|;hvO$`bsQjtuS8nLH3FbNCe$7ZzuUtxGk znn0QHo;qt&79GE^L2^VhD4Cin=6*dv2CO%D&b#MaeilZ%$Dlfa5X(@ z_MW5*%VYSjm8x7@k9tafCK3;e7)PYOpA8i(rUT?Dg7?dsY82v8yuAT85k*1wDBgvuEP#-xKz zVt59garSr09HwG5l7#EpBbNS$3-H%mB;#zzbZu?jq5>uXQi5$p1j3mkn#U=p@ZFlM zitMzrJz~t*e2afDm>$Nh`ORop_WthA`s_UnO2O>^JbcMkY?(+er5IOJkH)Es5*?Pw5MOA(mqhwEf-fF3QjQPAI-kW`zyb~#9 z*-Z5-0Rb*DPZH6rFJGo$qw^q@_*({=2bYwnM9bj6aKp@@#Kq04Hw^E8}t&_=ikY>p@!3 zeZHK0DdBMQjGi=v&}A1l6-Fmxhn(7t%pg!3azc$eT$>eQ8$C$1=25{ozh0WM}CPFRA)6I(a5^m?S=B?j_kYRZSGTzcw^BzIr5|Yg!Ls z3x9wyk+{5njm?b+~QLox0f~kx@tOY}fj3Qk#f$ z_A=pL-Bmgj7X`G5K!@fpijT^Jf5eB07s!mPp|g58l=kZG5X&cK=V~L818x3Y$RWDo zP)Oh)lBh0=Xi-c6$H6b5;M z9+qB)MRk}!{sgn1GapqvHa39z zij+6+sK3e(#r(?(O~!W%5XaESS#~GHT8RJ#B192&{j>uIrn%pfu_>1Tx6a)5k5mR| zR!PbO?)GAe;ms-l5gyTznJi<1phPg8t^}Yp^QeLeq$E4zj zLK*r`t`y}APPVxZ-DSTo-h?uP`fh*pts2Oji!%I1f2Y|VPI7Wk01z;^*eSbcg0>l7 zxORy8i2kwv{sj-m)GEUa5$Zvv)~OED2)`MsjRyXhv;9BEqnd_Zh~4pqd9I5R##92F zO_7TkR9Tr z4I=R%lhj1=4P;se!6Hu2(>ZC@)6f+!cNokv0#p_VOeAUk)*o{%Wf2{-fFaJ%A7%#w z^lylF3DXMm%PXTvee!-DNJCzMGJ3D_hIeM-S$-Y0Fyj$1#@gJSuY2=U94LQ?^fk^m zFK`W>6=5wOAiXk><)uKBG1i#}kk>{l%!km{9loibRz0DNB%J<*IXnp~0-`m9Tg8k? 
zo7X+2zHu?CvYh)~YkyQW$p=}1b6C1B%*00$%+BLQe?~?;rGOENNXZaWc z`C0ML4Div-MKLY4jI=qTLnVU7MBNijCqRsf z0Z<@_;50K1U&!7Nr*r0Op89UO{C`$1^7VeU(*=)RUl)U8$ZWQ^Ui*zsXAS0`$CcpC zud?Hn+cGXQz!=5Q7$tfKe4nSwFJn^k3*WbX2lMdHceiBF>*+G0ZLiogJ&MMbWtY8^ z(?8!t-I#h_2liQ8gR6GOiI$1ny_kI!&fXdPH}H4=w^w>KKJJd(*RI;jO$evvMF6&) zBl)zZq6INHHV(;tQ zuA_;oEv7Aqc#mIU=;O5=BqxBM_^*oJ@*M8?RoJ3l4NAQ~4(^m1qJKUPJgr|dR>T71z zEKCrSkog;K&U+tg*Hdw%^XDxc(}z5rkUu7ygHY|?37P?!N1dh4`;BhvE8(l7*5j1x z<{7tc-`={9E643tn@-bljfdI$#v5)ED_)=_iJvge!QAgz)~Vpk)theboLwwFmk_Ul z9>t%yIB7X__dBcU+l_mzmF8>u5u$YKoJsh9$MC-2di31fEe(CXqBYc+<3HL~uctk) zylV~NtL@-61uEn`wDz;kc%*Xw?O6U!z@xrT( zXBA0AxKQzuePc!mnElVr=CjuyQ%nvg2M=$5E`%uMQDQ|iI# zmQNP97v1J`_%yo?MmO5t&k|7BBb%cVG%q+Q+jWscDG2B8-sDTUx_xlH<7uf}fR#DV z_g6mkZyEjRE9P#T-Y$!;!Vfdd512Qp!+GZv7CeoIR;h=>;By8VJk@4ruk!a#_4&fA zx2N3ON}-5rGT&f6&!TiOE@G4;E>{mk9I*s(;j;2_0vxz*yWwF;e-n+U7~R8-R;eDw zL!Wdb$RMyjejL9T9X{`1zL#;FO?-{rl^$2@=k{fFL#?S8GF^di@GXhq=l-A1oo7P9>H&7*f^`Vm=y>z5wn79Tn zs4t}ziB!^~$!pgAdTH^r8qMMAwPBTQJYy99|6}Vd;G*c-|6wdZKv1MZK)SnI$)#iI zlJ4$C1Vu`^LAn>IMV1r=q`OOLb_wZ*|9IbUKl=XO`RML?oIP`{I9GmW_QOVX1OHz6 zb!1D+mQtH1d>@bJC$$aq`1H5? ztYOUywzOFA|7~!9f2~TdTf(9i3Muk}?Q|#mv1xyBlYXtpMGtW#k?%=yfxi_4N?pKD zdJv^1XP_p@bk5xwQ$eR5v$TW-Wuv?HWeg@kBTWP+(c%XO-7*4|q^I$qc1uMMG$^?4 zeI4B0xND|68Re4plycS^0gHmzSXPH)s8ZwyFCnFdZjTJ*N7j&nc(YWWbG;d4^A&(; z7-)UKgl-uLqS3ozyzF$a3?DTN92|^Hn^DS+pYb1#nY~!Aw+l8gnc3g^V8AltV1yd- z$xdF{xYU{LO^r%3L5aO^>4e?NXh^xx>G+-0)M}yOgmyXsROvwTAIA?}DOO|fJ*Xu? 
zVX@fKpuF?|3dugJ=$Qb@M}32HP|4?Z#50L9uRbQVRM#-4&-kx-b*o_xyR+GC_~$ye zC`W~u2x|58>ZsJkQ&du&T&T$_Wtw-QqY#ei7JJ#=W3P)nxb|KR|Z};&AQ7Da*ZJ6Cc%~ICjAXj-=sr~2^RS-$% zoZo)OM_QD*$1fI*K@_Gk0xp5S0YpcZG{gq zJIiz82u&w8BWmZKic_rbJ>4F2i>UGgt3D?smbUt6b@J%=KHp5DoIH-J-;=q*Tv=jj z>hW3pL03&4?w{jsM}8~mB2{AbsgQ=|K;##)c}qt8*wPQV6gqdFex!n~d@eNZ)s>&> zTwDz6BpY_D}!RW;V$-?&fumF}Dsv>1OFpDQuNJ2V*5#_Z>lLlH)rR1gIbx4}cz!s+ zTN-WdkGYhNd1AZ~RS>PbM@|dG+R(M`LobZ1?dB0X`wU3|%qTn73p@O8W8pB!!sNtg zS-;1J{mIBF=n!`h-FZ}Xb74GoSP&$LBA)SWo&Otze_gU#s4AVsGu_<#ZH`+hj<%z( z{o};eMKH1lE7^NwhmHGpw$Bq+q-F^|=B|E^RQ>oyZ2y!lQ}AoOUqy9cbC8jVXsOHV zGDG7TIm=fRsK}Rk%|abBJDY1On$YTi>n9$Cw!R0hb$bBjQBhNKe^_J+<%yG;XeKci z%i7sPIaKn@3HNw2L~2BTj8!%Tm*x&VePy}VLH*!kSdbFk8)r8}IZWjvp@?htAXxh- z@BETGD~RR(|3)KY8y*ZT;xA@#yyNPlt}KB7d6yNbC?-re@z+ z@8|1YM%8omgV|fn(lRtC=tUe)L1X=OpUWllQ3`)^w$2NE;nkOx0;#V)GNR-KyUjGa zY)QQ-SmG2Tuk5QA`ReQZ^Gp>IK($smGJ)oP-X;X?IQ&YIr(+c19G7l6;d_j@JjElf zrb_$S9Q~71j9-ZjKh%C>g*p;^ju%XCU9RQ%;>zvo>-Ww z1-tkWqjbi|JWPE>&UGg-ES?u71ij8y;pK?fkN)=FowxM^PgY>z^{_VWR*TWudfF_5 zqXFCY#t~h#JZ2g0+M!Bw;lt}=r}Qu!N)OEu5)FwrZFmqeOUwPfp;^7mwo2x@M~_ka z#3r~S@;l-g!eTMUw=QP9QU=!4Cz@-|^;dV*LJ$Z`Z*7s)?~tV;n&{m;jwm(j0YWb+p`%3OPPyKf@J6JN9yY*qu|Z1Tw6Z_c82}gYr)9Tm9`%KyAw-9 zkZpF?M=uR^hJ&1K^VBEz4Gc1Qc@BsVidztFVAYO~@rsU+qrE0h$2gu(}3ySjQvZk)?=?`Svd!^UEo$ zb|!`JyfsRj^={hlI!BLa(P3kw_9Qm5Y|SZ42-eXnvjA%iQR|6WdN+jUD_O(uCgKv> zn*F5?_9R(z1|F=CQU}4z&pe_kN7)H&l`D6bydGsY6efN?H0XDM4CKzjXPCZdvM$)H zr#4W8#6$eztHU_36A(0W28%ya1C|fuUngNzD*SFewvOBPbehQMAjJG;DQ;u2XCa86piCdZRrE~7RHGCjT z0$5ZSWmD?fJ*Sob5YG%TK51W^3yiW+UUj96yv7WEw z1ukOW9kz!(p|&f_>APURM587p+4$7h^vEgB()OuKkibLg7-*i+fQVPj14S#7G#nD7W#s|#)*P@HaXmnxV)E2up&W=oP+iJ=1r^N@F?D@AmY-W0O#g#GLyXv#` zAr4gQA&Kq>6sj~i4jJR zym-Zbt-a@I_m@giv>sN+pB!(HUy!rztj&I@J6U~3);^BO6HxPZ+%>SsgwqfBX%;PP z>`tzvinw_&A=<{|_Z7xAn`36W(3QY<`AJ*IZyxq_q!O7OtUr;GRx%2qJc$I_niAE8 zDFjX5>n~~fc%D#mHpX0iq4DOpYxvM!uaJpVYvIe?jauw@iKgg9rA;vdJPJ8d)z zs-w;;+XX&${Ly|M_Y`+V@hj!fXFWT2gl4YoYnO5^dR49H+nGi>Rnb-^C()05fCoEF 
z9woL!@(=9f*q$?wXtz4J^t4d&t6&$p`ZN{gMN+1$34~x1K~==fJUAJ{QKpXi#rwBV zU22N6;6+kMD4npUm-}`ESH1n;ouR7oEc?zY^ z$`27Mow{9OyejUA6jDxRNLG)3e}jFG;6(W_tTF@d5yEevxuLU+WhAri-aU3pVkJR(2ydReEHl&1$E@4iUnf#Na%KnFnWQi+BKoBHLsWGj z6}wNTYL*n24YRvlnEGG98q&W(9aDNX1;dYi*2|E{&C><&ih2<4Fmj3S?m4k+24uvj za(ml}V%2m2y@j$6GG|?X|IQA`wl>rk{ZY1B;yvPOQIzvAFYnDB~er^50-My)8Tb>KrQ9(7^p^8LaXugMW^@_USgBpx!}M^ z;>i5|G~I)!$l8zDdaKn*T;@6?yk61<4u*h_>>AyeOXqJ)dQzTLFs-<4hxYU^;ZxIZ z^AMed{eK&?Kq@})5X7J^8@n~vIvCNOs}vCSQqb7fZw3uMPE#a<&{`eXf}fF6bU);1 zzc<6*c&X6*s%&LGyVlp=OhvnryhD^`nfRn$7)T z_c(3&Ia}u*2{6A}6PE2hJfR1|W3_Jt3tg4DnqH?Tb(7)pNR~H_(y@;xaQKp|U>0l_lI#SR=zN?Lr*7A3vHxDH@HPp% z&)sfp#i|QJaoI4`H^0`biv%i@stYlz4iBs_Gn36-)$fR?*7ECw3TR&mBPY;}^iM*Z{s4DZa zvnCsK%6B(T9-6_64GAeG2jvDPah}FW7QDxn-hUF&DFa?E2Kw^?*JAt@we|29_U`H7lc+2yy+b#?KY^ygNe1x0)s2UM`BF?yBJnQiubVg1rB z(uwIT;`1UOWk&8tQ4*3ib-N5SwT+hxilqd~%GvO?T%2}l*V%t6J6^aJP%Fj?eYAh_ z<3|Syx(MoJkL2zbFZ}2%+PrvF!*`woezOUjT?%;{5Y*@0#P*&eXge0_r=Olm=9vdV z&#|dg{isGLd{ZU5JdWKKc3kKt{2H@HHwhJmpvD#rs9J$A^8IZKe>Bt5 zq2!|sswr?i>IfTmALQomN^pwXAVkV=d=3)GqBhi30yD@DI(}Ms`!sIiGVWNEEZ(Vi zQHEqB+I)2ATDJvMOV|p>P$N^3<10rLzH?R9c|7cgArfDh(@CX`PZ7LA-NA$&5{o3z z6?INfq$jBljY*9s%k!NO=|!H(hgTVRFyb8EOxFP`g{B*XV_ z`|42Zb(s5C`yvo14@e+^G?P5zILo&mS=RUQU23ka;9WqO>ZE`25v@Oj^V+|Ve%^-- zjW@vFK)FLLZmE^+4TrhuAmg}qbwF*Y!e>z1~7 z#V7g{%tY#@HGV$8W&E(q9S{UQHr%LfxAI7-gbf) zyqd4m%iW8qw4sd-o)2tIFbECOXVydHDjUF41q^o|ZFliavL5C59SHJvQ9QX!(!RXzV0 zAs#Ic?mE*rO;;=QUpqea0~_+H)iGn@;`pwd-+eHSN=C^pRVAd|#ZJWOl1{YB=WMIn z-}*V4Iz+sP4R!fk@-(QTk<4v~Y_=7u{BVgnpS^6!Bpvrxe19t)6Rc|1Tfxw+$y__T z%vaHC!P2xPGK9l9z7EcP8jGyZB9C^4ws+6)Co067ilalODj~zGzJm{%&c?cc6@UHV z`YM3~8G~HlzA4f_N1#fFD%}!(Jc1$7o-MT2U9{}uM`JB@X@lSjS`oe(mJk6)mI!$Q zI@Eg)n%zP#r{WLu#CxjF>taC3_rF1-hQ7(rD6=QUvEKcV@SUUi;&sJGPxtHQdr$S5 zdQ@HZ8P7Ve-LhaXZ))vWGlW{!S&@Bb*mZ%f;>Nq++`LmW@a|Sq6_!dV% zxCyj7G}(8026QjQqhkyYWpMHK8BSskv#&4Zq}gVkd1eVJJ-AEYhWh7p+UL6o>YjMUsu2d_i5rbW7aa%$95BLsfwMHC+-NCZ7qU$1=FnXy%D*1JFI zU#4c0w* zU_S2}Q?=qKU#mxZE%u5)Es`ZR 
z^wC$9S5-yF>m4f3f*I_u_A4_al)VdcQngz2XVOS*?LK`Z^*YF}tc;y0_GMGKz(fj$6ig#Bj z*+#cmfXhBNQfh4WDX3(jIufqUj}yG;PMqk@=aAJ4ClcK-3Hc}!7Ywp?(3~4HHTLd% zEt2*sWW2l1W-}U+=?-_k=HsxrFy^Qc1+G8-aOZ}A2-GMOM!fK@{l@p&+|S4|*`+#U zfQiB+gy7Eo#1n1_AnN4%_l-tBl?`hud1UG6M3t20s>Kd{3o|*~)zz>(9g~X((Im)! zW2AXZpFp!+IFi5QN8sEjT@w*wJEQtsh5w5Qz-7h!*jT0`&fkkRG2uv22FC9Y4!9(M z@=a%ad7kw}JT%kJ-)gApR}rGd4!g3CwOwdEXaB7?)N#9CSoV<|A)nh&Z`Mq}Zp^N^ z;i?KjIR7Mw1(hNTMCk{`1(JZl`BUR}B8UJjY#bfeusJ)BCE|3CAfk~UUrAyee8j6) ze!6c_vW|}%?gPrlR!Q=i{|qU06&Pzdvl|^Vs^RSTUS$hR!pP*gNtYsz0U5?bO88y{ znv)sX$ioin=GWX4(oB(m-!+&!D^}N~Ucw>p9Ycm{v7`iajMAC|K_-}rV0R|b1%p%G z!{d?3i4t6RZuv~-18mKL0??_ic)gwh1RUxsVdFYf75)J%tM7 zX}~Js(tJ_5@-)S!91vfT3Ubt3mWN-9aERDJ09BBFb@gKcSrCof0AjKda@DTF$9t!r z3PX<6w4{_17k3rq@uRyOH!@LPEklEShVwy(H{v-`($PY4-b)%{$IVEk1wF)uGUuy4 z92+xZv|kbHslti&DJkhL(mEKMNsD7Q9&P2Gwh6VqsMqDi$_6-L2MEc!T!Ee7_3$qtjWFwTkb z&IP%wOix?$E1G3wO+tkUV%a;2liym&IXo8W zMM6oAX+0T!EWxxH) z!=P42g5-DF`WZvX?A(AQ)XC6i5biYP7+Q!a`Iwn0A|j1cgqmn}=%FoJ41p9Z-o1Yh zkQ3V1;d<3dEu2D7ZV>fEZGwDao8g8l2mE#Do3(l(P8vp&i0SDlNqtPVyV_NDa#OZa zZf@FM&qk*2_QgiRyPYZ z$s0aOQ=~PMFVWKvhJp~@wsmhS>fn5r!4`)zgI@|LzY$iLXpwa>1Iro(bks3aN9-z#y zD8#z!OLv*RNRii>vMn$ce3(ub^R5&+9=kLNhwsG?;Ded)R?f5A*l=b90-aw)#H`g< z+35g;qOi0*yR8Yo4+WHI>LJ)w$GmC33+tzI<;hvw^Q z_BGH7e7Q1f_;Y9LsjNVTI&kq6d4V>xx6*)V`e2tNU`BFHqAVavA9*Pgarsd-<4k1j zyPJNyvB7n+SVC-#_u-qH8Xg`M zfcd1Aw|`kL(A<@H^*FjSUa>H~E4~>!Ov|)ZlHysk)#jvtSv45GzOD+K?d`MAfP-d> zK}kLM6bmzxuxWYz^XS4^CCz;Dca?B?CJJ*oC>z3HA;0*D}-{ z9DawKqjPhmdXSH7%P#i=V=R6UuoR0SWxB4DMT`oI&oi?-bIH*@G zWr3OMA*H4q{x8@H_V9a<*e;|72$BEU)2*m0e3-#4Nc|~xGq|EteNML2aZbR=wmg&z zSZA6+m^k-?BG@ktRW#pMn`w8_sw8RfvX&FA^C@1Vy-rx*UX?dySK{U#>o!&0yEOV_ z6T5?g_Vq*82$B?_?tl*fYIS)QSw7mVO;wUR&pb6_Rlq=W`PnxJ(tM_>@AxJD>fuFy zQR?GkgQz8dspWtJnw}o!@7vu^bNqFI%txFE)``40sgD=*ROB8Uv*L2rmj+z^uBSY3 zQQA$5PE8vN-g}Spg9bXFc37Fjk4RcjqJHKkAOuPvEFmQe=Tpfp`Flp%ot?~b8P8|q zI)X?n4x%opu$}m$g!%gU*~SU)2eo~$BTiWPHo%a%*1Wd=zUNQ{>80^OrsTbUSQLSD z4`0k~#{4lsD~fRhzZJG*ezl$M?q}w%ICbwUab;Mb(Br6|i%zF}PO%bJ&fnJ4YrJSO 
z*D|@DFrdD8@Fgth6KeRqz!5ryW?lvy->vD(sAmBe{9CiT(7YTH*7E4G&oAm0nnC-k zeFS4qg%=P_*~%sn?hEZ^?xjUGB{1Z(H+pU_WN;Q>();iZAz;sj7#^5z z#zp<)gK!0%oz$BUgwirU`@-&PN}P=2Fas5emi`b)ftP{3PDfh7Rp9zqV2%IGp|BbDrkUE`~+l5Y> zD+UK8kz02x1fj;e1aS&?ki?cTN$tnfTKo(YTC^+td^|v)68BzpKht(x*cLRQq@p4c zCUG-gE|TVd^YuWNy&(Vzb~D%trbvxgGtd>7Jw3SzXD>PuQqJM9obExn!==c8VZh@~Hylp(l)>+HzQ7B?bQcm`$hz%JBBK6%=aRD?Ef8=ZwbD&YPZtzs zd8z2uZL5R9QsN&*W2x~G`&rkYA$l;GT~8)B_RZ80&u@*b6v2dxnIQBcr@}a!u4SDO zsB1}hH26PjdW@6X8_Kfpc1pv_m7LnTXm-Okn7ZpHt7$HqBAc!Ks zk0u#-XNy!Vq5(!Lm*vU79BQR|DfX`^GP)1QKXdJodpK&a@++;g*eCjCfgtn4x5j5J z02N#D%_N-^B*31OdLtoJK(9u#Zn6;%M3=jt3rk9BI(SF1LQNuK_}p$*&!%^q8b>H6 z+I7|H*JlDTP8*y04-}5<7s~G9EX`8SQM1cu8do2u=dH%#Sanlfhdi0}v??OB`fj;=+MfLHp8IF2`0%f3u0K)& z#tnTDys)(ihf+bfcfZsG&DPac@L8_tMM$&PwXOgi{W}=V&9!mg-YW9%0cD;D9ApOM zpx3S6CG`8J*raGMmft46{>a2|vux$izv!y}WUFm2AC_F2Y}4}|mRbEuuu8u|y;>7^ z1lhmy(+Rqd0{s8)U`WokxfMXRGz#H=|AACQCA?#4vG1aDJgm`F(y}2XL>K4Q7HxO5!`wHx6y&v?%dW_n zpQ%~&7}!l8z<73ibsI3<;9?E=^U3+a(z;;FuF?rZNG_RvQT`#j*;&9~EDp3O|6Yra zRExK1Q>Ndm7ECA~nss%F%hM`V*}k-eoJ^A_U&^U*!f=#-mluc?5_rSB-vjYYuj3bT z*IM?k;TF+N5NT`C?1VaB)qn=$(A5HTxYsdvQ`M37<@!!bfn8tLV8``I(pf2E2^YUf z0_+P8+TNVC)e#3b&pbpll}B6+fn5djupB5IDB6%d-dGczsxaJ&-e?HmP{N;Rrw)nL zsckrA7d|*%7;9OrP}N_$F1S9a@2MeAQyVxkxAR=yIh?)f^3JtOJbRyLjQQ|ZvM+=G zS~9iEMYJ%=Sp6aUTnn|jOO8s`>0Tsk<2^sjSv8?#PB~S*v_wK5odoUkqb!?m3maE+%AwS?`BX@u3y0mZ6e>`=yJ;OD!!4w zy3lQu|9e0=G0MYaSNGWJ8r|lw!Km_eKG}q(NVUX1%1&+mH$H{>i`-O>qrjumUgBA}*61L7W7Io_^J$gWW zDU{%IKLOaTww+I*t{}_Fo9(mU=yi1T>ZVPD7p-Ke^9^-lo{>?J` zdH-G}@?8!oQfVy9^9~O#lm5PvCimuo`>~2vMgqjyiu&# z9XUV2+Lc|mGrN7M@`XA>A1JicEg(x^Q;c-ZuUql6jGT_2+ga>ulj(Z13g3~Br5gGr z^(Lx?VJ)UyON(AQu3O^u6)?2-UzD3}?W7d{|5;K7+{$V~a9|aDKE<9YoU0QyA~K)A zrN3k`YT4hWXt%jF*dR?kCbxHNeSMa6aN zX}-P&i~77wc7Vir){chw2=_U(tE9`b@Up!BGcvb)1`}Yt+Z^c(ZYxOSQKistHf@i9K=M>(;J`WsQ3bXkNZ4Gv|__u^nl)+bL=48>?YVf-i;>$18q|2YK$1$a1LqS75q9 zH{5143}SIXiGos3*TBWey&EeEJ4Mv^=|`GJn^XDvX$G3g2WEo(j~j1wMC``l{-ed+ z5ixfSvv*1-1LO+qUz;;G%d`id*KF0hdA~AD!h9PWF(g))V%+)4t$gmI0!k#buSGG= 
z!ptp*D);q!e?7%7Dhw0(+mNM}P8l?q&S;%gIFw8%Z!0Rd(aiL1Y`TC#rr=9<+R?N= zR-66IVC%T$xA$5&SX(|zJ2YPvK<(yO4TDJD%8lM^s8HnG=$ntgn=%ViF5CA~$0Es2 z%vyv|7KkFnNo~w=%kHfA8(!bs-o4ADmh<|n)37=3Qo(q6#d;6JQ8}ytrGQKPXn0+N z9rM}VAqPAlrGj3siN=k?i{I+cUd&q)bP-85v0KwtY;NHBw5g^0?4sf62Vun;C%s8gle|BnM z^2mM0_gP!l-%q(-0bIf(<$v`_M^~rDqt^`P+0G5y?jTGBYr^zwr?eHCR*{*zge$`R zFJD829JLmF%^C|viTPg{@YBW%+QDeXFBE@nQ=)}uqXkjbtA1U9mF*@s*EGCrvGr3( zBeNZaE--E{N}X^RawMGwv9>OI5Fg)ZcT_eLEK_6;{D`d>bzF65Nj4oPE9Q!nW`$W@kr!X4cDY~U1{W8Pcz&+Uh;v1UZO4P2v zr!9r1nq18wi>_L#uEf!g^b>v{!+OWW0>5iP?_b%{uGhiYFq;l5I5*jtTh{e^joz7( z*$&Nv$cS96-k7I-j5mJzW80Je3h+SCTonT@`{_LQE6H$V7E?}oF|NkZd-cuvNJ(m= zn4GAvlp3*U@yxW(Scb=P-f(wR`NW&9xX};7a|>z8?r?18>qRYv`err54Th4((e(O) zijS5M+?2pWU@9)^S7%-2&HaO|l`axMa*xnmS4abjizjL}qK=fCi8ZVI%Q_UT22bQc z=Dj4s2Xa=uH3ACQ_$4dGM1+n;}@Hkxc;OU)n=r&sm=MhtaEdT zAHIKw$Byp(1dJP4>FF+;%lEE-yhZ)|IO;a8zyD|Gb)(csAItt^z|@zW=Yo0tlD`|9jKfO~hNF*Dg*o za=EPHm~j*r$=(0=n}HOPe_2oU$8X>p8)Irpc7t`-EB?6qbFo$3>K3kRq`U|@!$vSC zYx47%svH*FTT*fUB$ZALsGy#`b?`OB5*f`iX8)kB{Pm>kY`CQfoOOD@(3ip~{6Au~ z{w5a8bmnbx&SDpE$t)Yx=-HIWqF7j!ttEoYumV5?oW(s;;=9RuS}kAv$$Tk3lZc~DFX;i%7TNP-vSIw@Unr?u zR(hjmH}ki?6aVY{&*S6n!h7qSXNMJ8YH~d)?N6QNTupLU`!k-nS=|uK_WZ9kJt=Ew z&?M$mXGCZ zJm)s4N0P$$Q+?U)Zp~jtjr(2-dY`T^$1cV?>v*@nxSY!JQzUEfPk|$B{woLkf4|vQ z=65D}`Ri@VXd<%%!@>lOwiPqXG|ax~&_u`U>)0EU8?O8i{?jV7qQ(s1*Sf43T%OQI z!!3?QAdU7bi$U3|BcTb?fd%~Rv^qdC~QXu!+glJgT5j=c+n zcnYN&Jn()&`{4+}xIflQ^(Uo|pO{1dg-@!cUu`s(%gvoNufFb#j(f|f>ctf*j&EgY z=L>Fs)*dNTdJomQxi-O<%_5+lbbAC2S(B5DjE`rZlX<>4o;yyS)|sK6A`*R-ZeM?@ zGq+ox<~kJ{4%F5n*oW|LlRQcsQq$WBfF|*a29KSIgS~ zLW1W;*OB@o{?sW6s-&LM>RjC>KmM+&`2JC=FR!Oy`~A8HKzA5JB} zP5tF5UVbB>ENyY?J>orHkCDz)p)edw&}1I&gc#yEx!qhtng1`9drXMhxjEyZ94)k0 zaTNs+1_3d6*0AA;> zu0Xd{tAAD-`W@Ki8)VnJB9|9yEkn?Wg;*S!_g3>4DLS>z^&OF=;r~@Y$y!snZAM2& zU8m1ADh$fOr5YJY3oZk~$LY>9p^u3BdlvMI|03LL^}HGCuuy)!eO~K-R&8iAGoGP3 zSt{>VZm$LWy&UbVG3^Qg(L7a2Li?9>KZLBUt#d}?Bxe88d{DK?+KkTS#I-(RqNU&n ziYc4D*j2{T`0MUvVat`8zR%W*O43d0X7rAWho_t1sT*M~=AXL%LrACLNYd{Tu`skh 
z;GlXwgx9rfRu9pD=s{XeTdwrv67_K0!fqrOIth@P!}4mB;?F2yO&n;y8~AKJx?JyU z8CKc}(36g?0qt>_Z+hz285fHjPo=r_Ff#)E$p5@%>to#}@7mO~v|JVw^}i+~Qw5A> zrq_|#Z7>mVz;HP?(Ojam;Qeyc>?Xn@xXY*s5!%`!=CLJM z1xfvt!2hCfbvuMNXNeRz_#xhUZ`DqW%}qV0AC0#f0sML0`od*989Y{Ql^)L}_tv4e zKK{H7Lnso*W{3Cw`YUc;D=II43COjo`8xRR;b@!^{lpCA6k%U7|4XtKvyiJ_`(zVU zX5j$F;6K3`q0ado&lNUZ0x)bsV0Em2?q$a)1Anv9tC6{d!5&tJm}L-yq`~1IQwD3#0WdPQ38ytm7XNTyw8sCmLISbZ~H&%kYe>fKMiKv)qd8}as*y+ z_F~rK)8ys`t0dhNE6JY&7ZBXqij0ips7ZUK>iTCue{7PcX)smdaNQH^eRaA}TwCk+ ziv{RpgQ#8GanVO7JyU?Twb>BIjYTD>ov#S*1ynM$K6U7?3SPRngQ4Qg!fbBV>~Zqd zs%U`P^})a~0_Y87?(y|hnabP`6D0*-7h*QKM^y9&%}$>yj-_yMQ~Q0HHPDgIBjDaIlCKHYW}XkP{;&Hn%& z0ip5J#!A2FtDnRR6BF0x9tUX!7F0d4@n8nET-K}Ia@dj!Z)tNg51ix1Tt_$}0BG&j z*72;%z;EGy{ws*2`~uNGKL~#tP}=#-N9s4-3{vf6aU-f#li*<9PD`Br`G!r zuC|>U-h29Aicn}4p#s1yE|IYK%D-=s#oS>*Js;K^Q?PF zMyPIXhwIsIGe=ni>NTpEy7=}M>6x2NGp`PM>JV{RjRwW)FETq<)Ne$&)(Kq5Z91?Q zK2}<5JH^3l*evwAgdv>twhcwV4|L6Z4Gr2~sE9uv>?S0j0R;T2$hfjd&rs2SC>lnE zgv6l3K{zp-k6vpzq({$W`I9>QP}A_z1lY-0fV-D6t0ru_4Q%=w+z{g#N5A(Yh6{B< zn95BYArlL9wkQmhDKtN*&=b5wK{xWm*tEHKo^7TLbk(8|5_&)ayHZ-mTL^(Xb2~mL zfiaX0Ym-s3B!M7bCg7o;E z^D84hWHHzGAvpk7<-cE{BDL0kU)l5CZC5xu4*-fMSNqrE*N7~CcaX=ot~jE99F(-E zUashAU7c0;^6nGxP1I||#jl2+;UQiKxCy30OO?ehu_tibQ;+CZauP_uF;<{2#|BIU&tkO*j5(E z7_Bl(vfjW&2Rt^FEYz+P!cq!@*?t4x&_|%~|Fiey@l&vZ7)$?^a&9`iR1g`EQ4yM$ zYHN&A-^m5=k*}1|y0oA`KQM>oArf1!bA6-#{<3$|8Jq`@7ohT5Nld8~PD}83d75P} zh)}egUwaPg!tvPHSm`|!%#E;u!?Q6;B}B-h3J^Rd{)1_8Q!zqP%RGceX1n%L|ifcutnn#JUj zScZRtBQH)lmCmMy-EpMUPY0`+5P2qYkMB=cnpFJu?c)^z3VjGeFp!xZ9<8cRC)vtM zEzv3t=g6)84);McOTnUodw{})BMJUXfO61p-umn`X_jhMh-|pSzWegs8dRxzHVjUB@5pxh|6!x z8-N4?ka|^93&=0TX`d;fz)&Jrk(h5d@(>8eAb><;M<*w>j+S65gmaqN|NbBOQ;j3g zk{CaF@DTf}W&jvDk))}l1fY{#x>wKufYN2@`O{fOLn*)^>;w2%b^xlAKSG~BBI}h| zIF#5$vc3tVy<-5TC^ORlh*ehhC4oHyaSmW$^G%B30DkwxMI9?nTWq){ZK#%(R$|P2W zKx5nGnQ)mZEmccVW~LYbopf_`WlWL(4KV@$p7q=78~%d;qf9zsqhn((s-4L}xIZRm z*;@&6J!eM_Hl7~lv-;_fwX6ti0V&;YJb45IwWDRDs99 z^pT{Rg;Ui9Ht=<7naN52Bzbjx8|rEL`{9;uxPlkp40WPs$HkXG83qe03|9g7_1Xz 
zl67#=V-vuLOB^Z{@}H-!Jp7l3KlB<60lT`>#QXYv zH}OsJD^SJW=&pzYQD970ayQ0zobYASx8RzKoKxycW@@0;`{SVwa1Of&Dhe5mv+Qy1?KE%y?1#07LWWFFQNKp z^K)aRM-aus!+ z>qww_ce-y5AC_YsEz=%}m-z;uZULll3+2@Af*}O-MSvc|Pg`T7Y4rHs1M!b=c#UzT z)+qH&md1wy#Gh(F=0RUqq2Ivs+WHR$_Y+W3Qig}EBZY>sOJ!ut3Ah%yImSYlKJEGC zGxSM{io6oRh=eux0J|G;U00E?R&O>w^m7xy4#K|!eDOGDKz*$;#*MPpPmoKF3P1!I zWlJE68{LffaCj-cTrAXnrZ|$=aDv!ja~Q}yYIib%gGnVK*ad#|H(2vKtIr7w!+wUY zIHwQ0Tg*TKAf&aOou-9Fw9vp%TJ-V>E1+gw zX+qHS_OKp}Jo zHhz8u4t@oZomcYNO1B;8T;#Eh-z)rd4_e?N0ToZwGVgDxq#k_Ec>Y49x z0(E#=aS@%#OdldeUXhbWpsW7Nle07AjSc>uE{K6`PeFZgrWO;308d1Qq|@A$tB=^h z_0w@5B?e}simIvv*o+?xUcte~R{?_+&CCd+OwgLEK9y$M=w?yuiWV=F7EmX_3d3pkgnyqr&k;^OGx z)}@m*R=DldP9+RI18;3Wa!+S{cKXV6CjdY*V=Z@rI#^R#TSZD+`(dTl@`X?WN8`)5 zG~)N{A^QL?9*DWQwcj0(K?Ruf;@_8Dd*4fhVUk#{8~Gi=|Aya1e+ti9WqxyF#KEil zqQgyCwV2S{f|KCsRj=ax#g4rAHiGZw`n`T6ZQxb1k}Iui*yC{%GHy}fz6egQ1A zp9ZV3$u#y!%~pdoc0D6vW51pDv!%$}vyi()sTCq?jn9L)7C%{!se;~z|MDY4>ZwZ3 z=yP2EF6-VjB+es@q$Ng=3&zF4r!9go6joV_kgL5gljLW^7aDs1{yhgAQEFvfmJfxz zUz8Zq3>HxJ)oeL8iF`0`O?nUqIpfHtE) zw##(akF5Ogg_0~2c|O{MyXtF(JbDzQvEO3po)}m=( zfQEd5e#DB37SwIlqcgB0&6DO+_jV@~CzN8!HdEa?w5kgGVb?nkoY2se6m4gI{DtG) z#ksw$#t7@L8y z$I7-O8(OdzYx5n7PU_M&DD&i+8Ygp&^Z&8+&Eb)KTbtcUcWm3XjgC9Ela7s!jgD>G zwry1Gj;)GqPTl#w-^{(=%=4VTPSrV8d+)Uu-u14vSraJYLZyF*>ySW!DV1r0L@PBr zo8MOtwBQv<(;X>qX^Y`=bB1GMQdG3G+@G5C|Gmq0H`?~1d7lylpHV+HdR|>&`I8Xo zp-2907ZL|PwWT7$2O8v|{y~zW?w^UBJha#Somb+xv46{uCWFC5MZRG)_&S{`tY4^b z`JvX5YR=}*yz>k}pAA&jQtP*Au{Ji9O>|&X>9iJlUz)}xWvBBK`|YjsoSnbJ zN3l1uEh%ejOUlZUv;Rd`QAtFO5+4ET@=f;?{?P+VRxfrYwo4t|zvA=FD$$B>#Wr ziUyKfeJQj{|Sws*EVS1YaRPHQ$TUFVO8d{eW5wDI4Vt%!puZNNmk^Ucj{4bs2Z5G-0=-tl*DntC4m+00(1O=Ll zpM%dckfmONsi*A*u}usD=8bCRHaR_*wCtD~vpC;@pXZQYO-zQ^*cQG(RIv?XttPXf zJ`sZy*1vRbH3KLH3*%~Jv?6DNkjDH)7$HV2hi1TwAysypm&pVN{IEG}U&rbn5A+yp zVjkXh7sv+gaozOz3zqXu0~gAC=8n&V<;9?`k@pE|I2C& zI=ce!T{|y*Q=UQNlEOOFjqO24|2L%wb={rLxR}M5kFl#1P6P#nY0pCRUP^kdMZC(} z!nEIG9!B~m`0Be(^BrU{6@R@n3)WQ9Nl-%t#OgEbvj|o%rh`@~5zGkYx^@POZwqkW 
z`Do-i_&pKqU;1PFRiN3%`v^Q_ch#hU8|1rO@28^*)74sYNSFaFR#&w^rG2pc`2Y1+ z1Ca=k|EEsshV_qSf{U}o*;Rs}a!P>m|AU)%DrKN2+_(t(Dxp@89AgRuF6^U2^^?g0iD907kgXW(FvbFmkGeR8LYFkL93sqN}XnZT6)X!Ad(n2_W`@bK# zjs}(YG3ETDEjlD01R}j|s0a=OYJI33uusFhW63i2d~t&$Gs*#39ZHi@c1R zgbWNG7jW9jomr(oakXm{awp*C?1i1^aQoTw)ug~CL}Q_!?1N#C6h7NRx zBEQeovDGIO^VjJzjESa()?VosMyPAB+bJU|_+(i=%RK1odiTy5{lqBzD3m((W*RiR zo~lUC3yVDiER=6w{Rok;28s}g)XgdV&$&eD0=nJseqck%{>1wB)Gdz`_-#prwYn}} zktDevVWWa)dNo>Kf<%;#=;}%rlaonj<3=IZCNor)EWhsRS|`VggXmi#s2v_M$r#`p zfhK1nirc@W%j<`v@L_?mv9Z4lMaRtizf42?_yIN^UQt_{PhJVhf3Mgs1o^+YTTu4i z)bM#RoV;U|R#%Z?WCWy!+8SdzcXP2sITR`BWWTKwQS+!w4RjgoznX&yZ;`WS0^rdN z$b?A@L8WB6e6@Na+q*zBtTZU7T028~!{}XI_{Rr+a4;5q1Q1G*k zk|8&qE1=B=l{8389Rh#oZX{eys#e!LCMX5P?<4l>3`QCG=<4=Qd}@8UT{<`rPQ>g{ z$*VT0Ub^LJU$oJC?^W;lTyky*dUs#rs96&D<&yQD`FII#;5S8IekCm+ILLm(mlGEk z7L*-~B&?HgeND#Bo+Mfj7nB|Tk0j&9{`;O9bT$zSABlQoy4uaTnk)e8-VGc9u|I;w|Lzmb6fYE30aZ;Adwj<>84dg=5hvho-egEzclPD${#We35 zO8AzNb9JWx9pEZB==(IMMWCVSmvi6t(&;bEt+;SyFaGczDEP4^g4eQS_qgPfqqm{d zC)*Tzy(!|Je}FU^J+OlBwO8YMr6QkLrS}fr?IsQT$br4yt0SP|aE;gWb&r|V?oai6 zK_^RHr1lbr{nn}jpJ3-3mRh9wA>2h7{@e6#yn=bFot@7&tM#t()SJG7v(G8L+ox`3 zm+XjKe&0u5-u`$+ej^5M@$IEu zZxxnWEJ^nIsC3NvKJONg*sLr!2iaKA=~At$|A7|qudTiC79@h&s>$~Vsy_q+|5!H*UZmm`Ur-cHlwbhv zJqJf*3}#)TU`7~*5DYtxJ{QjEe z_BPw2D4z8W?BDK#LDk&5P1djihU?fDzgtZ_zW1x!1?6ztGGS(GZ{6G75`2)uzP{(q z$f2z%aSadM3wOr!e+5zdLE)vr)08U`$luI`78%&yrjkx(W4u%O$BwN3`43;Dnl(u5 zr?5%d(MA5ATl!dtz<7SqeWg|4mra^_75l6C{ox0PE4Lr9zjr2p$%@K`io!qac51Z> zSS9O$(RJ@lOYR09)M6Pn>ofg|+m1FvWm43Um%Gz)@mXnPCX-CuxZ5u}x-H?Zc=o0{U~*plQPaVJq|IGqD~r=WRQn+B~=5{GThh`ulaoKEn75T3`IdXqcv z8%}uKLpCkHcZyXf>1x2VsfA4xc!*d%(z^pZm+cE-sEW$T#RvbGz!85<3u7}flb`p! 
zqwCTYaoT7~^1NK{El;OPa{1#qB*%n9`7@62(qJnZobT2ny1=FiPg4)D_+S}+4U4b4 z=~Jq}L&m!)|Cz_R{5S9VHGhK3xzYQ;g`}dKdR{ZquUF?r+;EX@W!CnAuPG}>pe`ZD zWpY2jceeF1(`D{VC=Y!{afRn7o zs{-gvNmsRxN^G8K4sbC8sknJguKp{H@N1RqNUJM4NXod}`p)2&7cHE0nzBe7l0%*v z&+jlIOd0gw>`}zHq5}?0M;^`#jL@b$qO3L4WtC0P#=Tz+TSyp=ufKXAIw#QC5UIk#F}JrEY4`?@Zi#H2y|N` zxv&XGZn^h_B0!5LjeBE4H3PW6{LHRThDffm%$-B9U-%nxo5+E+1&}tmCP5CB2*VjT zz&)~{JMiZf0^$Y~Iv^Zmtd`ZVEhhlE?wvSCZz#@8Y${7<(71IDu9c2Lqi!6Zj7qq@ zT|(yrs43=yI}ea(>5KBZ!;*(BPbyHkDE=^kdT3=MQmaDgi{A#om82*s ztC>cQ0bu-_qs84`7Fq)gnbDp%aIw!?Z;2ngZR>R&SVP zSnf`kqu*L{_ImD__M!t+pB}^lsEG!lLyk}Q`cY#DNt;g8LA_yP9Muzw`>wH7I9wE& zXWhWoGy!J5Hr%x7C}?Ik8n)qac@0j^*@ydej!s<;@!&Pqz^3VU3`D^jqR*EP8k#-J zi#KW~GU3M7@W5YU^ER|@M>`ajULt4#Kr_kg^L*e8HUd7^)Hvm1!{-l5-SW7tt_r`7 zbo8++ObsOg%I%Q;${>nJxLi&4Yz)BaO6pkpkw`_*(SdhYl>Vwu z))sgav-SDtIP<<0%jUVVr)lQ4u26J~k9GIeKpsKdb>SaJ{qzbXJWu;(QZQ9A0Y{-= z&MP!u?xJS1)}Op)yW3oZ-kA8o_j#giB&K^a@9p$6hXAyu&-(Oy1qIzn{Ve<=csI6Yt-kq9G;{8sM zhc@Q6J;}7lE95YYwAGk;vWpTvhdM#v>0hEZb~6yS=6)A*v72xYJ*MM&QBY(qCPh|%(@X4Cf%(Z9K_hp{C`C}}=%UP23)cL2%dnJP4 zrNMkM#T~zCH2?HwHFh@j$9JFe4-Oplq%TV^w^dK2pnb}GDBejGghYrA0$J13|Bn@X z(qDP$y=1sU8}uamPlr8-m`kxg*=05PZ(UnuNDdyC?9vMd#3fz97^j9%-V76zSGaVA zX151!RFWrm9G?)Nj1BixO7mC2Rir!ZTP}UEG7yW)G4;*~y-7O7Vg3e%1_ehD>c<*L zxuVwDTL-C?Ulrc=0$llZrU8gs<)>_ZkmUD?mCxT0t5lzs-G(U$nB6cJNok^*@j)fE zyBf}TRgP-c<9QmlD=yE7(-m~jiN=>YjuwnmybQGFBB)Av{X!e(gS z^~J)_Eb`U-Q2_nSKPp2tEU4QH1CD1X6oZpMJIX1h#ikg*$BWpn3!X{Z zt@Y93c3um;MX55NVMYXRA5|kdaTcCQ?yIbvEcqxi><;#WQ;v~^d^qpo8rMq}Y-SS7 zA2y#4I>Gl4Lj42Q1@ChLHA*lkmF!MMEIMmiGDF$f`?GPr+pL_cxiab(Md0-M^DQRC zw=m1Q_^9>f9Z3kp%)Mhw-sPFjYUZOa4{p7t_qShW)iy>@A~@I8CU8>@buD zafE_Hy|;qIwmx>8lawNdh`;^R4Zi9QEn|Zwun?@|?5N$L{5sWhaypIwx z7GH55%u;i8d}<~|rRPlR`ko?=k;I8=#XxR&e7a-+!0Mmr>?-*CcgB&pxDUZ&lWQvH zGdIy0@%OHy6%H|QI!J-hLz+y=r7*|QGkf2766eP1x2aK+SXY7>8(V7ajOOXdZ)ccu zs>UqJHekt__W^wO%Ga>rzt-;u^4B#qKTP6f5a4E(?op4ERHW+$q;cENM zRp52IHcekW(QJ{q-b;z6rgE&7cIN`S!us;8* z{~Vqf3MQypD#+X_Jgh+^bDdRawLNW7tg6zskt%xa#}BehgZxW85%4{@n1i%yI0_t6 
zMNL_6M}ApnbQP|ztATFBfBF>##&ULh!xUNCQx}uQ!>C}z{(Nmb*9XJ&y}=U^Dagc? zVrIcb0UgI^Zmxe&4>8tnVA>IV>!6oe+nW6VKdFLgHP%Y>miMKqE&%O0PBGdQfrTl~ zY*CcDyt1&lF1mQwlI9FaLeZ_>r#@p|*Nofl1P)T3lH2In&JZyq8*kTh1HDihM{`wc z;H3Cl@>d&NFyG`j5y+!6yy6~ncvnU%)3seJDfe$*(Z)34DI)b(&=;EN@|_jglNz4; zQ9tQ%rE&3wHDMcRSiP>s6yT0Wup&D2=WniUPvv~)FQDii`W9_WnT!84aP0Bxe=i2(Bk zrAUIyo$YS)$ZB2TE|a{sZI+-4VtwRq_o5Nt>6WJ4f4f33vuW}zHCgq#o8s=9kX1GZ zqAmR=s0t|cOkP=e-Mp5*n2oLeT(&5GkL|vO4X>B|sZS0SfmwH4ti}`E?sgcZi`P>S z?HAR=hD9hbNw03dL0#ns6F!#GaR0&E=IuJWQ9kFIjWU--hq-MR;O%DPKri z^sL{l8xw;;y+~noxDI{{i;uHanDbjuGks%K-!1H`$EPw1q2T_4(omc=|0^4 zJFRHF3b8RyPciK2u>OR$6+`Dyp#^35BJtuKc6$e?WunS8>?*OAG3xk`U2>(7<~TQp zs5}#2=Kew`Af4h6j3tO!SWvjKHMl4vS$Q_62NZ z3r>{-rLj`qJxa`d0qGD10I09*e*-S!2e+Lvr@~frvKwxTgZKzWL%T_pkKQ045gNR6 zMh8i!WBXSwyHr}geOeDj6pfb)FUAvrfjLxUzd~y^2{JBV&Kzy59@NhJv#K{$Sc3s- z)2BxP1W|HC^GbRgvAyzYQ{f|mqH9Ab1E9CCV5X-xaWKlYIQH4O(WCT$TZN#|X;pWU<%WoMQqhQ_w%&ITsPnPGb12zOp0 z;PX&@O~cg?xQw6f_#Z%6=zjuX82)w4w(RUxvUF=o!jb^!Vb3MUeEHeM{L)Cpqd7xi zf49usvLN;%bh;eugQ0+|JL?KlmjM!MI2hOw!!+f{Ost)&Xs!ku8ZWS9j7`oi2;p>{Uhs0D->C`x zI0UA@E*eFp|}*k4Zwf<6p@y)YgSqNVqGvhh|j zbdL6{udT`Fn@ZXtKl2yZSIn!?l%H;L;J!fT7&PFxp_uq}Z|}fyzZ36SCp#84{Mi}X zkQNM1y7&uIrN3L7WjoWdu#ZgURV1DZU@^x*{~x&qB~JZBP$Kg zlVNNxCa&KiP$%T}Q>AG7HS!4F{G-4a)Rw0emZ%@P;whVsZIgdb-2}~Mz3t7SI(2mi zlJ>Nwn#UE#Kg)4o!Z1|M?L4edFq@DlT$tR$Bc_oSuN0a(sWA z*~R*s3$ca?3L*d+jsorw(#O-U0U8?EO{l{Y@rd=o6ey%XmDw}#_>`6vdU$Bkdh~vK z3q#D*3KX%v-C}U6xMBFXrJZDiV&G@Et$`Y71G{!*^|dFLqGcjTfYdYDis*QnTH&yL zxeIvfBW-EO8!T9+af)N|`I%dQw$!4e)04Ug?|P+AG1{sgX1{BW{%h4>e4?hKLT{ep z3Y4{<77ZQtLa}Gc4#^medqnpcGE7fVud^AVn~p}9cjp)_$v~mHSNk2ubZ56~Z;4T3 zR<x$_;;5w^RLG8M^i7kE_dtAo+BA+v;-JDp@e!oB+3hB5M4h;<^;&zOn<5N|R zgxcz+S#zby{FvSXlsK#@iKqcQH(3)gJsBvt(JvOQsom~9J#7wU2{1TX%lPkiDWOo>!`}Xx;5V0)5AQRKg7I;) zKPVP4y-ceeDv_-ca8HjYbxUc}{?5qmQ5C2cfjuYwMGsK~%Q&=KcSTrZv8ts(vYI4=R@joYYAY{Rgj6tCD=_y(LXC0%ig zL-z|`lQIihj5y_eZzTz~R)*pvYqeAANwP^VO0anDX{J9^F(q6!CrW(YG2QizSTn}> z_`U}jI;6-@NUu)sK?Jdg72zk^>>+gahb?9AWyIhgCQT_qfQJ;rM^I4)Id6w}?fG(B 
zTO98s6OF4d#EiYTVWNlP+K8kQ$E^aUE^|zmW&`ffIF%ZdP|q)c6LI`}ye#7@7tA+$ zuULs25Hy7@~oye$O3TWA4g%x%KxpY+9y=LLui`Dlb26*ELi`Agtr zbxebsbLPvpZ+%1em5y29ULoNRQR{a@HeBGo(+Sg=g#{wFRhNHJxX8WEH?^EDS@GOo zB;GDD#ocTwk-I(+d~VeRhgS_!@&GZ0w+~a;6l{MeW;;_4i#?JQ!wB_t@tzhWIt1a%mGT%$(3&Auhpiv@G39#l;u z_V=ExS`!dHXysJQbBya9`hXKLXN;*k{TYaP$770NvxBHE$Krj1wDy>N0P+x;GSqIK zJJLgGLsA`PKgdcDUG2{7FadHg_NEKuB~qo7YL_l^SRck`5)j zZjM{_E`->-cU-0ErdpiUd)=P421Sk-J&u^ZxAP>KL(gsbyVJKOA15VSnAII*K zr5j_UJsru5^-$MuZ*S~U&l+_Wa)J#RQ@{==fp@5g#g@8at`K2ABO`7kxa{qT*@4c^ zl8`AsHc3V)ds%vV+QoX)4IEodWjPa>9pIZ)>m?cwEmyNFXdX@8&v+*w)0D!;##rW# z>8ejrz6B}FqFdmu<%pAaEZ>7PGk#?K11h4!7XIn7hD{uiXbAC3zZ|yX>1Mt3OdS?` zwwV6Jvz1Y`Gj<$F22+Od+YBxq*TU##f>CA$9P>5J2?nFdvM+S=0MAW;fbZ%Eo_W!QBzlvi51OHm%G_C^Zd)jCN|3` z)QPGJre4vATxaZa6G-gQ?1M9f;GH4jiV9*C{YVJ(+$zQRchbAgc(Ymh`$l@JTn*|* zbnt{Bshq>*?tnATTQW*HAQmi}nlpRE+G71qF}td#G!P4Vv_g% zq^j#6Fg`xl*3?Uh9x^;^kaMuuL4+6dP)=GmHxpK~6UV&L+H*-p4m~wMzn#t)>o&7w-;*ypM57TLR%Xjs&Qus}@QBuZh2Y{rD zQ}ItzQZk&BTI+d_i#G4?Hcdl=>v)H6&FC(QaP{8!tz4@9s6${P69tHK_6_A{-<%7#)`ZKp;Jl?PV}c%56qc(#RYqG z1E(o<_1#$ONQvU^1Y`+tF9Pqz2IUEHerw0s3T$LVI~uLrIcF=%cQH{8V+@9po~X21 zR-qvRE8U#jcQ?0hw~x`Y8Rq$xYdp4XM(C$Bx+rB0nOr`Zpe`SC@9R`sHNT|k()EvR z$VHQc^ZFb@hl~cZprRp)*wXQmGYVa&Ppv<9T)KP4N!Avof;-f1!zQImbMW;$8xl%_ z5$4&s|Ab-2vD=?S&{>08V=Y*X`^yB+l7?Kf5EsRnRCapOI$vW?lp#EvbB#r#f)Hop zbaKwBLLW(1*F^tvYqM+FbUiC>KwlZq&08;_T2Rl>7hdSw-X1DxDEr*puZjA0JST)WJW19=tFcHP!E_8tXqdllo?^R-rBUd;6nuR4w^iGo5o7=}7?D1eua|LG7pq|~zAfRa{t`zZ42`l zu_bv- z>Ec6j*)@jjb?rsQXiiuv9AkGnMe8sC_8S586%`gVF@~Gcay9>O0dRX_+hh=CdS2bL zWfK*QYj#@}G4?btY`61nN#U~i`orEKm))PxN?urH%=R=apdxsv!}rUkAS3a4CbEHg zg^iY^agzvE(vC;Zrq)t>vN5q|yh6n-In^JEQ}C2&sUxUxQOrC^WFcaf;|?60yot*= zdu{v06sf7H28N6JP@~1(;vv@k6L89`1b#GcM0r(Tj7*ONJ;L+sV~%7i%*`1J79U1B zL;gfUgH^hbn~eWH$WG?U4pUO!F936`9!Cqhpr;_3!Kq?bL@S^1Jw4$pWDgX$7^DvN zY(F~V5(bp9W?tl&IP_EC2We15jdpzVlXl3@x*w-0qY2wdg)?;tyzcXnyStkgWSKfj zl%~1dmZOxEm5ay4BSy#Ju-59Eq*n~5V^2|1)Yzf3bx3U30B}1zkwL~F+Ie^-@5}u7 
zk$6lbEnqh~ZG6qq*wEROJQgZ++Mg~){Z?i`@rpLz^`3b~c(8tjmXN?;>XunoM&@qQ zU}J=fn}KIoD(onL9#42~P}q_X%^$+Gz%Tk}mmU@=EKw{8WHFv|cSvKG@f_DwB`hH+ zXYTI%M$ldwC_0X#R*sp5e|$L72IqXhY#G6b34=lCob9?Y(gvc39ko`s%qaiEH)^r4 zsMI~84t@Ly#_|sn50p55M-FZ!|GONqy1e>pa5i8MZ*xs{dO)_ad7NrULKA~0iDb(? z5dVGtj2zL4g^gz<>{pDodVsQZ+%;dYyz8tWTbLPporNT-mDc$6K!ek7-(>adt!W=$ zR>Y4F^`{&4-#g9ue9+pc!J3In)78Jd69~PmAU|&gX*-ovC;k3BER&-%dd zcZY0Dbo3gd404H?tXq8WN2D`C&QDqHBDsf`jGef3>kPkKxMK)qmPpgX|Va8 z4fr@m<-1$c+l|nB=W0=TQW+%OS2FV2Iqcc4>Aq$0ZGLC-l!L3a)Q#um0E8~kmz3qj zy?!ZYQ(MbrXvk1VNZst*24PgP0p_7N$~rmE|?XbQnXhTe8=g zeSyqS6{V#yihtupoTC$@%+h3NWUMcCt}jJbG(MR6mI+*XScXqSq;pM}o#~4fl7f#` zU}1si$yubQ(GFKp1$PhTD(CptT8dJyFH)I(8PCi4{BYTFb&i0q0?pJ@HWe9R`yzCX z7Ja28gX2MgLew^hK56oY;i*TT99#Tt5vVaahm7)^EE|6TD*})x=X_5=ug~{-9SH|w z7UB-vUJFHHrH%>r;}uk8rKaQUE@r-Lq)9;-Rk5kiNaYjf)z?KxmHo^rxSU|{@meUh8It1g@?l40 zEH5rAPc+=3JiK=}yT^RK*ylN-v&h1_EeOE0lrf2~e# z`uYT^Bl<*Z6Lw~8eW<|x1u-kxi3!RE7;8_1@ZVOZpkz;f?-}W>ja5}hQ%~oZJdN0A zd%Qsp(=;qFFb0bvA3)>62Cx((i8~pRh2u-*%Mp(}Hj4=m3MgsBVKG-=;m>k8@|udl zA77B-J*V^>e0qx5H@o2@`W_<_Xllzll0~nq^v8=;$-?*0Zgf(TTdi#3aC5W-{?3Lymg@?+TPTXO5Y{&uaaYl<(=%r9gSuB#Sh)SMIt zO1=O*kN8n%Hf@zNzA%2FgH4*w50STbaS7)CH8OHNZ~aSM#KVRa6u?o#3x%?C^d5=! z5-;AA9ZDs$X>8N_`|0BOUWj#HBfhs**cuLi=N!Z}MOe^3Tn%?{KOahWUJbK)-L^59 zeqJG8beTX)(-Xo|jrUR06mt;}U!YC3%p#2C2qRt^16}oYB&MSbFWOV+^)|=&PjSlT z3*ZRX#Q;`Y`(gX0#jnKG;ndXO+f=Nso|&h{CfLOtyhj_XKgx~4&k?=-eYcE_WHa+! 
zrxAX6IztM6l*AeZ2(b6G_>ub+xBAfI_S_I^+ON%@dE6s?p6i{g+{{dRJ+~~)rmCt8 z!e(V9&SFspKuch8rBh-t?j63Smon1tR-uh|m_XJ2Ag79=?WJt3)kSB@F;s1!o?Foz zMi(tWAV+v%&q%F}i$ld9&cC%jd(^7thPtaIe1TFuzMtUP)a~O3CH!#wgEk zAKt}SwTt=r&REN?D2xhm@LGC9A!VyI3b3NlP*IjqB_ZQypZEb_i0*vg5quE}?lN;& zc%PD`qYp!3CH9-+D*H8szdpM4c7K}5GMV_hEy9z)*34U)xyd7W_G8bf^ML~L!<|>R zK+otDaIu$?5>Nn~s^XAw?vix1p)ZS%s`PkL=2Iy#=nP5d_;qmqnbY+OSARNj`S#(R z?Kn00=(07%P0HZ_@ztLjZWJ14(p+sde)v>V(kBn622@nMwX9n0z@4aDZ&j&VCC)l$ zNnTSp`;(Pa)|ld_F%maNyWbS|V-zJ{jNZB}(l;EkFY&a}uU;CWd9#n%x0yh`$4rW+ z&#ce5mgfq=fKyZf_N`^n1{37JFnR}Ao{z7cFY`uTB_(n9kc7aXnlpWAt3*+I|Kn3k zuJ_v|u9rOmACEngk2Sru!H;Mbx-Ai^AQ%AjiI(OPI+H5aj>9L~#yxX$37*0jnQe$Ir#|HCfu%N^@lXMC4IjPB6BaW|IP;zSpS^PyrBh0z6C1qBr zArPlW>~e>>8a`=9@DuJeXJZU=sW8;bobI0Q_KbafR>0?u@{)CZNpfo$wQ*5W%`>;B zkN1HLt{K<|rs%`9ia&Z_X&P@;3BW>jK2l0ciVpf9|U zsA0&_VzbpDr?iP$4AoOp%$$qT&0o}JN2Q70Di3617Tq^glEL z9TVuHb$}ZQtm{ze7H*cD+PElO?b*-Q!slb!^dyW_;py`{6z0@DHzIBTU&IbK2eU|K(Pp5h;y%RV*14l%8F0Qp;}^z{p)fODvs zE641w93q3)r%|qBuEeD*qiaY&D!-|tlMQRkUtwpgb;!z{d!3k~Av#Ca+lk4Ey4qG1 zt+kvvtrofnP(5PeVq~;jev+_Pd=rCI9*aFdYufa7VoK&q#KdIMUfZTa@(;V76BmQF z*5~iVA;lWut_Dqil`xaZX3%VVhJ`R$OY;gky~|?=bFv|O>!lKin?x6yP*&<<7=K%s z%F_(IO_FxHx+E7kp1QrM=NxWdG@aQnl(10Ob+VbveuL%(@JbB3JxcU!3*V0y(k{&z zVnUUS+4<85qa*L;eK8;}tE?)nE6Thx^dp+lSKn2sF|&G%Nz29t-qVs=S67u((0%h0 zmI}#L(9ip1@%Hcw3k~w#!3e^~hZES8ikKO+#$5_J_8W(5o0qX&kyZ7^rk@2fw8%US znRd8dfa{CK4+v>{o_B3!A|5^W@YD0L;OuB=o=uEKAr*JmK$Qlx+8xl8yu7x8Il+!X=1_&@ zh8q8&ZVOJ^=RmO491lK}Do?;rlMO$ql@HpReOw}?)9n<$Tvns8B){KbB0(dqQxJEi z@h1|eODpbf3m)#2UWUI)6o>Y+YdC6?grFpc_2IT*r&dygEE24$Mn;iN#<&gn?G46D z2NjMjzUi5juq4i<)DV@@UcRiM_tNQgH;n-nNVYE*PuM+}A$WH1Vu+r_*wuUcn!Ylt zD(kH2pZar0efX!^LhjxkT`5_WMMUV8!knKI?EL+0V#&h1^IO^MOC+Ap8>jKg5IrZOb^9VNA7qT|Hb zUrW<&5~S=VD1O4!&}c|^ti&Ak3e8#I866s1I+Ch7Bvwi)Av!d|Uza{T{yazUuct1{ zw678uQCtBSucp@aGLLGEfSBFAdEU5Ot=;J-EVux9Mf8|>IqH2z4=$fKGo57>5tcYp z&w8{ARUR-k_IrARv62!u8{^=*XB@9KDKJo#UN$uqC){2R%rxr~>p%7eZg1WMjOE|$ z&9IqhK_^nPs?ce(%c@-D&YceE)avLAZ8Y4|-Z~aKnJBv*H%3^p*eX>g#%Mp)tw 
z*HpLabh+?vX$Bq)bWDv3(d)I@hwW6(aqdnoiFFxYl^qpJ1165xtMLg<>yQ$&Z5ile zi8Jf?BJ!Y=bd3yLBDG1s1~m>@+7JeQ|HUyr(W?r9al2o^X?HX_X>CJXlG_-8 zlx{aK*WUbi)4#an?yIv?rPNT)N4RTG^LSoZQIL$mLgyYHiA$6uU0NS@;>4eIXE%AD zb|aOvFAVvuTRzv@t4PsYLBJ1)>_$*s0@cW%+5zjQJrS>&DeB3bpT|}fIM_^hM11+k z@0**#O8~`|e>Z)eZWM+j(9AFMsX8hDxd%T67DWmwSf-2KDEu5|*74);RefgAtQ&E_|6^k^@l*Y8OB$10ZKzHHFX|V#EHz?DaBF^WA;CfNR>O z)j%d5)!g6WFz$Lb#K+8^-?fzKcvLtlO|! zHkzqEMhWG2b7M?7ZND>U68#``f>34hnP6oeEtyZ1oV?_)O#xl9w(@c(=i*fO^Wf$Q zrjTJ5!?e|9pN?MyV{vo(dhiz4-QEbLYd=4Uq826bZ_@mNinxfX|F!EUzd8htJ8)Qt zV{ng6rE)bIhT1DoaC&kcv(3u_?Dsfh!|U_8mxb?lmqn3R7(LN2b>}%XYcgEeqt)BsZnu6!}zF~JiBc03 zK9sI$@6_FJ)9C&sE1H&1R2X4!E^oUmqw11?Lfo26NTc5l!pj`PQlz(Afc%=Kq#>Ed ziYkL}=Ww{r24-gsTZP_tM&!t@OVqx+KHJ-U1;&ag?_l2?dA%6$_E>Xfq~{rL-rRVh zS{NVx;IO)eU-?!6*KMDe;k{P2+!=hR6-AEbhX9qR(H%qOgp~XCMNokXUW)?h8e;LX z^ziPOj=$tLQ!TY*a}!Qjf4vt=n&u1RT`f{Lil*LBdqhBJ7<09}l_f0^x+0oW^bT%I zDm9eh+8XGLKc&3n5z|+0UypdJ0Sk_gr5vDR4EwcHmucD29xZN1XZB&RT=_m6`w7ez zpDQe~?Y`C85^CITwS^wSNGW%H0yv(hW9;ZjfRoP-dA6X2kVKjo#btq0X8i-bwWTdl z9aC>><{o^1qk~xqmEYo5E&AN?lTg<)gUfU%czX>R%TP7t9$VC7gk9=LQ`Ct$6ev zA9z$lQLj(rLZsiz{|%b`3~GGKW?1 zH&#tG=h3%yiF?*TRu5fMwn`ttqV4?qF#s|OdxXUI$M1Ha^Sb^CLH_*Dc!apf3&gqx zSt2W(U~?#lwmIJZgFg3e1xymOJ66|I471JJ)U|6kE5e%XCE!P7kB?Pox9jWi>p6er zG+q91aNFYLY)-i-o_P^7b{<8B0S_=fEbzLEVDQl==kqos-nT)?&9`Ix&!65Q+QSh~ z7+Qt9X*#95dHpTzCb~aFRQ0WF(b}aXJJy9&Q~7kqdyuq#s8QnEn975@_UVhl`}|O9 z$MHz!y~&kTKb4T-mEokmmlg3_CEi>)|LN&;JVmx#{SFq@51Cj^WSpX$JQIlb4h_aK zZbOC!Rg@CJa=E_g@M9R;WcLKi*Y&*m@#M=c`-tq?6Y%rR@ojEidwB(o0{dYEk`T=e z=Ib1W41(U?ZrbC~sH(!>_Ux2LHYH_E8al)f`uGjc<#v-u{s*hf<`SI2~1vk%KDl~pd6_W!2qX$wZPwdXBIo7S? 
zVx0*I4M+NMi2ac1O0m6wTdzBCMj7azBy|Rz&(d!lzV8rIS%fIMFHCLCBpdC1lR2-L zUdL_lXF3^!YG1Q!q$nw8N~dtn;H%hz*i3BW40C5SmOtPuKOoen&_-ta^1L@1lf>%k zX=}bZ{m2EcS7Wa)jtF2BKT6Bq8;L^paTZxS(ft`1qyk&(6tD>k8wsyk`&qG3SyR(H zigxXPFr9z^<>c2%H6UAMG|fs4VQc#InjzBJNiM5M&$8|r)^`K(_P@CL%Ah#5XzL3h z$dCkw;O-U(1b26LncyDW-GUA-!QFju8wdnDl9o0Y>dnUnW^B>NHqLQsjIVIkfMd!~OF^ztE5AjqZ z!{M~Xr5jW8UUk~yHl<}1emi!;R#i4tX+$}QVE1UwTfEHo8#R zmN!t?=xEP5At)kuSW+h2X&kSlZ-?`ZjZM9GQE0*b+2PxzhhJd$(tOnj;7|>*(rpgx zm+L$`#QB+hVrr@Pj`@yz4m<4tj-`V~Y%#{d^BncaD%K~%(IzgcIr2D$j~mLdvfvnXkJwkD0=te`hQk zLKAGKB3MN-=Sh1@Pmneb3W9D*H9bM-9C=cCNmbKN1v^I$2|%?ZJ={`(C67l0_D4lH zpsAb)t(Dp+%NIe9h}MTHZiS5f%4zkGcj~`;oMdx&$eDbmqz$Bqk8YQrzHlj;+5-I5 zI8(G=T2WE0P?^fm?HCkH7PA1`oB>4UpPKrRDtVp ziW{@BIw`fVS>#-U@OC$@(bJIHP_Ta;yxUM%8%ChY_;xn4rua#bXm9maiuoR*2*jKUz65Esm^!A7S~+RF)E6R%v4XQ+E5$ zCW9l7hftd7z_*3A(!(Sc=Ks4a`~8n`E7e8?jvA|C-O+;NVh27lDh!HlFm75arvhxs z6U@S7;1dRWh5aRpqRSU8XrRcq_}n=+qOopvC{HRjrnN6{#a&k@A@4JT)xfEX(m+f* z)tXbqDt`A?fq|fOYQB2PTW&0vqIvf~KBCBJNhPYifGG$vC%!!v4LT1kAyW?M2Mb#5{YsFhkrD)ohpWz$S8-+^)wd-5 zb|fmVn8rr!Lt0QRdyIk=k*W7p?=4%FhNC;CMx`|((qo*{C4;J5s*lX&Y!Y0MS9-FD zlQ?*BMN()Zbm66J0J_20x%v&M7W9ddh1OO-!V|(}k*|nt@*9M)F-+-1JVAf5hwC`l zl;%*+b`FV6l}UO=;>KvsgAqWkj?+U>__OwEH2~5vLK#EGphNc79e-<(5{o)jGlvMo z>!YUJI}nD9DjmR`gb-8Wm%1m;7&9n@GS3@e(dZW^0a>9xjH&r25+WCeFNQ|4ef>Jw zJ9uOiQi9=btGe__+u3XEeoVLH&@4rjnPpk|NHmkc)HQ z(NEGbymGgG_If2rhbkB8?skbmz*snV1++Nc#l&L-AL|SjAT#3~`EyiZOGjDmA@Zgd zFYX#r^PO`)*W*^n1b*e}p4s?PqtrO5zBm?`RmCBndRdV6VXqo=26a`C_Q!bRyOXo2 zE+Y`XxhKUda??#nVPBe_F!`Ejd0w}#@A^!W%PO};Rfl{Jl_=#FF*nY9Ij3V*u#-1K z_@U}BF`$ZU$|lDKa~iSBYp?uyBbgBg5X3f}mVJk-=*o7sTV@k@C;f~hSKwRVyf7c^Ath5H(f1z@l3EbjOa~|;6 zp6z|QGW~g$^2X!r_^{dH}62w9AdQYC@BqCaa zP=XMsx`V1yd4Cigi|F$f=aC(b-&l79#EISiS1tf61{rhM*1ji=RFr(u#NQc+c(yeW zoG9yU0cH{7hw_<^6Mu~Vn;!qq6OzpT>yr{!O-}bHaQ>ZK9L#3PXhwYa+#)Cqtjm^$ zDIh+EwOSW>LaO3R+S>Q&mPHL_(TzJ&FFO-$hgS-VYr-J;5}vLfm0V}yKJgVZf>4^U1v6KeQ`qXTg0 zR^UrFDBEH!k!KA0rgE%vMJYqXWeLgo;ZL^ild|1jq&<-$W_XhLdd{kto+(va8e&`| 
zq7Q#bnsTK7Q`FP)+pCHAf%;|k)hB?ZC|s75>+eLL5Yblb>$A?w{xtGCF4`8h_@YKx ztj1Gr7jh9O#-icRhvF88J_<~=QF(*i6Ct7Cn!b}Ce7f2TO=Owj!sY?3YI{6yLGHr~ zgd^KiEI!>d5>k#G(GzvMvW-;6xh<|4^zs=52?dO9uWq$`xf+Jq7(HaF7;NnzB)R}U zZpOTX@p0TXuBvD3<|i}@bMx$>A$-95RMDx z%PKiI4qD9qgn+%x`$%J>)}-h0F;tX%^cDi<9s&*C1BBFH{dkR-;P zl3U%Fxl`q7?j43d!ljm$AdVu4%{r}%Mb}6akYK4p=0N+4h#`*9+0rg54zNhaETYT( z)#NTwXRQTs?&2P}hFOG;D9xeS@zIzba}ye4goY`q3J4L`aGhRp3mwS#}{Ry**ZS3P3cE}A1$7lq*53MK@5gBT((A5L< zdp2=Tlc+kJQH#y#&P9%H_^cZZvx8_zp@g*1!$)BF^gYH1d8d*PT^;`fUC`49=U}bY zt3#!``yuM_!yk~FX>!5lieyQ~W*-5saD}6PKw(L6qWjZC>wC|nE;N`6n!=7&(J$_P zJ$kTlM~^&^FKeZRg*&Bl918I#kZ+(tb;6fPFEI?@f;+-Z(aYad`SfWnqX-Burg z8M1A=Kh}v2{o-Y?B$ldjw4W3y{tI^>FkAk6@A{u7uOa`}k$R&I>@Zq5XPTSqE{7O~ zArw28?@Y} zI{V3S-vgzQ!@?gUOQl6mzO|vbT~5zAIqAG+i%zQ&LlE&r7d}nJbvtX7&CHNhqHA)1 zK7bNg4U6pWL!Vl8Vp#h(%FG$`5@);sL$W7zi10o&(rY7$%!s*E z*;@VmwAow2!1#HV)eQ?FJ_%*DzBQl??V)yf?$sYxBc3?M+#he0HR$Mx%E5BEJ?Hr( zx#3S_QAf8dXIDerD)le?4J3QCFA1%GHc`LY%9bpwPrFL!%4L!R2X3 zrtglavs^hl@AJ<~-m0Ynri{ExH>&UT(%i}87-GkjzPM0NtVm}z0;}^W@X3E&#w}5I z8R0=$A>FSHtO|0|G~27hyL44lOWz8V=MkU0m5b0!-o}C%hdPn@fo8A z9Yd*2oNv955~qfYj32X$8XJQ?5vngE@?Hp3^86k7GvoIof+uK55Rs3sY;96rD~-a+ zDhT>X$CRX>)x3 z8?3dL#iOSo3>ly0M4#sh*7Mdzvxa`aO1-V%j~!jgD&(dt(RSRx5*2WgMttQo>z*4D zK8QFvk+^mENa!==--c8?ziqAi>3(E8sOG_g84ovk;7tnOkaSIhk}&0z<}ZNHp$2YV zi+_Mh(i})1VzDyo9F<(m=AdN>9fz&A@(_Qw5rH@&X{;jM%>!-o1b1^+zbA8&p-+qQ zp$v~~Q_VQJ6C72OrA${G4}FQASbFOf%*N3W^6sCf5&wH&o!xQ_ z(%LEPY21EwZSCb5>F#;`_2yld``-8|+R1Cd+GC0Tg8}5aYQ6pW`Vj_%>^)WxFLQO> zx7)6^J?F(7{J70%f9oh+>wRkodAYXdzi85W5{o7!dH#IL@BXHe2lLy1cF(DJ8^V9p z#{NH^&9)&8+5e*5ej54gCfUbV+7-HvAo7aAzE1jQCra7i`yJsn(`+Z5EdE~FmuH<<>>O(MN{Y|?URNA^SdGX#Le79qf+F6Lw);_qV4$DJf0KZ6!u4xT zX-R=7`DAIX`V$MYFk?_=7^%k{Z#P!uG+f@5@RKr?qQQ|Dff_CMqH|*Jz=O6PVTrd| zX6+HBDKZXrJSA-E@d3-#&(ZO~r+>4WUhBBQj zMVcgcGu^}GP9&xfdeK1AdZ)r$1b#s=JPwNjdzy}1SXUcfl)ZPQu_E*NJfp+eDE;7! 
z_ZzAHNcvBwe8cXnR*FDLo6w7QL1f*RII>~LjVBhX*=*VVw;%i$UcXta$vW-`qatO9 z&FH{Myq`v)!`=JL=lb&~)d{;#)2-u-Y2x%pp;(ISl|9#R+&9H9Vd?F%j>g$qtO^dN z4y;;BBixf3GW!L*cPoM}p>gnh1cAc7XFGRcWXD7=sxd`2>Bbx}wo2&9@KU~R)Rnq6 zNvhIvPVTQE{!B^J2qMDhbV0(*^VC95bt^EP2i-F7U;;E3NseJzibPO@8{!9f1&6CG zlzZJQ2ysIvN5! z>UoP29!JlRn0{`)!=43CQkz^)%peE?Z^yrb$jWo$+a&an=U?z%ZqEHHtLqDUH>Dluep^9prPIe~Q zGT|ucQfWuG@DbXW%cYDG9=E^bosTFa-iK_43#v*se<+!JBE5K|`woTpR_pPq!js=x zwXTYe4U&asj9oF=e%F&#+x6IahJG(Iqbv%|(doYQ73Y`HhMs|=qGM$$IbB`s4)Z%% zlp43|c7oz?$r(xx+imk*E9};3bMYGmj~`HeQmfxbMEv{x!DD@Wf1YdwX;mA z(MkbT=@+IN8A>2mesMh7gw-;t<+%#e5f$XOg*wngn}n9?G^d>|Av-n);8nVO!YYdK z*E}FP5XD$WJG;wO%M&h0mPCnA45=f^i2K6%<^|%uOsGj3o#jpIjgdN zUK0xoCUGInF?~8a-+TSb{Zp+(@$9|Ix}ZX;CNo|TS@ZA#yb!n z+vm9%P+zC?s1W2JP%QN?(dsVn`$Tbl>Kc!jPq!*lJaekZWwK7rX0L%)SN?Mizw3;V z5P>{*0o9g@r{1ol!&~ugD0b2ZYG^194p6&uUY6ArkD6zVl8o%v#L-QZw@c~q# z)6=giUI@Mcni3ajALQhWGW#e9By!&eYIW0JId7hM`@pDS%q;i_E3nY9 zwG@vIDd&o4i}s9dN|zhbCc0GnB81vTp=!xbXAAY}VMk>)DVAC$Q6Da&dNcW#==ZHv8e$2^`tpZK&MFN@DS~u#^(OY%= zLSfm_ib4kOxW~HAxc-__sK#5bG=^Qg;Q>bTbKFP#4z|%oMnRr#T?mO)5&9-~Ub^O- zx`A1BS1O|xO_@6tj>ZTa@yYfsF8$Sdu77#AFgUpyj0`m*4Ix{cQ7~C5GTY~+)S7ef zGNI;6fIE-V0O;gSdGY)MwW?)i8jxH&6*2Ll!pRk-FI^utL0YHH5&GRF@PG>|%58N* zGH{gU#SH0A2$=IQ4A^5~^L^&sLj!K;T7TU!N~b@3JhD6<`RV|L$NMph zURPd(&i&Uk|>H}yK~CLyZi&* z^m&+Bfme>6fJKW-`CzhhoQ@?_SP!QPha9jZ)<RNuI5rYx?z~( z)WXyxcr%cZ1r^JS(=6J$pLd7T2wfz7b$KZfbXQJ;3X??N3o zRb2}SGCZ58N0u*}x!u}QgQ3@dY>e+@DN)W)@d0W0KYv_J@^Q#Z4j}K8;4CnV>C(&7 zOx+bZl$FR44<$)POt-4!eFj%1aBlK92~CJfs$B_JPOnBTYyE-RY!4J^E2Fox)c1Zc zY?|1hQzk3a>t6q8%NtW!_kw}1AJy3_CWg zTXB9mS(%j3yd7qX2&b4IG#r!F&^oWP-`bPLm=S>hbrDymH>H=|#4Cb_SIWZ$6M^a5`hu6+wlbkZWMGNBog-+qkU+w=wV-{aK>|pxhmCN0&=AN7J z@+_A;F&1@g<>U=)pT=QNb7G;wB<7^C>@Tr^wYL(h4*%PB|C>qjlCUh+|IQWa{X&xc zf0?&BRkkWp1(R9#S`+8!47GPpTLx$Ba0bMU9W8l*Z5MRaHIpKlOS`i`r?6uQx*Mh`ie!1_jY{&JgC9g zMc{=gmYHaG3Fs4>^SJ%t6Uhc@Y~z#xA&qXPc_j?9#P0@oKoR6r4Q0z-?FtUAN)MUw zF#JqDB3+YO`Xb?F4`Od-`&pxA37rH#l6gBt{#}1zgS|q(e8p0_>=QGvwU1P(#!Dk= z8E)l5bG|xZ%nUy5@hkMzKL3t 
zpX<+`P_>Sm&Ya7rc!u4ZuEC`ycHROB7nQBqQ}Yv&ob~-jLWuiSSSIQ5_PpWRcnW6~ zNAY%&9zOdeR`eRd_HLS|sTSc!C6~W6oG4WuNzzOHh8zs&WIY`GH<)krjPkF2N}e#O zcTcgj<7YGVd<(64phj0HhbHnM%NNf}n&$D&p^O0}kNTSED&S)8n}}6CxZ-FENZz{b+PI+r)U_Of0+HrAojJ^sZpY)q-Tz(#>HA|FlH zMCV(XIsxO15{uCAVBvsjky4}EP?#7QHF=fDdxSddwmQ(5fm9l=Pjc5?ut@(iXXR>L z@bwt3t0U=p^CXp(rCo`0uj|V?Bh2BYhaWY{m4n|v!b0qW0!a}(Hb%p2&yCj2*3VeS z10kObQiIjL4Y|j~MV*24B?M@UlAC$t&`U+MLiAUiXDWp;=40zK9f67b^I*VMvcd7o z76HriGmhcoHvY?(SM~{h2|)7231?k)MU;sa^{7q@$LjtQ+4U5K;#k2+Zc zxMWHC46;(=lk8QGA2DCoU*TY%3@mG0{k{`P6{U!vikf0+ic z8a~&pvOBxuH?TCzkAy`P+YcVo9Y`WWzAGK?g^@xhY8ayk8Ml(g~s3kPY4;jM+dfa zz0;AxHVvv|OroxO3l4M}^Tp)8g<;OUsjV(Mh+Hh{7$9!#KDMCBuvBYGFugc~c`D$e zgq1!5%e{V#ZE1LgtdfCN4>|JuB;-{l`!;IjLecr|5QpWZBkVwKENL-4oS3}9jDHk+l?h&R%Oi0~sGo`=qBCpeix8l?GcpFO?F z{=1DmnvLI#)T^HJ)BWI=gZ}66CEjm(@bIDG$rwK@Eld=m2`P1|KPZ05k_$}!CnZ*k zKFAnuJtM!xB?b62OJ*@58e%|tV3B5!ZedIwl?uX=ai4x(@VbFq5ls*s=QC3~hzHcr z3tX?dpI`7qw!8&FxdQnPy2bhr>5jZ2Epj@BH)il(P6w6l9+{J8nrF`MaifmUB&u_K zGTf1(JeD&SuRZXt&(UL!RMc;AZ>`tih)b+`-|9pm!hKVMELolXtmDVV%iFl{q&0Dr zQF3{ob4i*eH3D=6tC?6lwH&>%MFZe%e#E?e{NqOfrgraFL9>4fg)7+&xBuo&1Z5e{4$Gn!B z>HQ9PQcWmzBw4!#ri$eBBec08axBf=S03?wSG;bgYZGhFHdZlW$fUq9^0hzx188xy ze|F+kc3C=xl<=yZ*>R{hJ&@Ne=x1cb6hg#JlPvWAJe$!3evDA4tcs1FB$4Q@|Eum|yV1?~h|kMESag1* zFT_4F_y`q0{z6_MoNJ(ur%odw$G3pF)9L!u|7`hDcSF5ls^PuiudD$-|7g@itZ1O% z?{?qRR6?oKm+&SWf@%qO(a1FEK%0wWnpbFW|6EEZT7*Q#{lkIm8 zl#(dBZFtHd6FIUL*xjt}Bmy9|rWQ7?@%g6y&zlQtM4T}XV*lb9>MV3eUI63xtD-kB zj>l(J4Z>nU3=oa@%)8Xx26C*{H{1c~MNQUa%oI0r&``UC0u`;Y5JyebNTnf zM@eqA#wcb^vjz?12DGBb90cQ}n&qLNK^qaj6IYy&S{#`sM(^ncD9L+pJ+kFCSd2nN zsfI`0$f3&rj+`BcPuUo3dOxJPfr!xgi34%nD<>MNwG;alDE^m{ox{kdpj;_-6`L1z)}ug2&f|_P-0<v6H<0)TqGre}X(5s&0G02;XfVJ3>*B@j^? 
zXEa_;Dhf=iSAhen0*)r|6CA5*;frQ@r)LUTd+h0MhD!+UuN7ABj+vj?Yvl7LOP?0@ z2p*pxD+$&o3-tzOB6=}7HNy8SS$j1Q!KMa|!EN~wT=J4rU7!j3FH zb#ue1)wA%m%V>R}dy)h^^_QElhTF<1EYj)WnLshZWvSV0d1cYr8ETm{>0wcsmN z@tcrIqqkaFIcmacb@cg8D@xyr_QgUB=}_6&UFEr_`#^uv*|zLmg1rwu9~F-6-_3`w z1gMK%_8VxCT>_=4r>~M?zm1{qeRwqG9a*dI9JD<*6&T{NiG<8RW<%fD7V9RirrN7d zebsZHGb9lE{wo(CV^~8ciFVtW{Jo~GjZH?izUNyJS3HZhik`P@Rh3;mFSs)GyO@E& zb0wLQjg8)L?noA2^WMi_g@Dw;!*X_vkThQV;9#z=rx#$=>l~ritIG2nufsH*BL$+{ zw8t;!+@4G;Jp)%I#M6YV0+=nr$<_D|-e8mqQrh6_=2wRI~rH_jBGuh@#JP#)FoUzuJK0 zs|f7Hn{s*KLJUptRo}k9=ckuhx!kulQ^_DAorV8Pq+NMY78ID(hpj zXDLAzu5MD|dKVx`EJ?@pK{qNBCZ0SW3E5$FOgd#pKjD}($FSP-B;|Pd7mTb&#VWzb zu9CIa?dAvQ#QIeCk^8l{YnkBd&h5ikd)KVPi#f3XSV@U!wK$>U7pCE%AC@_Ph-%|w zAxZhYcB9#=PR+`*cunVZwj@cF=9e1f_$^fp1XOi>bmi}Xw~2nOz#%qS0KC(i%Jpl% zA1o2=dXqNiAVw|Mc{W46xj?^3pd5j#rurXBou~&B3E)v3#yXG!+<=G)@#)r0_CpBJrI-yu`uASVJ8dCVHWanfrWd~Z8Uo3I0 z_sQz0_~;>wu;zgF^$h&7`8OAS_R+N^PjUV66pI(yS-Ghvq_r0K4sgt%QWyU=zocC{ zkh}eDUcE-3p4_X5(1E{!(1EW`*WF-ZDzLoj@zdd5!TRj>$FNz44t^Wq!~LGuEtVEqWdNZ0$5b_Cn1;7obJNLkc~(#EUGfL$Ht(@|6D5@TJqx&s#fZC^a%ly|q9WDORAoK6b8r zbJWl-LYmxuXXJ-Kc03<}y64F@Z~J5Tx7E8qL(l6Hg3ldA^J@_YFSSL&Ey{c`C)4q2 zdM@Q|IZY`z4C@Mr??A9&RNTRJH+j(Bn(A47&e7}36!54&IGDk=R3+okz1%B|vM&Jx ze*NRn#fXNlOY^5Ew`K~`n9}2Yw^@>9EPUW^ldFd$M>!%%>QPFTTX+$jK zsm?juw`7nP(eDb*j1zrZOc6Wum~Wb)39!CGVMq`k>HPewZ956?bj|D4YiA+p%Z|qL z{$&6KPP-uHBVG{>+_A`geAe^>GnEe6hR)6*5 zqLb~=XCS+zNnRBLuX3LM1?z43E;RKzaV-_+#9v0p$6p@pNmPSqr6T>Om+y7gL;3g# zvW;9|4V#wiuUz=e@JQ6HA(M)8+tJjnj-k(Tb9Z&1kc;F!=@{z!YT9iXPb5ND>k+|| z6=yNl>vfga4MML=u7U@TnN(8xnc~iN*U~0Uv>g7@tRf5z5b(fk5xiA!A^0fV^+=T~ zM8)9hq*uleC3f+|*7LKQL0g(OuDZISJ_s|-O?)T?6)#&s4`_EnZ_sjf-0*`*tD2u{ z@8!eC)$~Rt4Q})Dx1nurReJwkUWB7jV^nz^{jNqMlKOU?*Y{z6XS?rb!JPE~`F{x3 zI>G&ab>)950qdX*|KalTK<@HZrQ&8q zX65Bf>PFer>s0Nk(V@-s4tIPHz!$J#uUlRdtiFMsNNXjLN)CLXuIwr-MdGiTq4*RJ zd6}twaad0UD(5AXvlFl1LfE|B`+3s_fn1mU#%7VDDQWY5aFFJF4R@kOTp8s$timk+=V;M;MlI|Tk4UJ#_JWK z*;_Y%v63!2t@{{&MrFOM@*Fe4jNW4(Mc@Vx?da8uf~L!(qPz*0&QxZ1Bd7EL 
z$X_Y&%3T?xu&mxmUk$&kMX^eJZO%U!=da8M(ad{2U*dA*(l37+%TV9k8R9*s_1c=& zA%5&om(XKuWIUy(8}obz`{RcXyx4-X^SkqYC^-4Zd*fLa*$j#`qQXKR)}BU!b(G%< zK6Ex5FfRFreq<1uDy&&0o(*P`(=3#x z&KPdA@fS9EN4h`8C8$ins??WEU7m!^u4+u7xz7v0v6~-n6Pj-R#JN9;XX1tam3{o~ zC0VPyyhGSX=sx?GW^f%fUuBQofJdc4ZSwvNw@CyL&f-lYEk_vu0(PXhy0(5X7;LPuqX!sHm0??i z!05DVb=t>$#Hf+R4|kr~Rte9s-I;k5g*_!OaFUZs3k%h$?-juXs$gXN^#ql?Z&`Q6 zkhbfjn^!B*TdaEkZn6zfL)^3AG<9I!oq9ym0mkKW+QnVv|LCgpJF=?062P#d9jWBP z!3Htb483rlJ!*gL_2Z=KkxkODS}zXIWOMf!Q7i7bHGNM3uoo;0gMfAPV*Mmy(FQmP zE_c$;FRN>n`>3fI7(u1oGIKlqg*{n~y!XIY_XpUC?4krU0EwwjADF1*+bKkF-4?#(s?fL54Zx@l-8m+l0U9^H~Y8`>j`xRx^IPh)D$mDX%N1uxy~Yq~y} ziQeOwEfFZvnchEKfVW0Kp{7V~2b&hJMI;wNPHuV(sMIvQNro#brKUNbd>^fzFP(cI z=6v-W+toA5M>OzncRml*Q;z`3yw;8EUdCXYbFVhOXJ_WVK4AGi6UjAc2%u@c^xS{7 zlCpqpH8G(}3It>?KKHqQ-v_Q}I&dhfK?(#&C_a_F+^1mp9%WbLn(aJFX6KYMM#r0u z$YEF5I=8yvQ~O(t?CUF%h?Uf;LZuF`wP(f3tq7)`F@TQCnq)P?64cP8j#hEA7}{ky zHUcarS#GU-vPMz)D4Lb?NRG{NX-(Ufu^j_@Wb5`` ztc;=a^Pr|(&2u@V^*MwD!j;~UO(>Pu1D~v(RFThKnNYjqm2Da=85v^<(<;AC_14F+ zW0_A;)`uI}ANk2Id8(})*wM-RX#b)&tBt|K2O17O-GQ+{Ea=hStaf}&sgZLPiI=VT zZJ0nGKP3-bDBwavTfg#6wKrKZ2)wdG8vza3@pV+X^}9^}dBJ#f`n%wP)tz@qbz{i6 zH3eQf{XU3sI@Q3Xn1G-y*{w5;SdT^D!)?t@-nOK8cc-H!!0Kz8VR)wYveb5){u_Jcuf35}>XD19K0U7M^Dxo_;x5@d<#>ijhQFE~x34h9Lr=>5t$~=bo2hq8Bq!V=~Ww2G9FOv(DD0?QGd6hoY2EoZS`J{=<*Zr!f6TdLmoNzYP zky!FOY$5SFscw>j?XNZ;T`diSHFQdYLD=C*HkT3BCjealce-zL5nIAJ)H7RuJ~_hy z`e5$$y-&KIn_(7bBXHEq!u7oN*$ZZO0?G4;wajGyIO%VK@bot zDD2%t{C6rY9*6)KeQ3Y1m`l2ON{Y5;^dEssJ{A0C_hABTre^{+A>A)|*EL5?f0bY3W&of-L#w{2Cm`-bCzjC}r@7HpFC5rbZBR!mRBt`)r6LPF)!3Q6;g zQD{xsv0p3NI(clEs54t0y3J4X0JdbWq-9}6vkoO(a8)<~8=Hvh%=!kBq^YnoS~i!Z zQ5Z`*F6|)f!~px4V~r6DrIP zXQr0q^tmavgHUE?+3B(z~^B?@loByfDxzphRhnpojIHA9Y5$Tif&ro(TLHqB4#g zz=~`&A9h_G{OSq7#<2!Ivk=$$>O_P{_UluxVB`634-F>3Q!}jU=9Lr2WDeazQ}?p= zT4kyDJ^d~^E)D3l8)PyV-K|q2Mp{rl()xIUskvxwPahw+Pc6BX3l(P3%5L()DKMnm zjO9&kh309yNL8<3_GPblCV*@ob-nTtbu({pv(5RX#g>`M#fTeeA^vrrdogVfHYsK z)s|Yv=Ii2@AMUoFy|(LDdGQjMxJtio(r9p6g^M&mKDCwKeSs;b){P@yIA=bhEzQX|*-w_kUWTo)GcB 
z_>-uQrRjJ7DcN?d;2BOYhT3YYLy2kv&t{%2nJ*#nv&M?t3&a?d6WcHC$f)EAU2e3w zWI1uST~up2YKz$6hQ#yvJ)#4KD%*EGq4o_EXn5}ba>HF_ z0C#f0?EhJG+Z|n$a7WWoaCN=Lf3KBdO`ew;I#aKDJ@Mf$lPRGb&rMp0@4q1Wy)#0x z;1CAW^7ek*0q^db8+7^kuDSB@@mW;4A52qoa&U~ckM;NWzpn`c1qOWh58(YrGlF_( zkV=v><@4-)*tjGY%hj`Q8IV4p4kFE$it})Bs0{kwD5h)%@R1*8XDV=65Mn zS~-vht;6SxuDREj-y+lf3uflC5%ZAP6_R;_lsh$qS__T&kTL{j!Ldhe)^Vw zF>%rdqR3nr6F^8an$W)9b|)~pkIn|yrHz!F8afF|VjkR%dZ6eusoy9ED`vl_uRpZE zmI9x4=i5B@S|En0H66K>i4Mu1TT?6TWTHFnN;~fMQ=J9oLEzJ0#%~9uRf>{g94AaU z!4zN1aCG*0VgHI2(}X}S+DubzIFMRs_Hz>@-%DOctl?C5?9c0)lI83DoSexzVkiTt zE2n&FG>$Q~y_4Gj$Hn)Dt|g18V~9BMbIVHlp10j^0Ydk{q1J|V&LnHM(9*kI=!8p&{x#Pt+<`g+RQdB61WS?F~W?;-arWL)5~*4jO_ zf??NMoB-{%tqm^+T&J_lmB3F;@2fSB$mfdgtqV zyT4xcy1jmo%oTGqiXyjJYq}D$b&FXaJJ#`cd*p3pCB&KgaaeG7_k0FI2(?DXu9iyZ z_fCjx{z&eOYowg-P0qYr{mR62a z&U6|+A^}lHnj;c^58J;>-f3#oA)4y?>Z-K9UJZ}cP`=%egFvA*yU??jQ>~W`>*(#) zTVq~3;XzLX)BpTN0<5B=?PWBjsoNE;UL%C|mNY-(BHZJ9U(*msv-xU zvbb++75y7klsHc2&m&F!2`6&gDH)ZUScg&wd2u^o}cVrv;5Fk%IY2luI0tjKG}nl9SnNdv{i;KM>LK60)y7V z*V&Y_N4FPLt1fJFsmx)6lK*J<~ z9KWPRI~3Nzd|A(RUC{1+eJ-HoPPzczf>7K`N@!pt`75 zh{`0?gjPR$?ZkE!=R6Or?+4%dm+J9B%6m0D8JaO&zUb{S4a7Sd_&T<0*+Z`NIvXCn zA0Ezot(TZXZJ9T3zbStB&cjT_Ux%^M>g>I=-PZQNqygqfMEp@1^+WjIcl+(9I`w}Bc5Q&Jj0V)a%NY@i3<&yPc3 zD$#dQMpQs{K&j>5Z+5E!`G+3*=C<0WwIvhA5KK)7Ad1W8vYL|jdC+k-bES&eO`cpR zucgw}J*vU1>h+Qe$&K6se?2iXH#5@OZWZ_MX1mD+XZZx_ALxMoy`S*?eTI6gMOTb~ zlOiWl(~-25Xf}*-*KChrmzFrZFq)D||qO|bQe^;0?t zuX6S3J<7*enU{RltxTeB z{>jOh8_J-{f3NQ}`HQ2rtgW#nmrB`WG=Xxj^+7JleyRex6tf|)?P3h`07>$R9HmVz zUzE&;k+J3Kvk1-SqgMrXFg7wTxT*!yCKqx&2Cxb8tjO2pBYBC8euXw#Zh4vhzxKW}tf?hhdxMBdQ9S)TfsF-l!!Ep6?q$ktsXo;W#qs6MQTlHaSGGmazhikmXt6k$md_U*qrnLGUfSs$q8jVH=*5+W< zbaZqU&!mQ}Ma=AO>p)ouIj}o@ufW73tZE&4)9$B(RC zrlPqc=`?)3fu%J-^JTwIlPUWs=U9+=CI2-cE4`4uC_dJUCMLz=4juV9pR=4@^xDnu zx{Jg2M`gdxE)tCwvWm`fTI=oZP7h-c?x-Gl6~@pQ#qKrndI*EivMH*u-bMpBYHSHaq2X%8^W^u1TM z3&D_@n!4u3#%e~t)l87qAeUEa@+>KAH~e6Eq2h=tfpTyHT9s}SZ&qmbOHjE|A?b#R36#do6fZ2j{%frp`QWF&mFK&~_{NnB}{@0hkr 
zfnfq`lboFF?BSueVyUKfH(b=&(b04~AN5XU7l5G>KBe?D3W^grx3Dhglt#09rCUK7 zd-O#T%oXXZn>X(Xgn6?yPKj_0*5{Xq*n~hNOtzdbBE|7{3OXi(CD*+wFLDJj;@goL zESouC_acRE8yn6oVquH*5z%0}|G#_j4yLVW4_6j2ST1Q>o65U(iKX)BTgZYLoJ?tJrq0uCDrQtoqh8 zL;R>kv>hp zl*N!F%K5tsKXoB!i&u$@Tu}z2NPdUTiX|pgkC=OYd|p;Il`QLjgnXYzM}DyVzIfMw zsXPb!KFPApJd*rz>3y)VY1P$c1I0F-eoTtG_`+vWuy25oo%`|u!X84~jcZ?M#t9f1 z#YZ7!j7AcxOFdpE&9g9#7GSX|badGiWJ#{^B3R`oSCMY?Bn>0%Q+2(NA%ga1u^o6IajcUBb6 z%`x~+bj(UJMLD^-X))bs9Sb1Lyo;f(sK|XYyzvQJBtxUsb}6S62y%+nNR#8Ha9M&>|)qd%&G zvtlk0(jns%>jUPLquP~x{GUAmY$p(KP?KU{KDXR3*2v1r#1||8)3eRzp$wC_RMrw5JfUG8=>-A3xs6G$A!+7?IP~`H+W^stxA~aJ#gWamRAm z$ih#bl#Gngd}q%_mt9h}%}Gp*37h&L#$hz@aA@da&{E%NpILMNwjv%-asq-qQ?B^( z4YSznJp*up0t}+peF6-?=%Tk*H%WB1k3D8kgS)rX+=BRKXh^DBFFGeeT+|;kVd(?E$>2E+B0oZz_9|-Zk}Mvv$L~b@1ET#0MxJ{;^0HR=JuM# zL}a-Bsk0gCkvC4(&98Y`>76`@s0scVa_6p^q!hi$iAPbQiB#m{re_>qsNcYzI%`21 zf_nZjYGHBF(9ke#eO*3O>70zN`&~qi<|)gF@NlPq0F@UnGMU*TcD0@M8 z8yBt-RM*$mRNqZls_olOo+#>$O1HsE!OlgT6$OgPmDo`|Cwg(Pvhc!dV*oeXAY>{~ z6d=#hxo8*8^z#wdO?p$|=;X`MY>4O z>YowdN8s}SCW98y(aCnS2TbnYmJ^gmExo98lZh-;WKXm0dNeBi?q%Uv^|AZ6>F(d= zRY4(ChBGrWF^_3JvO1f(Ci-887E_k&b}i3#J20B<-Vxr-wujd z>TM$pWb6KSr&rfm^4y5%`L#izeT^Uvj_xNPlh&=&YJD(9-NYiydOO1DB>6{piMAeS zmSE5AUfzBHgpp1C4)TYwv#nc+phe{9srT_x^IE%ag`k8(yVU1(rQ6;Y=VT&JBD5>< zBvKLxZLxCt6Rg`_y43(ffvnwWiA=RKqaeSvpkULGx=vEk77q3!*(^eA6`L+Z73u!n zeH4f6>+Sz3Mk5E&O29!E7Lju6Wi%y2;o$%Vt*uj@2VRnijG2x8yOr&5+N~XNhHEgg zFfH1O|J`l=(;wRw<(UvU^&PZUf~Z$wiGTjHzEbu9xxa$(nx_Yi20WpxM}T0@j)RH zQxQ_J?ThNI2c*F;Kh~98dmld$2s#6N>c=QA({nM!&&}S#Xt!3Jz+h!ERfbHZ06vk5 z@T)IqaCv0yxI-w5YvhWmdnn z+5NNR=i_~v6;Adc4I{f}U zNn;r!BO`PySBw6lDxvxR9vzDA%4APA9ZM}JAYL_-_h%|rZtvmjSo8WkOgYx)Yifdg zeXZIrOAdN(uJ?j8FS_!ycI|v$e@ffs^BMz1zSC`S6I{oO<7(iBW9U0~$e9p0EAkSK zu5&x}{#K8+p*qy}Y~5ocH%B7{kpFHU|SRQ@F)%n3bI#t z4`^gg4wps`C@M^9<&hc=4!J;f;B@K_6!p|ilJnOEB!7_S5Y$h$>-WF*tqWS}K}(MVn7SLBZM8)lf?dFJ^Mb)!E5OAAuOH`AAUg7fBlLTyoe2 z(qB_+o0$M=uSXlqK!!Kf@zT67YZjxp(~^KIl+Wy-TpPHZc*_08|JISPs4X9N^(VQ{>g?ZLYua 
z(X8)HlA+sdo?YG=@aP=DVo-4Tv|r3FurY7Ap76Gr5NPAP0V*#A8T)OPYhO19%7xrM zT$C>ZLh>^}v$04=iTsq5l-N$eQ)G`imZk@|l@DvNQ5FareNXuO`Saf6c&&J$Z23;$ zeGGJsq^PCazv>@&4o%}h4U$49f)SxHW@LPt&To=((f@lG(_XsI%olsgC&bviIf(EJ zAt5Zla3FgG**A?I0)-i+u>4tmtZ1LX_e;(wyPW&03}O>`%?YdTL2-zd@9?sS2n4}c zJ+Uy3=Z*LG@e!~{3x!^;#unA$v$f*s<&BkAI_V-c zB$wfe=V84ywiy&xsCVKf-x-wzfqa!*;VL?e-~#dLp2tbjSfjGT!nPf%qzFFdM5p_7 zdvTJkU7P4|qdqv2)URPLG9Bnk5mD8owKuZPqp`iarO~7_=1Cfmb>#Dkrd!El^ED8TyD^bb1r(Em`JZ+U9-Bj zwl>MWhduXCD4uQua$c^&+}sU)Q19dj78EK-GO2(s9+`CFh{tTL`}&iD0zD>eP#pDk zbrm`dTY*xweQ5KVeZPD~e_jQ$CmZSbr5`KO465xMs>|90JmLCNuW=dRxZ1VHF&*76 z-wN0LwSH3;i``cs+6gKUe;w)o?F!?X7C4S(ufV9%Sw66|v?M5dr5lJ8{6D+^Hng^M zI~qnrL^!#?OR~ILjl>1xt36zdJUr$TWhNk7&bNa?zhWHxVas8E=jdlj79U@$pI%+S~8P`-gd&Oap*w_NHc1N!eia3PDNG z?k5`OK?z2H@mevsB>>i^3@x}Uy{9qg+#h+|)ih5wlK_64u_WQ}Yn3O+2E}%QsKcF$ z_KD`f;N*B+dc=V(eJF)VUbBi?BM8*cpV^*OROk#33)>sw|MUv*eFGNbnmH~`P8jj5 z=)9DaV`k_$sr}^_@N|)8ksH(Cw-n=|2%3)0&TT-9ESdTvy`y~XGQ9D()6*^ZrIxRW zQERAULN)1~?fo%8O|h?Y&q?;Mch4*;Iy36);-V)QpgWM!m*4x&CNg0 zaP@$!7a1Apbav4!@&exSBaA7rR{92$Gc($?!N6;$rl(^>?H6*3i-l*;6#1yI2pzGrKA%DCjiM`Enc|@6hKBIER@xoK`CL=z;wjCe1{b_W;4)bakZxl`x0u5ZJci zVUDn$O%PPAlsihIDD@{VoYvg=EWi*BpkpsANJ;aEi(eBHPr{AB0?#b16d8=gNIt8o z$heuqVyQlCZlKuvOpTI|kodB?H0>l;FTklTCLSwhW6GV3;ds2#oHz-t^D2u0PZMiK zN{~bBax<h3!X_x*5_`U#SU_b;XFXbrX3X7_1@YvX;V<*hRT?s$}eeDMF zui2oxmX=oAIOaAY2PkkC)YR14CIC1v7!kF@CRJjJGj8=oZA(|T0IatN~h z_z$Patz3?UB@|GoUp#Xb=s@6{XYS%eYCix7qayd~Kov9rIm--x;I2Ti4-8=?%m$&N ziH{#M^nK;DVu)W}Ugq4V0me^XVGY6=Yd3jQ-dlqKGB5f|Pip-68Awq!1@{FQ4_3bz#Hv}o*Amc0)PG#26M-@2q6=?$*H%8Rap%Q+nV!A~m`}!l z9;{63hp_SbLOfPc4rf60Jg&gj*I(}*C4r2O?MfnBbKF=nFf^0{`i3K!)>) zHp{{+1_H%MzzCGasBHBxE{HOC>~a97Q6n$W%M&0?V{=S?t>(pxy$@UqfK~d_Fe(N;&LdUS zdL*`~?g%yAjIf5XMh)(;$L1WHYcn<_|(W(^u8I@O6GJWRz zGq*Awgf#&{IVi+Y7)Jq-K>b{Q@GtBUSoY<_@`HRpU)KR#*~#&XadYuAzrw3cL{r^u z<=Sy0Z17H2c$Ft`H{M$sl%moB;-hYbvD@hI7<{fjewOxB_Iwb|c*^2np6pn8RTUnG zJ^vG^GV%Z5^q2$Wd%!7^L7|hm<&~A~Q}-6Y+<=XRsk&+1+h>7-D#RCF#%kbpx_8wi 
zh$W1=B}vBHZzu>8UdnndsTvH)L!ya22|#H)QR70|e_Z{Gs)dcLLQ)0hDd zpGAknS@EA?5OVJyYi!C*O46{;8|bXMsb>=PC(szS^Wa#S)cCZ$&~Y%A4yU6*s_Jh5#}WBt=2qk{*!1T?Q?Q=7oq-=Eb^4x0Nh` zGqSueKX4ChJQlxw%OZHC^j%ANb#CTP`cV5CQ)4I*W{>&MGsOX0Gpq#M8- zvX)HXxpSx)dURc#;ek1Sx*%kF;Ezhdlv^qNLZz0eH`r=L9##A@cf+%TT?V95Ro%8gp0CmyLK-16}Wv7KW1vGCciiLVTbvdfv+&@06f^bTd~cV zu&7FN@Cc}(o>Spf+Ql%dO5_70V*1bF`njAG>L)YKG1Uwc;N@|$vBhMw#kzybge#;is0y)5n8 zNH8N76gNEbTEFi(L3z+FNSiCN<)x=}Btjnc6Pe?kcG(j$17L&b`SW$xcmOZ?8Uq|z z!0vYc)2(FZ!m~97hWEJ)M7ll3S&^b71Q6k^el**j7*;y|<9n{c=3lwNaoL(%*Gv`W z8`eV3YxdDOjd7Cj~GAn2GE1) zfnwpODcOXU%hRtbK{W!-37O&JgItZL0z6dKt(Z;E{v)x8nHU=<&C)`)8k(99fNc;| zAY`Q$hmRL7_TucVsHzeU3^yG1OiWBf%g-rg583Ar^(Xp-q-C={?&#=<#UE&aUi<#( zwQ3I@P#6O3dR7dw)j#p5L{sYkkc*^J;M&J>q_;QIQKE(qwmi$SA$GPno4pw@YXc#h z!WF9r<7cGm>+8Yh4W$!_qR=0p-Y8q<>tY1N1JwD!O1BsRd>gDFVQcQ(xl>LO3I=LN z>8X;bUje8t*r7yaJ$n}1O|;ND7w(g6iW|=6K7tQ)&FKD;I61WkDv0=_@S4`v*C%0P zgs^hw&+p~q{W!E2di%7;_t7IX_o?2&!937;xs_ATUUj%|tC$6Q%#sW*)%)(-N!ET7 z==VDI7)*$(&k_l$?++abhuA zXV`t9e0gXOxO|_R9TeIFiQzzLG%^9KZ9Mp^J5v{C z`YBSboD3>x6>Cm#W!RRZN*FC7gM)`g4+XE9nB|kfW}#+Q{(7Oev8 zbfXB>PXv_&&CBMR@{H#-C zC6^XPC(KUZ7e8Ja$Ha$}U!#Ff6{OY&E?x~+RrBqHo@NZ+pf9#AyuL_o3MKee-AB5Q za-DmrqZF=jeDUX;c+~x;sh!_B+-pl&%J)ike)hEsRy#7mke8JaGuO3K~LpN+)T=?_znfYUxsK`ZG4`vGB zE%t)GJ+O#FAquj)Zp3V`g#DZ$NNnhZtkCN6!s=4>7}gGPEYffmi$#d#|uUEjEq>% z%+I&X&AF?n%*@PK2I@R}{}5!E0N{iwC+`VAyguxC^2}X@LCeh>G&Dpa@%^=SyC|>Y zsqJg>S;pUyCxf+WMe~Mxz?D$*SATAHN=r+J_x9@f>XGC$uCH5SvGNaf zbI0?*Q80PFA}!@(?KNAQQlJ?WQ&s*=U}*LgV?^X-3E2muqt9Oa>>}gJ<9T3e8o)fe zeA%ZKh;923;H^&{{>p5}@-lcbh;C|C#K^F)FlkxY$V10BAN2P2O3TSTTwDyR;U>K= z@mO3`ju^hOvf@}lpV4O?ZttBlX9oH?OhXEwXzP(cAucZ7Jz1X7>$LH`EIhdzM$y*>o!}j z8ywk@;kVx%mH2Bk@CDSHt!dnT4g8zMwR_vGOYD*%zaa<&U@K)_dOMT;@|M&lFpgR0})dShG{_k;+HHKIh=UTBZq^0m!HO}j*6e-!> F{2!>qmp=di diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_base.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_base.h deleted file mode 100644 index 143adb77176..00000000000 --- 
a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_base.h +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#pragma once -#include - -#include -#include -#include "xbyak/xbyak.h" -#include "xbyak/xbyak_util.h" - -#define OFFSET(field) offsetof(params, field) - -namespace jblas { - -namespace xbyak { -class JitBase : protected Xbyak::CodeGenerator { - protected: - JitBase(size_t size = 16 * 1024) : CodeGenerator(size) {} - - void load32(const Xbyak::Reg64& reg, const Xbyak::Address& addr) { - xor_(reg, reg); - mov(reg.cvt32(), addr); - } - - void vreg_push(const Xbyak::Reg64& baseaddr) { -#ifdef _WIN32 - for (int i = 0; i < 10; i++) { - movaps(xword[baseaddr + i * 16], Xbyak::Xmm(6 + i)); - } -#endif - } - - void vreg_pop(const Xbyak::Reg64& baseaddr) { -#ifdef _WIN32 - for (int i = 0; i < 10; i++) { - movaps(Xbyak::Xmm(6 + i), xword[baseaddr + i * 16]); - } -#endif - } - - void padto_le(const Xbyak::Reg64& _src, int padding) { - // _src=_src/padding*padding - if (padding == 1) { - return; - } - for (int i = 1; i < 16; i++) { - if ((1 << i) == padding) { - shr(_src, i); - shl(_src, i); - return; - } - } - assert(0); - } - - void generate_Nbitsmask(const Xbyak::Opmask& _msk, const Xbyak::Reg64& _pos, const Xbyak::Address& _total, - const Xbyak::Reg64& _tmp, const Xbyak::Reg64& _tmp1, int N) { - inLocalLabel(); - lea(_tmp, _total); - sub(_tmp, _pos); - cmp(_tmp, N); - 
jb(".maskflag"); - cmp(_tmp, 0); - jl(".zeroflag"); - uint64_t allmask = (static_cast(1) << N) - 1; - if (N == 64) { - allmask = static_cast(-1); - } - mov(_tmp, allmask); - kmovq(_msk, _tmp); - jmp(".maskend"); - L(".maskflag"); - mov(_tmp1, 1); - shlx(_tmp1, _tmp1, _tmp); - sub(_tmp1, 1); - kmovq(_msk, _tmp1); - jmp(".maskend"); - L(".zeroflag"); - mov(_tmp1, 0); - kmovq(_msk, _tmp1); - L(".maskend"); - outLocalLabel(); - } - void generate_Nbitsmask(const Xbyak::Opmask& _msk, const Xbyak::Reg64& _pos, const Xbyak::Reg64& _total, - const Xbyak::Reg64& _tmp, const Xbyak::Reg64& _tmp1, int N) { - generate_Nbitsmask(_msk, _pos, ptr[_total], _tmp, _tmp1, N); - } -}; - -class JitAvx : protected JitBase { - protected: - static int constexpr VBits = 256; - static int constexpr VecBytes = VBits / 8; - static int constexpr RegCount = 16; - typedef Xbyak::Ymm vreg_t; -}; - -class JitAvx2 : protected JitAvx { - protected: - static int constexpr VBits = 256; - typedef Xbyak::Ymm vreg_t; - void vxor(const vreg_t& x1, const vreg_t& x2, const Xbyak::Operand& op) { vpxor(x1, x2, op); } - - void loadbf16_f32(const Xbyak::Ymm& dst, const Xbyak::Address& addr) { - vpmovzxwd(dst, addr); - vpslld(dst, dst, 16); - } -}; - -class JitAvx512f : protected JitAvx2 { - protected: - static int constexpr VBits = 512; - static int constexpr VecBytes = VBits / 8; - static int constexpr RegCount = 32; - typedef Xbyak::Zmm vreg_t; - - void vxor(const vreg_t& x1, const vreg_t& x2, const Xbyak::Operand& op) { vpxorq(x1, x2, op); } - - void interleave_2rows_4regs(Xbyak::Zmm* src_2regs, Xbyak::Zmm* tmp_2reg) { - vpunpcklwd(tmp_2reg[0], src_2regs[0], src_2regs[1]); - vpunpckhwd(tmp_2reg[1], src_2regs[0], src_2regs[1]); - vshuff32x4(src_2regs[0], tmp_2reg[0], tmp_2reg[1], 0 | (1 << 2) | (0 << 4) | (1 << 6)); - vshuff32x4(src_2regs[0], src_2regs[0], src_2regs[0], 0 | (2 << 2) | (1 << 4) | (3 << 6)); - vshuff32x4(src_2regs[1], tmp_2reg[0], tmp_2reg[1], 2 | (3 << 2) | (2 << 4) | (3 << 6)); - 
vshuff32x4(src_2regs[1], src_2regs[1], src_2regs[1], 0 | (2 << 2) | (1 << 4) | (3 << 6)); - } - - void transpose16x16_4B(Xbyak::Zmm* src, Xbyak::Zmm* tmp, const int N = 16) { - for (int i = 0; i < 8; ++i) { - vpunpckldq(tmp[2 * i + 0], src[2 * i], src[2 * i + 1]); - vpunpckhdq(tmp[2 * i + 1], src[2 * i], src[2 * i + 1]); - } - - for (int i = 0; i < 4; ++i) { - vpunpcklqdq(src[4 * i + 0], tmp[4 * i + 0], tmp[4 * i + 2]); - vpunpckhqdq(src[4 * i + 1], tmp[4 * i + 0], tmp[4 * i + 2]); - vpunpcklqdq(src[4 * i + 2], tmp[4 * i + 1], tmp[4 * i + 3]); - vpunpckhqdq(src[4 * i + 3], tmp[4 * i + 1], tmp[4 * i + 3]); - } - - for (int i = 0; i < 2; ++i) { - vshufi32x4(tmp[8 * i + 0], src[8 * i + 0], src[8 * i + 4], 0x88); - vshufi32x4(tmp[8 * i + 1], src[8 * i + 1], src[8 * i + 5], 0x88); - vshufi32x4(tmp[8 * i + 2], src[8 * i + 2], src[8 * i + 6], 0x88); - vshufi32x4(tmp[8 * i + 3], src[8 * i + 3], src[8 * i + 7], 0x88); - vshufi32x4(tmp[8 * i + 4], src[8 * i + 0], src[8 * i + 4], 0xdd); - vshufi32x4(tmp[8 * i + 5], src[8 * i + 1], src[8 * i + 5], 0xdd); - vshufi32x4(tmp[8 * i + 6], src[8 * i + 2], src[8 * i + 6], 0xdd); - vshufi32x4(tmp[8 * i + 7], src[8 * i + 3], src[8 * i + 7], 0xdd); - } - - // last step and move out - for (int i = 0; i < N; ++i) { - vshufi32x4(src[i], tmp[i % 8], tmp[8 + i % 8], i < 8 ? 
0x88 : 0xdd); - } - } - - void interleave_4rows_6regs(Xbyak::Zmm* src_4regs, Xbyak::Zmm* tmp_regs, const Xbyak::Opmask* masks) { - vpunpcklbw(tmp_regs[0], src_4regs[0], src_4regs[1]); - vpunpckhbw(tmp_regs[1], src_4regs[0], src_4regs[1]); - vpunpcklbw(tmp_regs[2], src_4regs[2], src_4regs[3]); - vpunpckhbw(tmp_regs[3], src_4regs[2], src_4regs[3]); - - vpunpcklwd(tmp_regs[4], tmp_regs[0], tmp_regs[2]); - vpunpckhwd(tmp_regs[5], tmp_regs[0], tmp_regs[2]); - vpunpcklwd(tmp_regs[0], tmp_regs[1], tmp_regs[3]); - vpunpckhwd(tmp_regs[2], tmp_regs[1], tmp_regs[3]); - vshuff32x4(tmp_regs[1], tmp_regs[4], tmp_regs[0], (4 << 4) | 4); - vshuff32x4(tmp_regs[3], tmp_regs[5], tmp_regs[2], (4 << 4) | 4); - vmovups(src_4regs[0], tmp_regs[1]); - vshuff32x4(src_4regs[0] | masks[0], tmp_regs[3], tmp_regs[3], 0 | (0 << 2) | (0 << 4) | (2 << 6)); - vmovups(src_4regs[1], tmp_regs[3]); - vshuff32x4(src_4regs[1] | masks[1], tmp_regs[1], tmp_regs[1], 1 | (0 << 2) | (3 << 4) | (0 << 6)); - vshuff32x4(tmp_regs[1], tmp_regs[4], tmp_regs[0], (14 << 4) | 14); - vshuff32x4(tmp_regs[3], tmp_regs[5], tmp_regs[2], (14 << 4) | 14); - vmovups(src_4regs[2], tmp_regs[1]); - vshuff32x4(src_4regs[2] | masks[0], tmp_regs[3], tmp_regs[3], 0 | (0 << 2) | (0 << 4) | (2 << 6)); - vmovups(src_4regs[3], tmp_regs[3]); - vshuff32x4(src_4regs[3] | masks[1], tmp_regs[1], tmp_regs[1], 1 | (0 << 2) | (3 << 4) | (0 << 6)); - } - - void cvt_fp32_bf16(const Xbyak::Ymm& _bf16, const Xbyak::Zmm& _fp32) { - vpsrld(_fp32, _fp32, 16); - vpmovdw(_bf16, _fp32); - } - - void loadbf16_f32(const Xbyak::Zmm& dst, const Xbyak::Address& addr) { - vpmovzxwd(dst, addr); - vpslld(dst, dst, 16); - } - - void broadcastbf16_f32(const Xbyak::Zmm& dst, const Xbyak::Reg64& tmp, const Xbyak::Address& addr) { - mov(tmp.cvt16(), addr); - shl(tmp.cvt32(), 16); - vpbroadcastd(dst, tmp.cvt32()); - } - - void store_fp32_bf16(const Xbyak::Zmm& _fp32, const Xbyak::Address& _add) { - auto bf16 = Xbyak::Ymm(_fp32.getIdx()); - cvt_fp32_bf16(bf16, _fp32); 
- vmovups(_add, bf16); - } -}; - -class JitAvx512_bf16 : protected JitAvx512f {}; - -class JitAvx512_fp16 : protected JitAvx512f {}; - -class JitAvx512vnni : protected JitAvx512f { - protected: - void vpdpbusds_(const Xbyak::Xmm& x1, const Xbyak::Xmm& x2, const Xbyak::Operand& op) { - vpdpbusds(x1, x2, op, Xbyak::EvexEncoding); - } -}; - -class JitAvxvnni : protected JitAvx2 { - protected: - void vpdpbusds_(const Xbyak::Xmm& x1, const Xbyak::Xmm& x2, const Xbyak::Operand& op) { - vpdpbusds(x1, x2, op, Xbyak::VexEncoding); - } -}; - -class JitAmxtile : protected JitAvx512f { - public: - struct alignas(64) tileconfig_t { - uint8_t palette_id; - uint8_t reserved[15]; - uint16_t colb[16]; - uint8_t rows[16]; - }; - static int constexpr TileCount = 8; - - typedef long long (*configure_t)(void*); - - static void generate_config(Xbyak::CodeGenerator* g) { - Xbyak::util::StackFrame st(g, 1, 0, 0); - auto& parambase = st.p[0]; - g->ldtilecfg(g->ptr[parambase]); - } - - static void configure_tiles(tileconfig_t& tc, int TILE_M, int TILE_N, int TILE_K, int elesize, int ANum, int BNum, - int CNum) { - // Filling tile configure structure. Could be done offline. - tc.palette_id = 1; - // Configure C tiles - int t = 0; - for (; t < CNum; ++t) { - tc.rows[t] = static_cast(TILE_M); - tc.colb[t] = static_cast(TILE_N * 4); - } - // Configure A tiles - for (; t < CNum + ANum; ++t) { - tc.rows[t] = static_cast(TILE_M); - tc.colb[t] = static_cast(TILE_K * elesize); - } - // Configure B tile. B effectively has 64 rows and 16 columns. 
- int kpack = 4 / elesize; - for (; t < CNum + ANum + BNum; ++t) { - tc.rows[t] = static_cast(TILE_K / kpack); - tc.colb[t] = static_cast(TILE_N * 4); - } - } -}; - -class JitAmxbf16 : protected JitAmxtile { - protected: - void cvt_fp32_bf16(const Xbyak::Ymm& _bf16, const Xbyak::Zmm& _fp32) { vcvtneps2bf16(_bf16, _fp32); } -}; - -class JitAmxint8 : protected JitAmxtile { - protected: - template - void _tdpb(const Xbyak::Tmm& x1, const Xbyak::Tmm& x2, const Xbyak::Tmm& x3); -}; -template <> -inline void JitAmxint8::_tdpb(const Xbyak::Tmm& x1, const Xbyak::Tmm& x2, const Xbyak::Tmm& x3) { - tdpbssd(x1, x2, x3); -} -template <> -inline void JitAmxint8::_tdpb(const Xbyak::Tmm& x1, const Xbyak::Tmm& x2, const Xbyak::Tmm& x3) { - tdpbsud(x1, x2, x3); -} -template <> -inline void JitAmxint8::_tdpb(const Xbyak::Tmm& x1, const Xbyak::Tmm& x2, const Xbyak::Tmm& x3) { - tdpbusd(x1, x2, x3); -} -template <> -inline void JitAmxint8::_tdpb(const Xbyak::Tmm& x1, const Xbyak::Tmm& x2, const Xbyak::Tmm& x3) { - tdpbuud(x1, x2, x3); -} -} // namespace xbyak -} // namespace jblas diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas.h deleted file mode 100644 index d3fe2ff046c..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas.h +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#pragma once -#include -enum JBLAS_CODE { - JblasSuccess = 0, - JblasInvalidParam = 1, - JblasInvalidISA = 2, - JblasRuntimeError = 4, - JblasNotSupport = 8, -}; -enum JBLAS_ISA : uint8_t { - JblasNoSIMD = 0, - JblasAVX, - JblasAVX2, - JblasAVX_VNNI, - JblasAVX512F, - JblasAVX512_VNNI, - JblasAMX_BF16, - JblasAMX_INT8, - JblasAVX512_FP16, - JblasAVX512_BF16, -}; -enum class JBLAS_DTYPE : uint32_t { - EleBitsMask = 0xff, - EleBitsShift = 0, - EleBitsUndef = 0, - EleBits4 = 4, - EleBits8 = 8, - EleBits16 = 16, - EleBits32 = 32, - EleBits64 = 64, - TypeMask = 0xff00, - TypeShift = 8, - TypeFloat = 0 << TypeShift, - TypeInt = 1 << TypeShift, - SubTypeMask = 0xff0000, - SubTypeShift = 16, - SubType0 = 0 << SubTypeShift, - SubType1 = 1 << SubTypeShift, - SubType2 = 2 << SubTypeShift, - SubType3 = 3 << SubTypeShift, - F64 = EleBits64 | TypeFloat, - F32 = EleBits32 | TypeFloat, - F16 = EleBits16 | TypeFloat, - BF16 = EleBits16 | TypeFloat | SubType1, - F8_E4M3 = EleBits8 | TypeFloat, - F8_E5M2 = EleBits8 | TypeFloat | SubType1, - F8_E3M4 = EleBits8 | TypeFloat | SubType2, - F8_E8M0 = EleBits8 | TypeFloat | SubType3, - S8 = EleBits8 | TypeInt, - U8 = EleBits8 | TypeInt | SubType1, - S4_CLIP = EleBits4 | TypeInt, - S4_FULLRANGE = EleBits4 | TypeInt | SubType1, - F4_E2M1 = EleBits4 | TypeFloat, - F4_BNB = EleBits4 | TypeFloat | SubType1, - F4_NF4 = EleBits4 | TypeFloat | SubType2, - S32 = EleBits32 | TypeInt, - U32 = EleBits32 | TypeInt | SubType1, -}; - -enum JBLAS_LAYOUT { JblasRowMajor = 101, JblasColMajor = 102 }; -enum JBLAS_TRANSPOSE { - JblasNoTrans = 111, - JblasTrans = 112, - JblasConjTrans = 113, -}; -enum JBLAS_ELTWISEOP { GELU, SWISH, TANH, EXP, LOW_PRECISION_EXP, RELU, LINEAR }; - -enum class JBLAS_PROLOGUEB_IDS : uint32_t { - Undef = (uint32_t)-1, - Begin = 0, - NormalBegin = Begin, - WeightPack = NormalBegin, - NormalEnd, - KBlockBegin = NormalEnd, - WeightKBlockNInteger = KBlockBegin, - WeightKBlockNFloat, - WeightKBlockS8, - WeightKBlockS4, - WeightKBlockF4, 
- WeightKBlockF8, - KBlockEnd, - End, -}; diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_device.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_device.h deleted file mode 100644 index 5cac1080bc6..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_device.h +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#pragma once -#include "jit_blas.h" -#include "xbyak/xbyak_util.h" - -namespace jblas { - -namespace device { - -struct X64_ISA { - int64_t MMX : 1; // 0 - int64_t SSE : 1; // 1 - int64_t SSE2 : 1; // 2 - int64_t SSE3 : 1; // 3 - int64_t SSSE3 : 1; // 4 - int64_t SSE41 : 1; // 5 - int64_t SSE42 : 1; // 6 - int64_t AVX : 1; // 7 - int64_t F16C : 1; // 8 - int64_t FMA : 1; // 9 - int64_t AVX2 : 1; // 10 - int64_t AVX_VNNI : 1; // 11 - int64_t AVX_VNNI_INT8 : 1; // 12 - int64_t AVX_NE_CONVERT : 1; // 13 - int64_t AVX_IFMA : 1; // 14 - int64_t AVX512F : 1; // 15 - int64_t AVX512BW : 1; // 16 - int64_t AVX512CD : 1; // 17 - int64_t AVX512DQ : 1; // 18 - int64_t AVX512ER : 1; // 19 - int64_t AVX512IFMA52 : 1; // 20 - int64_t AVX512PF : 1; // 21 - int64_t AVX512VL : 1; // 22 - int64_t AVX512VPOPCNTDQ : 1; // 23 - int64_t AVX512_4FMAPS : 1; // 24 - int64_t AVX512_4VNNIW : 1; // 25 - int64_t AVX512_BF16 : 1; // 26 - int64_t AVX512_BITALG : 1; // 27 - int64_t AVX512_VBMI : 1; // 28 - int64_t AVX512_VBMI2 : 1; 
// 29 - int64_t AVX512_VNNI : 1; // 30 - int64_t AVX512_VP2INTERSECT : 1; // 31 - int64_t AVX512_FP16 : 1; // 32 - int64_t AMX_TILE : 1; // 33 - int64_t AMX_BF16 : 1; // 34 - int64_t AMX_INT8 : 1; // 35 - int64_t AMX_FP16 : 1; // 36 - int64_t AMX_COMPLEX : 1; // 37 - int64_t reserved : (64 - 38); -}; - -class AVX2_Default { - public: - static constexpr bool MMX = 1; - static constexpr bool SSE = 1; - static constexpr bool SSE2 = 1; - static constexpr bool SSE3 = 1; - static constexpr bool SSSE3 = 1; - static constexpr bool SSE41 = 1; - static constexpr bool SSE42 = 1; - static constexpr bool AVX = 1; - static constexpr bool F16C = 1; - static constexpr bool FMA = 1; - static constexpr bool AVX2 = 1; - static constexpr bool AVX_VNNI = 0; - static constexpr bool AVX_VNNI_INT8 = 0; - static constexpr bool AVX_NE_CONVERT = 0; - static constexpr bool AVX_IFMA = 0; - static constexpr bool AVX512F = 0; - static constexpr bool AVX512BW = 0; - static constexpr bool AVX512CD = 0; - static constexpr bool AVX512DQ = 0; - static constexpr bool AVX512ER = 0; - static constexpr bool AVX512IFMA52 = 0; - static constexpr bool AVX512PF = 0; - static constexpr bool AVX512VL = 0; - static constexpr bool AVX512VPOPCNTDQ = 0; - static constexpr bool AVX512_4FMAPS = 0; - static constexpr bool AVX512_4VNNIW = 0; - static constexpr bool AVX512_BF16 = 0; - static constexpr bool AVX512_BITALG = 0; - static constexpr bool AVX512_VBMI = 0; - static constexpr bool AVX512_VBMI2 = 0; - static constexpr bool AVX512_VNNI = 0; - static constexpr bool AVX512_VP2INTERSECT = 0; - static constexpr bool AVX512_FP16 = 0; - static constexpr bool AMX_TILE = 0; - static constexpr bool AMX_BF16 = 0; - static constexpr bool AMX_INT8 = 0; - static constexpr bool AMX_FP16 = 0; - static constexpr bool AMX_COMPLEX = 0; -}; - -class AVX512_VNNI_Default { - public: - static constexpr bool MMX = 1; - static constexpr bool SSE = 1; - static constexpr bool SSE2 = 1; - static constexpr bool SSE3 = 1; - static constexpr 
bool SSSE3 = 1; - static constexpr bool SSE41 = 1; - static constexpr bool SSE42 = 1; - static constexpr bool AVX = 1; - static constexpr bool F16C = 1; - static constexpr bool FMA = 1; - static constexpr bool AVX2 = 1; - static constexpr bool AVX_VNNI = 0; - static constexpr bool AVX_VNNI_INT8 = 0; - static constexpr bool AVX_NE_CONVERT = 0; - static constexpr bool AVX_IFMA = 0; - static constexpr bool AVX512F = 1; - static constexpr bool AVX512BW = 1; - static constexpr bool AVX512CD = 1; - static constexpr bool AVX512DQ = 1; - static constexpr bool AVX512ER = 0; - static constexpr bool AVX512IFMA52 = 0; - static constexpr bool AVX512PF = 0; - static constexpr bool AVX512VL = 1; - static constexpr bool AVX512VPOPCNTDQ = 0; - static constexpr bool AVX512_4FMAPS = 0; - static constexpr bool AVX512_4VNNIW = 0; - static constexpr bool AVX512_BF16 = 0; - static constexpr bool AVX512_BITALG = 0; - static constexpr bool AVX512_VBMI = 0; - static constexpr bool AVX512_VBMI2 = 0; - static constexpr bool AVX512_VNNI = 1; - static constexpr bool AVX512_VP2INTERSECT = 0; - static constexpr bool AVX512_FP16 = 0; - static constexpr bool AMX_TILE = 0; - static constexpr bool AMX_BF16 = 0; - static constexpr bool AMX_INT8 = 0; - static constexpr bool AMX_FP16 = 0; - static constexpr bool AMX_COMPLEX = 0; -}; - -class SapphireRapids { - public: - static constexpr bool MMX = 1; - static constexpr bool SSE = 1; - static constexpr bool SSE2 = 1; - static constexpr bool SSE3 = 1; - static constexpr bool SSSE3 = 1; - static constexpr bool SSE41 = 1; - static constexpr bool SSE42 = 1; - static constexpr bool AVX = 1; - static constexpr bool F16C = 1; - static constexpr bool FMA = 1; - static constexpr bool AVX2 = 1; - static constexpr bool AVX_VNNI = 0; - static constexpr bool AVX_VNNI_INT8 = 0; - static constexpr bool AVX_NE_CONVERT = 0; - static constexpr bool AVX_IFMA = 0; - static constexpr bool AVX512F = 1; - static constexpr bool AVX512BW = 1; - static constexpr bool AVX512CD = 
1; - static constexpr bool AVX512DQ = 1; - static constexpr bool AVX512ER = 0; - static constexpr bool AVX512IFMA52 = 0; - static constexpr bool AVX512PF = 0; - static constexpr bool AVX512VL = 1; - static constexpr bool AVX512VPOPCNTDQ = 0; - static constexpr bool AVX512_4FMAPS = 0; - static constexpr bool AVX512_4VNNIW = 0; - static constexpr bool AVX512_BF16 = 0; - static constexpr bool AVX512_BITALG = 0; - static constexpr bool AVX512_VBMI = 0; - static constexpr bool AVX512_VBMI2 = 0; - static constexpr bool AVX512_VNNI = 1; - static constexpr bool AVX512_VP2INTERSECT = 0; - static constexpr bool AVX512_FP16 = 0; - static constexpr bool AMX_TILE = 1; - static constexpr bool AMX_BF16 = 1; - static constexpr bool AMX_INT8 = 1; - static constexpr bool AMX_FP16 = 0; - static constexpr bool AMX_COMPLEX = 0; -}; - -template -class isa_base { - public: - static bool constexpr avx = ISA_T >= JblasAVX; - static bool constexpr avx2 = ISA_T >= JblasAVX2; - static bool constexpr avx512f = ISA_T >= JblasAVX512F; - static bool constexpr avx512_vnni = ISA_T >= JblasAVX512_VNNI; - static bool constexpr avx512_fp16 = ISA_T >= JblasAVX512_FP16; - static bool constexpr amx_bf16 = ISA_T >= JblasAMX_BF16; - static bool constexpr amx_int8 = ISA_T >= JblasAMX_INT8; -}; - -class CpuDevice { - public: - inline void setThreads(int _nth) { - if (_nth <= 0) { - numthreads = numcores; - } else { - numthreads = std::min(numcores, _nth); - } - } - inline int getThreads() { return numthreads; } - inline int getCores() { return numcores; } - inline uint32_t getL2CacheSize() { return L2Cache; } - inline uint32_t getL1CacheSize() { return L1Cache; } - inline bool AVX() { return mHasAVX; } - inline bool AVX2() { return mHasAVX2; } - inline bool AVX_VNNI() { return mHasAVX_VNNI; } - inline bool AVX512F() { return mHasAVX512F; } - inline bool AVX512_VNNI() { return mHasAVX512_VNNI; } - inline bool AMX_INT8() { return mHasAMX_INT8; } - inline bool AMX_BF16() { return mHasAMX_BF16; } - inline bool 
AVX512_BF16() { return mHasAVX512_BF16; } - inline bool AVX512_FP16() { return mHasAVX512_FP16; } -#define ADD_FLAG(isa) mHas##isa = _cpu.has(_cpu.t##isa) - CpuDevice() { - static Xbyak::util::Cpu _cpu; - L1Cache = _cpu.getDataCacheSize(0); - L2Cache = _cpu.getDataCacheSize(1); - ADD_FLAG(AVX); - ADD_FLAG(AVX2); - ADD_FLAG(AVX512F); - ADD_FLAG(AVX512_VNNI); - ADD_FLAG(AVX_VNNI); - ADD_FLAG(AMX_BF16); - ADD_FLAG(AMX_INT8); - ADD_FLAG(AVX512_BF16); - ADD_FLAG(AVX512_FP16); - numcores = _cpu.getNumCores(Xbyak::util::IntelCpuTopologyLevel::CoreLevel); - numthreads = numcores; - } - - static CpuDevice* getInstance() { - static CpuDevice instance; - return &instance; - } - - void print() { - printf( - "AVX:%d AVX2:%d AVX512F:%d AVX_VNNI:%d AVX512_VNNI:%d AMX_INT8:%d AMX_BF16:%d AVX512_BF16:%d AVX512_FP16:%d\n", - mHasAVX, mHasAVX2, mHasAVX512F, mHasAVX_VNNI, mHasAVX512_VNNI, mHasAMX_INT8, mHasAMX_BF16, mHasAVX512_BF16, - mHasAVX512_FP16); - } -#undef ADD_FLAG - - protected: - uint32_t L2Cache, L1Cache; - bool mHasAVX2, mHasAVX_VNNI, mHasAVX, mHasAVX512_VNNI, mHasAMX_INT8, mHasAMX_BF16, mHasAVX512F, mHasAVX512_BF16, - mHasAVX512_FP16; - int numcores; - int numthreads; -}; - -#define GetCPUDevice() auto _cd = jblas::device::CpuDevice::getInstance(); - -class CpuBase { - public: - CpuBase() { - GetCPUDevice(); - mL2Cache = _cd->getL2CacheSize(); - mL1Cache = _cd->getL1CacheSize(); - mNumThreads = _cd->getThreads(); - } - size_t mL2Cache, mL1Cache; - int mNumThreads; -}; -} // namespace device -} // namespace jblas diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_epilogue.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_epilogue.h deleted file mode 100644 index 6c7e0ee4efe..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_epilogue.h +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not 
use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#pragma once -#include - -#include "jit_base.h" -#include "jit_blas.h" -#include "jit_blas_utils.h" -#include "kernel_wrapper.h" - -namespace jblas { -namespace epilogue { -namespace gemm { - -template -struct ParamAccumulatorWriteBack { - DT* C; - int ldc; - void* elt_const_v; -}; - -template -class AccumulatorWriteBack { - public: - using SType = _SRC_T; - using DType = _DST_T; - using Param = ParamAccumulatorWriteBack; - - JBLAS_CODE forward(const _SRC_T* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - return kernel::wrapper::Memcpy2D::template forward(cacheptr, cptr, M, N, cachestep, _param.ldc, - _param.elt_const_v); - } -}; - -template -class CustomAccumulatorWriteBackWithEltop { - public: - using Param = ParamAccumulatorWriteBack<_DST_T>; - JBLAS_CODE forward(const _SRC_T* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - if constexpr (std::is_same<_SRC_T, float>::value && std::is_same<_DST_T, float>::value) { - return kernel::wrapper::Memcpy2D::template forward1(cacheptr, cptr, M, N, cachestep, - _param.ldc, _param.elt_const_v); - } else { - assert(false); - } - } -}; -template -using 
AccumulatorWriteBackFp32 = AccumulatorWriteBack; -template -using AccumulatorWriteBackInt32 = AccumulatorWriteBack; -template -using AccumulatorWriteBackBf16 = AccumulatorWriteBack; -template -using AccumulatorWriteBackFp16 = AccumulatorWriteBack; -template -using AccumulatorWriteBackBf16Fp32 = AccumulatorWriteBack; -template -using AccumulatorWriteBackFp16Fp32 = AccumulatorWriteBack; -template -using AccumulatorWriteBackFp32Bf16 = AccumulatorWriteBack; - -template -using AccumulatorWriteBackWithGeluFp32 = CustomAccumulatorWriteBackWithEltop; - -template -using AccumulatorWriteBackWithSwishFp32 = CustomAccumulatorWriteBackWithEltop; - -template -struct ParamAlphaBetaProcess { - DT *C, *D; - int ldc, ldd; - float alpha, beta; -}; -template -class AlphaBetaProcessFp32 { - public: - using Param = ParamAlphaBetaProcess; - - JBLAS_CODE forward(const float* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto DOffset = M_offset * _param.ldd + N_offset; - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - auto dptr = _param.D + DOffset; - return kernel::wrapper::AlphaBetaF32F32::template forward(_param.alpha, cacheptr, cachestep, _param.beta, - dptr, _param.ldd, cptr, _param.ldc, M, N); - } -}; - -struct ParamCompFp32BlockEpilogue { - void* scales; - JBLAS_DTYPE scaledtype; - int ldsb; - int8_t* zps = nullptr; - float* reduce = nullptr; - int ldra; -}; -template -class CompFp32BlockEpilogue { - public: - using Param = ParamCompFp32BlockEpilogue; - JBLAS_CODE forward(const float* srcptr, float* dstptr, const int cachestep, const int M_offset, const int N_offset, - const int K_offset, const int M, const int N, const Param& _param, void* tmpcache, - size_t cachesize) { - auto ret = JblasNotSupport; - if (_param.scaledtype == JBLAS_DTYPE::F32) { - ret = kernel::wrapper::CompFp32BlockScale::template forward( - 
reinterpret_cast(_param.scales) + K_offset * _param.ldsb + N_offset, srcptr, cachestep, dstptr, - cachestep, M, N); - assert(ret == JblasSuccess); - if (_param.zps != nullptr) { - ret = kernel::wrapper::RemoveZeroPointBias::forward_wei( - dstptr, cachestep, M, N, _param.zps + K_offset * _param.ldsb + N_offset, - reinterpret_cast(_param.scales) + K_offset * _param.ldsb + N_offset, _param.ldra, - _param.reduce + M_offset * _param.ldra + K_offset); - } - assert(ret == JblasSuccess); - return ret; - } else if (_param.scaledtype == JBLAS_DTYPE::BF16) { - ret = kernel::wrapper::CompFp32BlockScale::template forward( - reinterpret_cast(_param.scales) + K_offset * _param.ldsb + N_offset, srcptr, cachestep, dstptr, - cachestep, M, N); - if (_param.zps != nullptr) { - assert(0); - } - assert(ret == JblasSuccess); - return ret; - } else if (_param.scaledtype == JBLAS_DTYPE::F8_E8M0) { - ret = kernel::wrapper::CompFp32BlockScale::template forward( - reinterpret_cast(_param.scales) + K_offset * _param.ldsb + N_offset, srcptr, cachestep, dstptr, - cachestep, M, N); - if (_param.zps != nullptr) { - assert(0); - } - } - return JblasNotSupport; - } -}; - -struct ParamDequantInt32ToFp32 { - float* C; - int ldc; - int ldsa; - float* scalesA; - float* scalesB; -}; -template -class DequantInt32ToFp32 { - public: - using Param = ParamDequantInt32ToFp32; - JBLAS_CODE forward(const int32_t* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - return kernel::wrapper::DequanS32Fp32::template forward(cacheptr, cachestep, cptr, _param.ldc, M, N, - _param.scalesA + M_offset * _param.ldsa, _param.ldsa, - _param.scalesB + N_offset); - } -}; - -struct ParamCompInt8BlockEpilogue { - void* scalesB; - JBLAS_DTYPE scaleBdtype; - int ldsb; - float* scalesA; - int ldsa; - // optional if A asym - uint8_t* zpA = 
nullptr; - void* reduceB = nullptr; - JBLAS_DTYPE reduceBdtype = JBLAS_DTYPE::F32; - // optional if B asym - int8_t* zpB = nullptr; - float* reduceA = nullptr; - int K = 1; -}; -template -class CompInt8BlockEpilogue { - public: - using Param = ParamCompInt8BlockEpilogue; - JBLAS_CODE forward(const int32_t* srcptr, float* dstptr, const int cachestep, const int M_offset, const int N_offset, - const int K_offset, const int M, const int N, const Param& _param, void* tmpcache, - size_t cachesize) { - JBLAS_CODE ret = JblasNotSupport; - float* scab = nullptr; - size_t ScaleBTmpSize = N * sizeof(float); - size_t ReduceBTmpSize = N * sizeof(float); - assert(cachesize >= (ScaleBTmpSize + ReduceBTmpSize)); - if (_param.scaleBdtype == JBLAS_DTYPE::BF16) { - auto scache = reinterpret_cast(tmpcache); - ret = kernel::wrapper::Memcpy2DBf16CvtFp32::template forward( - reinterpret_cast(_param.scalesB) + N_offset + K_offset * _param.ldsb, scache, 1, N, N, N, - false); - assert(ret == JblasSuccess); - scab = scache; - } else if (_param.scaleBdtype == JBLAS_DTYPE::F32) { - scab = reinterpret_cast(_param.scalesB) + N_offset + K_offset * _param.ldsb; - } - float* redb = nullptr; - if (_param.reduceB) { - if (_param.reduceBdtype == JBLAS_DTYPE::BF16) { - auto rcache = reinterpret_cast(reinterpret_cast(tmpcache) + ScaleBTmpSize); - ret = kernel::wrapper::Memcpy2DBf16CvtFp32::template forward( - reinterpret_cast(_param.reduceB) + N_offset + K_offset * _param.ldsb, rcache, 1, N, N, N, - false); - assert(ret == JblasSuccess); - redb = rcache; - } else if (_param.reduceBdtype == JBLAS_DTYPE::F32) { - redb = reinterpret_cast(_param.reduceB) + N_offset + K_offset * _param.ldsb; - } - } - ret = kernel::wrapper::DequanS32Fp32::template forward( - srcptr, cachestep, reinterpret_cast(const_cast(srcptr)), cachestep, M, N, - _param.scalesA + M_offset * _param.ldsa + K_offset, _param.ldsa, scab); - assert(ret == JblasSuccess); - ret = kernel::wrapper::AccumulateFp32::template 
forward(reinterpret_cast(srcptr), cachestep, - dstptr, cachestep, M, N); - assert(ret == JblasSuccess); - - if (_param.zpA == nullptr) { - if (_param.zpB == nullptr) { - return ret; - } else { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_wei( - dstptr, cachestep, M, N, _param.zpB + N_offset + K_offset * _param.ldsb, scab, _param.ldsa, - _param.reduceA + M_offset * _param.ldsa + K_offset); - } - } else { - if (_param.zpB == nullptr) { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_act( - dstptr, cachestep, M, N, _param.zpA + M_offset * _param.ldsa + K_offset, - _param.scalesA + M_offset * _param.ldsa + K_offset, _param.ldsa, redb); - } else { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_both( - dstptr, cachestep, M, N, _param.zpA + M_offset * _param.ldsa + K_offset, - _param.zpB + N_offset + K_offset * _param.ldsb, _param.scalesA + M_offset * _param.ldsa + K_offset, scab, - _param.ldsa, _param.K, _param.reduceA + M_offset * _param.ldsa + K_offset, redb); - } - } - return ret; - } -}; - -struct ParamZpDequantInt32ToFp32 { - // necessary - float* C; - int ldc; - int ldsa; - float* scalesA; - float* scalesB; - // optional if A asym - uint8_t* zpA = nullptr; - float* reduceB = nullptr; - // optional if B asym - int8_t* zpB = nullptr; - float* reduceA = nullptr; - int K = 1; -}; -template -class ZpDequantInt32ToFp32 { - public: - using Param = ParamZpDequantInt32ToFp32; - JBLAS_CODE forward(const int32_t* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - auto ret = kernel::wrapper::DequanS32Fp32::template forward(cacheptr, cachestep, cptr, _param.ldc, M, N, - _param.scalesA + M_offset * _param.ldsa, - _param.ldsa, _param.scalesB + N_offset); - if (ret != JblasSuccess) { - return ret; - } - if (_param.zpA == nullptr && _param.zpB 
== nullptr) { - return ret; - } else if (_param.zpA != nullptr && _param.zpB == nullptr) { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_act( - cptr, _param.ldc, M, N, _param.zpA + M_offset * _param.ldsa, _param.scalesA + M_offset * _param.ldsa, - _param.ldsa, _param.reduceB + N_offset); - } else if (_param.zpA == nullptr && _param.zpB != nullptr) { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_wei( - cptr, _param.ldc, M, N, _param.zpB + N_offset, _param.scalesB + N_offset, _param.ldsa, - _param.reduceA + M_offset * _param.ldsa); - } else { - ret = kernel::wrapper::RemoveZeroPointBias::template forward_both( - cptr, _param.ldc, M, N, _param.zpA + M_offset * _param.ldsa, _param.zpB + N_offset, - _param.scalesA + M_offset * _param.ldsa, _param.scalesB + N_offset, _param.ldsa, _param.K, - _param.reduceA + M_offset * _param.ldsa, _param.reduceB + N_offset); - } - return ret; - } -}; - -struct ParamAlphaBetaProcessS32U8 { - uint8_t* C; - int ldc; - float alpha; - float scaleAcc, scaleC; - int zpC; -}; -template -class AlphaBetaProcessS32U8 { - public: - using Param = ParamAlphaBetaProcessS32U8; - JBLAS_CODE forward(const int32_t* cacheptr, const int cachestep, const int M_offset, const int N_offset, const int M, - const int N, const Param& _param, void* tmpcache, size_t cachesize) { - auto COffset = M_offset * _param.ldc + N_offset; - auto cptr = _param.C + COffset; - return kernel::wrapper::QuanOutS32U32::template forward(_param.alpha, cacheptr, cachestep, cptr, _param.ldc, - M, N, _param.scaleAcc, _param.scaleC, _param.zpC); - } -}; - -} // namespace gemm -} // namespace epilogue -} // namespace jblas diff --git a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_gemm.h b/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_gemm.h deleted file mode 100644 index add580da3e4..00000000000 --- a/intel_extension_for_transformers/llm/library/jblas/jblas/jit_blas_gemm.h +++ /dev/null @@ -1,3544 +0,0 @@ -// 
Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#pragma once -#include - -#include "jit_blas_utils.h" -#include "jit_base.h" - -namespace jblas { -namespace gemm { -enum class CompType : uint16_t { - // base type, too many bits if reuse JBLAS_DTYPE - tFP32 = 0, - tBF16 = 1, - tFP16 = 2, - tS8 = 3, - tU8 = 4, - tS32 = 5, - tS16 = 6, - MASK_A = 0xf, - SHIFT_A = 0, - MASK_B = 0xf0, - SHIFT_B = 4, - MASK_C = 0xf00, - SHIFT_C = 8, - COMP_FP32 = (tFP32 << SHIFT_A) | (tFP32 << SHIFT_B) | (tFP32 << SHIFT_C), - COMP_BF16_FP32 = (tBF16 << SHIFT_A) | (tBF16 << SHIFT_B) | (tFP32 << SHIFT_C), - COMP_FP16_FP16 = (tFP16 << SHIFT_A) | (tFP16 << SHIFT_B) | (tFP16 << SHIFT_C), - COMP_INT8_US_INT32 = (tU8 << SHIFT_A) | (tS8 << SHIFT_B) | (tS32 << SHIFT_C), - COMP_INT8_UU_INT32 = (tU8 << SHIFT_A) | (tU8 << SHIFT_B) | (tS32 << SHIFT_C), - COMP_INT8_SS_INT32 = (tS8 << SHIFT_A) | (tS8 << SHIFT_B) | (tS32 << SHIFT_C), - COMP_INT8_SU_INT32 = (tS8 << SHIFT_A) | (tU8 << SHIFT_B) | (tS32 << SHIFT_C), - COMP_INT16_SS_INT32 = (tS16 << SHIFT_A) | (tS16 << SHIFT_B) | (tS32 << SHIFT_C), - COMP_INT8_US_FP32 = (tU8 << SHIFT_A) | (tS8 << SHIFT_B) | (tFP32 << SHIFT_C), - COMP_INT8_UU_FP32 = (tU8 << SHIFT_A) | (tU8 << SHIFT_B) | (tFP32 << SHIFT_C), - COMP_INT8_SS_FP32 = (tS8 << SHIFT_A) | (tS8 << SHIFT_B) | (tFP32 << SHIFT_C), - COMP_INT8_SU_FP32 = (tS8 << SHIFT_A) | (tU8 << SHIFT_B) | (tFP32 << SHIFT_C), -}; - -class CompTypeHelper { - public: - static 
inline uint64_t get_mask_val(CompType raw, CompType mask, CompType shift) { - return (static_cast(raw) & static_cast(mask)) >> static_cast(shift); - } - - static void parse_id(CompType id, uint64_t* vals) { - vals[0] = get_mask_val(id, CompType::MASK_A, CompType::SHIFT_A); - vals[1] = get_mask_val(id, CompType::MASK_B, CompType::SHIFT_B); - vals[2] = get_mask_val(id, CompType::MASK_C, CompType::SHIFT_C); - } - - static const char* to_str(CompType id) { - static char tmp[128]; - uint64_t vals[3]; - parse_id(id, vals); - sprintf(tmp, "A%d_B%d_C%d", static_cast(vals[0]), static_cast(vals[1]), static_cast(vals[2])); - return tmp; - } - - static inline uint64_t get_B(CompType id) { return get_mask_val(id, CompType::MASK_B, CompType::SHIFT_B); } - - static inline bool is_integer(CompType id) { - auto bt = get_B(id); - bool flag = false; - flag |= bt == static_cast(CompType::tS8); - flag |= bt == static_cast(CompType::tU8); - return flag; - } -}; - -class CoreAttr { - public: - // INT64=LSB|**8bits:NTile**||**8bits:PackRow**||**16bits:CompType**||**8bits:ISA**||**24bits:reversed**| - static uint64_t constexpr NTILE_MASK = 0xff, NTILE_SHIFT = 0, PACKROW_MASK = 0xff00, PACKROW_SHIFT = 8, - COMP_MASK = 0xffff0000, COMP_SHIFT = 16, ISA_MASK = 0xff00000000, ISA_SHIFT = 32; - - static inline uint64_t get_mask_val(uint64_t raw, uint64_t mask, uint64_t shift) { return (raw & mask) >> shift; } - - static constexpr uint64_t make_core_id(int NTile, int PackRow, CompType CompType, JBLAS_ISA ISA) { - return (static_cast(NTile) << NTILE_SHIFT) | (static_cast(PackRow) << PACKROW_SHIFT) | - (static_cast(CompType) << COMP_SHIFT) | (static_cast(ISA) << ISA_SHIFT); - } - static void parse_id(uint64_t id, uint64_t* vals) { - vals[0] = get_mask_val(id, NTILE_MASK, NTILE_SHIFT); - vals[1] = get_mask_val(id, PACKROW_MASK, PACKROW_SHIFT); - vals[2] = get_mask_val(id, COMP_MASK, COMP_SHIFT); - vals[3] = get_mask_val(id, ISA_MASK, ISA_SHIFT); - } - - static const char* to_str(uint64_t id) { - 
static char tmp[128]; - uint64_t vals[4]; - parse_id(id, vals); - sprintf(tmp, "N%d_PACK%d_COMP%d_ISA%d", static_cast(vals[0]), static_cast(vals[1]), - static_cast(vals[2]), static_cast(vals[3])); - return tmp; - } - - static inline int get_packrow(uint64_t id) { return static_cast(get_mask_val(id, PACKROW_MASK, PACKROW_SHIFT)); } - - static inline size_t get_bsize(uint64_t id) { - auto packrow = get_packrow(id); - return size_t(4 / packrow); - } - - static inline JBLAS_ISA get_ISA(uint64_t id) { return static_cast(get_mask_val(id, ISA_MASK, ISA_SHIFT)); } - - static inline CompType get_comp(uint64_t id) { - return static_cast(get_mask_val(id, COMP_MASK, COMP_SHIFT)); - } -}; - -namespace code { - -template -class Avx2N8P1 : protected jblas::xbyak::JitAvx2 { - public: - static int constexpr RegLen = 8, PackRow = 1; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 1; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX2; - static auto constexpr COMPUTE = CompType::COMP_FP32; - typedef float AType; - typedef float BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 
reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, 
reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile) { - for (int kk = 0; kk < _ktile; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vbroadcastss(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vbroadcastss(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, 
ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class Avx512fN16P1 : protected jblas::xbyak::JitAvx512f { - public: - static int constexpr RegLen = 16, PackRow = 1; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? 
(RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 1; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512F; - static auto constexpr COMPUTE = CompType::COMP_FP32; - typedef float AType; - typedef float BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = 
st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile) { - for (int kk = 0; kk < _ktile; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - 
vbroadcastss(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vbroadcastss(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class Avx512fp16N32P1 : protected jblas::xbyak::JitAvx512_fp16 { - public: - static int constexpr RegLen = 32, PackRow = 1; - 
static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 1; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512_FP16; - static auto constexpr COMPUTE = CompType::COMP_FP16_FP16; - typedef utils::fp16 AType; - typedef utils::fp16 BType; - typedef utils::fp16 CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); 
// use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile) { - for (int kk = 0; kk < _ktile; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for 
(int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastw(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ph(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vpbroadcastw(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ph(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - 
outLocalLabel(); - } -}; - -template -class Avx512bf16N16P2 : protected jblas::xbyak::JitAvx512_bf16 { - public: - static int constexpr RegLen = 16, PackRow = 2; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 2; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512_BF16; - static auto constexpr COMPUTE = CompType::COMP_BF16_FP32; - typedef utils::bf16 AType; - typedef utils::bf16 BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + 
BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void 
generate_fma(int _mtile, int _ktile) { - for (int kk = 0; kk < _ktile; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vbroadcastss(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vdpbf16ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vbroadcastss(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vdpbf16ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - 
for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class Avx512vnniN16P4 : protected jblas::xbyak::JitAvx512vnni { - public: - static int constexpr RegLen = 16, PackRow = 4; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 4; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512_VNNI; - static auto constexpr COMPUTE = CompType::COMP_INT8_US_INT32; - typedef uint8_t AType; - typedef int8_t BType; - typedef int32_t CType; - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - private: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - - protected: - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = 
BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k 
iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _kunroll) { - for (int kk = 0; kk < _kunroll; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastd(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vpbroadcastd(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, 
ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class AvxvnniN8P4 : protected jblas::xbyak::JitAvxvnni { - public: - static int constexpr RegLen = 8, PackRow = 4; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 4; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX_VNNI; - static auto constexpr COMPUTE = CompType::COMP_INT8_US_INT32; - typedef uint8_t AType; - typedef int8_t BType; - typedef int32_t CType; - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - private: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - protected: - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - 
BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * 
AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _kunroll) { - for (int kk = 0; kk < _kunroll; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastd(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vpbroadcastd(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - 
mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class Amxbf16N16P2 : protected jblas::xbyak::JitAmxbf16 { - public: - static int constexpr RegLen = 16, PackRow = 2; - static_assert(_NTILE % RegLen == 0); - static_assert(_MTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? 1 : _MTILE / RegLen; - static_assert(NRegs * MRegs + 2 <= TileCount); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs * RegLen, KTILE = 32; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAMX_BF16; - static auto constexpr COMPUTE = CompType::COMP_BF16_FP32; - typedef utils::bf16 AType; - typedef utils::bf16 BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - void* workspace; - }; - typedef long long (*func_t)(params*); - - int TmpRegCount = RegCount; - int TmpReg = 0; - int CTileCount = 0, ATileCount = 0, BTileCount = 0; - int CTile = 0, ATile = 0, BTile = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - 
Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_tmp3; - Xbyak::Reg64 reg_ret = rax; - - void assign_regs() { - CTileCount = NRegs * MRegs; - auto tile_re = TileCount - CTileCount; - if (tile_re - 1 >= NRegs) { - BTileCount = NRegs; - ATileCount = tile_re - BTileCount; - } else if (tile_re - 1 >= MRegs) { - ATileCount = MRegs; - BTileCount = tile_re - ATileCount; - } else { - ATileCount = 1; - BTileCount = tile_re - ATileCount; - } - CTile = 0; - ATile = CTile + CTileCount; - BTile = ATile + ATileCount; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 11, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_tmp3 = st.t[10]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * 
AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int kunrll) { - auto& reg_Bstride = reg_tmp1; - mov(reg_Bstride, NTILE * 4); - int mtiles = _mtile / RegLen; - for (int kk = 0; kk < kunrll; kk++) { - auto reg_Atmp = reg_tmp2; - if (mtiles == 1) { - reg_Atmp = reg_matAptr; - } else { - mov(reg_Atmp, reg_matAptr); - } - if (BTileCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile + i), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]); - } - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * AKStepSize]); - for (int i = 0; i < NRegs; i++) { - tdpbf16ps(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile + i)); - } - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - } else { - if (ATileCount == mtiles) { - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile + mm), ptr[reg_Atmp + reg_astride + kk * AKStepSize]); - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]); - for (int mm = 0; mm < mtiles; mm++) { - tdpbf16ps(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile + mm), Xbyak::Tmm(BTile)); - } - } - } else { - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * 
AKStepSize]); - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]); - tdpbf16ps(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile)); - } - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - } - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < CTileCount; i++) { - tilezero(Xbyak::Tmm(CTile + i)); - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - int mtnum = _mtile / 16; - for (int mm = 0; mm < mtnum; mm++) { - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(CTile + mm * NRegs + i), ptr[reg_matCptr + reg_cstride + i * 64]); - } - if (mm != mtnum - 1) { - lea(reg_matCptr, ptr[reg_matCptr + 8 * reg_cstride]); - lea(reg_matCptr, ptr[reg_matCptr + 8 * reg_cstride]); - } - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_tmp, dword[parambase + OFFSET(workspace)]); - mov(reg_tmp1, NTILE * 4); - for (int mm = 0; mm < MRegs; mm++) { - for (int i = 0; i < NRegs; i++) { - tilestored(ptr[reg_tmp + reg_tmp1 + i * 64 + mm * 16 * NTILE * 4], Xbyak::Tmm(CTile + mm * NRegs + i)); - } - } - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - int zunroll = TmpRegCount / NRegs; - for (int i = 0; i < _mtile; i += zunroll) { - int m_re = utils::remainsize(i, _mtile, zunroll); - for (int im = 0; im < m_re; im++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(TmpReg + im * NRegs + j), ptr[reg_tmp + j * 64 + (i + im) * NTILE * 4]); - 
vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(TmpReg + im * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - } - outLocalLabel(); - } -}; - -template -class Amxint8N16P4 : protected jblas::xbyak::JitAmxint8 { - public: - static int constexpr RegLen = 16, PackRow = 4; - static_assert(_NTILE % RegLen == 0); - static_assert(_MTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? 1 : _MTILE / RegLen; - static_assert(NRegs * MRegs + 2 <= TileCount); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs * RegLen, KTILE = 64; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAMX_INT8; - static auto constexpr COMPUTE = - (std::is_same_v - ? std::is_same_v ? CompType::COMP_INT8_SS_INT32 : CompType::COMP_INT8_SU_INT32 - : std::is_same_v ? CompType::COMP_INT8_US_INT32 - : CompType::COMP_INT8_UU_INT32); - using AType = AT; - using BType = BT; - typedef int32_t CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - void* workspace; - }; - typedef long long (*func_t)(params*); - - int TmpRegCount = RegCount; - int TmpReg = 0; - int CTileCount = 0, ATileCount = 0, BTileCount = 0; - int CTile = 0, ATile = 0, BTile = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_tmp3; - 
Xbyak::Reg64 reg_ret = rax; - - void assign_regs() { - CTileCount = NRegs * MRegs; - auto tile_re = TileCount - CTileCount; - if (tile_re - 1 >= NRegs) { - BTileCount = NRegs; - ATileCount = tile_re - BTileCount; - } else if (tile_re - 1 >= MRegs) { - ATileCount = MRegs; - BTileCount = tile_re - ATileCount; - } else { - ATileCount = 1; - BTileCount = tile_re - ATileCount; - } - CTile = 0; - ATile = CTile + CTileCount; - BTile = ATile + ATileCount; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 11, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_tmp3 = st.t[10]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - 
cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int kunrll) { - auto& reg_Bstride = reg_tmp1; - mov(reg_Bstride, NTILE * 4); - int mtiles = _mtile / RegLen; - - for (int kk = 0; kk < kunrll; kk++) { - auto reg_Atmp = reg_tmp2; - if (mtiles == 1) { - reg_Atmp = reg_matAptr; - } else { - mov(reg_Atmp, reg_matAptr); - } - if (BTileCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile + i), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]); - } - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * AKStepSize]); - for (int i = 0; i < NRegs; i++) { - _tdpb(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile + i)); - } - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - } else { - if (ATileCount == mtiles) { - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile + mm), ptr[reg_Atmp + reg_astride + kk * AKStepSize]); - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]); - for (int mm = 0; mm < mtiles; mm++) { - _tdpb(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile + mm), Xbyak::Tmm(BTile)); - } - } - } else { - for (int mm = 0; mm < mtiles; mm++) { - tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * AKStepSize]); - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + 
reg_Bstride + kk * BKStepSize + i * 64]); - _tdpb(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile)); - } - if (mm != mtiles - 1) { - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]); - } - } - } - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < CTileCount; i++) { - tilezero(Xbyak::Tmm(CTile + i)); - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - int mtnum = _mtile / 16; - for (int mm = 0; mm < mtnum; mm++) { - for (int i = 0; i < NRegs; i++) { - tileloaddt1(Xbyak::Tmm(CTile + mm * NRegs + i), ptr[reg_matCptr + reg_cstride + i * 64]); - } - if (mm != mtnum - 1) { - lea(reg_matCptr, ptr[reg_matCptr + 8 * reg_cstride]); - lea(reg_matCptr, ptr[reg_matCptr + 8 * reg_cstride]); - } - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_tmp, dword[parambase + OFFSET(workspace)]); - mov(reg_tmp1, NTILE * 4); - for (int mm = 0; mm < MRegs; mm++) { - for (int i = 0; i < NRegs; i++) { - tilestored(ptr[reg_tmp + reg_tmp1 + i * 64 + mm * 16 * NTILE * 4], Xbyak::Tmm(CTile + mm * NRegs + i)); - } - } - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - int zunroll = TmpRegCount / NRegs; - for (int i = 0; i < _mtile; i += zunroll) { - int m_re = utils::remainsize(i, _mtile, zunroll); - for (int im = 0; im < m_re; im++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(TmpReg + im * NRegs + j), ptr[reg_tmp + j * 64 + (i + im) * NTILE * 4]); - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(TmpReg + im * NRegs + j)); - } - add(reg_matCptr, 
reg_cstride); - } - } - outLocalLabel(); - } -}; -template -using Amxint8N16P4US = Amxint8N16P4; - -template -using Amxint8N16P4SS = Amxint8N16P4; - -class AmxConfigure : protected jblas::xbyak::JitAmxtile { - public: - typedef long long (*func_t)(tileconfig_t*); - - static void configure(int TILE_M, int TILE_N, int TILE_K, int elesize, int ANum, int BNum, int CNum) { - static AmxConfigure code; - tileconfig_t cfg; - std::memset(&cfg, 0, sizeof(cfg)); - configure_tiles(cfg, TILE_M, TILE_N, TILE_K, elesize, ANum, BNum, CNum); - code.mKernel(&cfg); - } - - protected: - AmxConfigure() { - generate_config(this); - mKernel = getCode(); - } - - func_t mKernel = nullptr; -}; - -namespace kblock { -// optimize for kblock gemm, each block size in k dimension has dequant operation -// all accumulators use fp32 dtype. -template -class Avx512fN16P1 : protected jblas::xbyak::JitAvx512f { - public: - static int constexpr RegLen = 16, PackRow = 1; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? 
(RegCount - 1) / NRegs : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 1; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512F; - static auto constexpr COMPUTE = CompType::COMP_FP32; - typedef float AType; - typedef float BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - int k; - int n; - int init; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_ret = rax; - Xbyak::Opmask msk_wr = k1; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - ARegCount - CRegCount; - if (BRegCount < NRegs) { - BRegCount = 0; - ARegCount = BRegCount + 1; - } - if (BRegCount > NRegs) { - BRegCount = NRegs; - } - CReg = 0; - BReg = CReg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg <= RegCount); - TmpRegCount = RegCount - TmpReg; - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 10, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = 
st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - mov(reg_tmp, reg_ksize); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kloop", T_NEAR); - L(".unkloop"); - generate_fma(_mtile, KUNROLL); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_iterk, KUNROLL * KTILE); - cmp(reg_iterk, reg_tmp); // k iteration variable - jb(".unkloop"); - cmp(reg_tmp, reg_ksize); - jge(".kend", T_NEAR); - L(".kloop"); - generate_fma(_mtile, 1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_iterk, 1 * KTILE); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - L(".kend"); - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile) { - for (int kk = 0; kk < _ktile; kk++) { - lea(reg_tmp1, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - 
vbroadcastss(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } else if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm += ARegCount) { - int mm_re = utils::remainsize(mm, _mtile, ARegCount); - for (int imm = 0; imm < mm_re; imm++) { - vbroadcastss(vreg_t(AReg + imm), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vfmadd231ps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg + imm), - ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } else { - assert(0); - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CReg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CReg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class Avx512vnniN16P4 : protected jblas::xbyak::JitAvx512vnni { - public: - static int constexpr RegLen = 16, PackRow = 4; - 
static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? (RegCount - 1 - NRegs) / (NRegs * 2) : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 1); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 4; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX512_VNNI; - static auto constexpr COMPUTE = CompType::COMP_INT8_US_FP32; - typedef uint8_t AType; - typedef int8_t BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - uint8_t* zpA; - float* scaleA; - int ldsa; - float* scaleB; - float* reduceB; - int ldsb; - int k; - int n; - int kblock; - int init; - float kscale; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, CF32Reg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_iterkb; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_tmp3; - Xbyak::Reg64 reg_tmp4; - Xbyak::Reg64 reg_ret = rax; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = NRegs; - CReg = 0; - CF32Reg = CReg + CRegCount; - BReg = CF32Reg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg < RegCount); - TmpRegCount = RegCount - TmpReg; 
- assert(TmpRegCount >= 1); - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 13, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_iterkb = st.t[12]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_tmp3 = st.t[10]; - reg_tmp4 = st.t[11]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - xor_(reg_iterkb, reg_iterkb); - L(".kloop"); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vpxorq(Xbyak::Zmm(CReg + i * NRegs + j), Xbyak::Zmm(CReg + i * NRegs + j), Xbyak::Zmm(CReg + i * NRegs + j)); - } - } - xor_(reg_tmp2, reg_tmp2); - load32(reg_tmp3, ptr[parambase + OFFSET(kblock)]); - mov(reg_tmp, reg_tmp3); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kbloop", T_NEAR); - L(".unkbloop"); - generate_fma(_mtile, KUNROLL, reg_tmp1); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_tmp2, KUNROLL * KTILE); - cmp(reg_tmp2, reg_tmp); - jb(".unkbloop"); - 
cmp(reg_tmp, reg_tmp3); - jge(".kend", T_NEAR); - L(".kbloop"); - generate_fma(_mtile, 1, reg_tmp1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_tmp2, 1 * KTILE); - cmp(reg_tmp2, reg_tmp3); - jb(".kbloop"); - L(".kend"); - add(reg_iterk, reg_tmp2); - generate_f32_accumulate(_mtile); - generate_zp_correction(_mtile); - inc(reg_iterkb); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile, Xbyak::Reg64& tmp) { - for (int kk = 0; kk < _ktile; kk++) { - lea(tmp, ptr[reg_matAptr + kk * AKStepSize]); - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastd(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), vreg_t(BReg + i)); - } - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CF32Reg + i * NRegs + j), vreg_t(CF32Reg + i * NRegs + j), vreg_t(CF32Reg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CF32Reg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } - add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void generate_f32_accumulate(int _mtile) { - load32(reg_tmp, ptr[parambase + OFFSET(ldsb)]); - imul(reg_tmp, reg_iterkb); - mov(reg_tmp2, ptr[parambase + OFFSET(scaleB)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_tmp * sizeof(float)]); - 
lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]); - - mov(reg_tmp, ptr[parambase + OFFSET(scaleA)]); - lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(float)]); - load32(reg_tmp1, ptr[parambase + OFFSET(ldsa)]); - for (int i = 0; i < NRegs; i++) { - vmovups(Xbyak::Zmm(BReg + i), ptr[reg_tmp2 + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vbroadcastss(Xbyak::Zmm(TmpReg), ptr[reg_tmp]); - lea(reg_tmp, ptr[reg_tmp + reg_tmp1 * sizeof(float)]); - for (int i = 0; i < NRegs; i++) { - vcvtdq2ps(Xbyak::Zmm(CReg + mm * NRegs + i), Xbyak::Zmm(CReg + mm * NRegs + i)); - vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(TmpReg), Xbyak::Zmm(BReg + i)); - vmulps(Xbyak::Zmm(CReg + mm * NRegs + i), Xbyak::Zmm(AReg)); - vaddps(Xbyak::Zmm(CF32Reg + mm * NRegs + i), Xbyak::Zmm(CReg + mm * NRegs + i)); - } - } - } - - void generate_zp_correction(int _mtile) { - inLocalLabel(); - mov(reg_tmp, ptr[parambase + OFFSET(zpA)]); - cmp(reg_tmp, 0); - je(".NOZP", T_NEAR); - lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(AType)]); - auto& reg_zpA = reg_tmp; - - load32(reg_tmp1, ptr[parambase + OFFSET(ldsb)]); - imul(reg_tmp1, reg_iterkb); - mov(reg_tmp2, ptr[parambase + OFFSET(reduceB)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_tmp1 * sizeof(float)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]); - auto& reg_redB = reg_tmp2; - - mov(reg_tmp1, ptr[parambase + OFFSET(scaleA)]); - lea(reg_tmp1, ptr[reg_tmp1 + reg_iterkb * sizeof(float)]); - auto& reg_scaleA = reg_tmp1; - - load32(reg_tmp3, ptr[parambase + OFFSET(ldsa)]); - auto& reg_ldsa = reg_tmp3; - for (int i = 0; i < NRegs; i++) { - vmovups(Xbyak::Zmm(BReg + i), ptr[reg_redB + i * VecBytes]); - } - - vbroadcastss(vreg_t(TmpReg), ptr[parambase + OFFSET(kscale)]); - auto& reg_kscale = reg_tmp2; - - for (int i = 0; i < _mtile; i++) { - vpbroadcastb(Xbyak::Xmm(AReg), ptr[reg_zpA]); - vpmovzxbd(Xbyak::Zmm(AReg), Xbyak::Xmm(AReg)); - vcvtdq2ps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg)); - vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg), 
zword_b[reg_scaleA]); - vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg), vreg_t(TmpReg)); - for (int j = 0; j < NRegs; j++) { - vmulps(Xbyak::Zmm(CReg + j), Xbyak::Zmm(AReg), Xbyak::Zmm(BReg + j)); - vsubps(Xbyak::Zmm(CF32Reg + i * NRegs + j), Xbyak::Zmm(CReg + j)); - } - lea(reg_zpA, ptr[reg_zpA + reg_ldsa * sizeof(AType)]); - lea(reg_scaleA, ptr[reg_scaleA + reg_ldsa * sizeof(float)]); - } - L(".NOZP"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CF32Reg + i * NRegs + j)); - } - add(reg_matCptr, reg_cstride); - } - outLocalLabel(); - } -}; - -template -class AvxvnniN8P4 : protected jblas::xbyak::JitAvxvnni { - public: - static int constexpr RegLen = 8, PackRow = 4; - static_assert(_NTILE % RegLen == 0); - static int constexpr NRegs = _NTILE / RegLen; - static int constexpr MRegs = _MTILE == 0 ? 
(RegCount - 3) / (NRegs * 2) : _MTILE; - static_assert(NRegs * MRegs <= RegCount - 3); - static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs, KTILE = 4; - static int constexpr KUNROLL = 2; - static auto constexpr ISA = JBLAS_ISA::JblasAVX_VNNI; - static auto constexpr COMPUTE = CompType::COMP_INT8_US_FP32; - typedef uint8_t AType; - typedef int8_t BType; - typedef float CType; - - struct params { - AType* matA; - int astride; - BType* matB; - int bstride; - CType* matC; - int cstride; - uint8_t* zpA; - float* scaleA; - int ldsa; - float* scaleB; - float* reduceB; - int ldsb; - int k; - int n; - int kblock; - int init; - float kscale; - }; - typedef long long (*func_t)(params*); - - int CRegCount = 0, BRegCount = 0, ARegCount = 0, TmpRegCount = 0; - int CReg = 0, CF32Reg = 0, BReg = 0, AReg = 0, TmpReg = 0; - static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType); - static int constexpr AKStepSize = KTILE * sizeof(AType); - - void generate_code(int _mtile) { - assign_regs(); - reset(); - generate_mtile(_mtile); - ready(); - mKernel = getCode(); - } - func_t mKernel = nullptr; - - protected: - Xbyak::Reg64 parambase; - Xbyak::Reg64 reg_matAptr; - Xbyak::Reg64 reg_matBptr; - Xbyak::Reg64 reg_matCptr; - Xbyak::Reg64 reg_ksize; - Xbyak::Reg64 reg_nsize; - Xbyak::Reg64 reg_cstride; - Xbyak::Reg64 reg_astride; - Xbyak::Reg64 reg_iterk; - Xbyak::Reg64 reg_iterkb; - Xbyak::Reg64 reg_itern; - Xbyak::Reg64 reg_tmp; - Xbyak::Reg64 reg_tmp1; - Xbyak::Reg64 reg_tmp2; - Xbyak::Reg64 reg_tmp3; - Xbyak::Reg64 reg_tmp4; - Xbyak::Reg64 reg_ret = rax; - - void assign_regs() { - CRegCount = MRegs * NRegs; - ARegCount = 1; - BRegCount = RegCount - CRegCount - CRegCount - ARegCount - 2; - if (BRegCount >= NRegs) { - BRegCount = NRegs; - } else { - BRegCount = 0; - } - CReg = 0; - CF32Reg = CReg + CRegCount; - BReg = CF32Reg + CRegCount; - AReg = BReg + BRegCount; - TmpReg = AReg + ARegCount; - assert(TmpReg < RegCount); - TmpRegCount = RegCount - TmpReg; - 
assert(TmpRegCount >= 2); - } - - void generate_mtile(int _mtile) { - inLocalLabel(); // use local label for multiple instance - Xbyak::util::StackFrame st(this, 1, 13, 16 * 10); - parambase = st.p[0]; - reg_matAptr = st.t[0]; - reg_matBptr = st.t[1]; - reg_matCptr = st.t[0]; - reg_ksize = st.t[2]; - reg_astride = st.t[3]; - reg_cstride = st.t[3]; - reg_iterk = st.t[4]; - reg_iterkb = st.t[12]; - reg_tmp = st.t[5]; - reg_tmp1 = st.t[6]; - reg_tmp2 = st.t[7]; - reg_tmp3 = st.t[10]; - reg_tmp4 = st.t[11]; - reg_nsize = st.t[8]; - reg_itern = st.t[9]; - reg_ret = rax; - - vreg_push(rsp); - - load32(reg_ksize, ptr[parambase + OFFSET(k)]); - load32(reg_nsize, ptr[parambase + OFFSET(n)]); - xor_(reg_itern, reg_itern); - L(".nloop"); - init_regs(_mtile); - mov(reg_matAptr, ptr[parambase + OFFSET(matA)]); - load32(reg_astride, ptr[parambase + OFFSET(astride)]); - mov(reg_matBptr, ptr[parambase + OFFSET(matB)]); - load32(reg_tmp, ptr[parambase + OFFSET(bstride)]); - imul(reg_tmp, reg_itern); - lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]); - xor_(reg_iterk, reg_iterk); - generate_kloop(_mtile); - write_back(_mtile); - add(reg_itern, NTILE); - cmp(reg_itern, reg_nsize); - jb(".nloop"); - mov(reg_ret, 0); - vreg_pop(rsp); - - outLocalLabel(); // end of local label - } - - void generate_kloop(int _mtile) { - inLocalLabel(); - xor_(reg_iterkb, reg_iterkb); - L(".kloop"); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j), vreg_t(CReg + i * NRegs + j)); - } - } - xor_(reg_tmp2, reg_tmp2); - load32(reg_tmp3, ptr[parambase + OFFSET(kblock)]); - mov(reg_tmp, reg_tmp3); - padto_le(reg_tmp, KUNROLL * KTILE); - cmp(reg_tmp, 0); - jz(".kbloop", T_NEAR); - L(".unkbloop"); - generate_fma(_mtile, KUNROLL, reg_tmp1); - add(reg_matAptr, KUNROLL * AKStepSize); - add(reg_matBptr, KUNROLL * BKStepSize); - add(reg_tmp2, KUNROLL * KTILE); - cmp(reg_tmp2, reg_tmp); - jb(".unkbloop"); - cmp(reg_tmp, reg_tmp3); - 
jge(".kend", T_NEAR); - L(".kbloop"); - generate_fma(_mtile, 1, reg_tmp1); - add(reg_matAptr, 1 * AKStepSize); - add(reg_matBptr, 1 * BKStepSize); - add(reg_tmp2, 1 * KTILE); - cmp(reg_tmp2, reg_tmp3); - jb(".kbloop"); - L(".kend"); - add(reg_iterk, reg_tmp2); - generate_f32_accumulate(_mtile); - generate_zp_correction(_mtile); - inc(reg_iterkb); - cmp(reg_iterk, reg_ksize); // k iteration variable - jb(".kloop"); - - outLocalLabel(); - } - - void generate_fma(int _mtile, int _ktile, Xbyak::Reg64& tmp) { - for (int kk = 0; kk < _ktile; kk++) { - lea(tmp, ptr[reg_matAptr + kk * AKStepSize]); - if (BRegCount == 0) { - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastd(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } else { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vpbroadcastd(vreg_t(AReg), ptr[reg_tmp1]); - add(reg_tmp1, reg_astride); - for (int i = 0; i < NRegs; i++) { - vpdpbusds_(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg), ptr[reg_matBptr + kk * BKStepSize + i * VecBytes]); - } - } - } - } - } - - void init_regs(int _mtile) { - inLocalLabel(); - load32(reg_tmp, ptr[parambase + OFFSET(init)]); - cmp(reg_tmp, 0); - je(".read", T_NEAR); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vxor(vreg_t(CF32Reg + i * NRegs + j), vreg_t(CF32Reg + i * NRegs + j), vreg_t(CF32Reg + i * NRegs + j)); - } - } - jmp(".end", T_NEAR); - L(".read"); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]); - load32(reg_cstride, ptr[parambase + OFFSET(cstride)]); - for (int i = 0; i < _mtile; i++) { - for (int j = 0; j < NRegs; j++) { - vmovups(vreg_t(CF32Reg + i * NRegs + j), ptr[reg_matCptr + j * VecBytes]); - } 
- add(reg_matCptr, reg_cstride); - } - L(".end"); - outLocalLabel(); - } - - void generate_f32_accumulate(int _mtile) { - load32(reg_tmp, ptr[parambase + OFFSET(ldsb)]); - imul(reg_tmp, reg_iterkb); - mov(reg_tmp2, ptr[parambase + OFFSET(scaleB)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_tmp * sizeof(float)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]); - - mov(reg_tmp, ptr[parambase + OFFSET(scaleA)]); - lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(float)]); - load32(reg_tmp1, ptr[parambase + OFFSET(ldsa)]); - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_tmp2 + i * VecBytes]); - } - for (int mm = 0; mm < _mtile; mm++) { - vbroadcastss(vreg_t(TmpReg), ptr[reg_tmp]); - lea(reg_tmp, ptr[reg_tmp + reg_tmp1 * sizeof(float)]); - for (int i = 0; i < NRegs; i++) { - vcvtdq2ps(vreg_t(CReg + mm * NRegs + i), vreg_t(CReg + mm * NRegs + i)); - vmulps(vreg_t(AReg), vreg_t(TmpReg), vreg_t(BReg + i)); - vmulps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg)); - vaddps(vreg_t(CF32Reg + mm * NRegs + i), vreg_t(CReg + mm * NRegs + i)); - } - } - } else { - for (int mm = 0; mm < _mtile; mm++) { - vbroadcastss(vreg_t(TmpReg), ptr[reg_tmp]); - lea(reg_tmp, ptr[reg_tmp + reg_tmp1 * sizeof(float)]); - for (int i = 0; i < NRegs; i++) { - vcvtdq2ps(vreg_t(CReg + mm * NRegs + i), vreg_t(CReg + mm * NRegs + i)); - vmovups(vreg_t(AReg), ptr[reg_tmp2 + i * VecBytes]); - vmulps(vreg_t(AReg), vreg_t(TmpReg)); - vmulps(vreg_t(CReg + mm * NRegs + i), vreg_t(AReg)); - vaddps(vreg_t(CF32Reg + mm * NRegs + i), vreg_t(CReg + mm * NRegs + i)); - } - } - } - } - - void generate_zp_correction(int _mtile) { - inLocalLabel(); - mov(reg_tmp, ptr[parambase + OFFSET(zpA)]); - cmp(reg_tmp, 0); - je(".NOZP", T_NEAR); - lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(AType)]); - auto& reg_zpA = reg_tmp; - load32(reg_tmp1, ptr[parambase + OFFSET(ldsb)]); - imul(reg_tmp1, reg_iterkb); - mov(reg_tmp2, ptr[parambase + OFFSET(reduceB)]); - lea(reg_tmp2, 
ptr[reg_tmp2 + reg_tmp1 * sizeof(float)]); - lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]); - auto& reg_redB = reg_tmp2; - - mov(reg_tmp1, ptr[parambase + OFFSET(scaleA)]); - lea(reg_tmp1, ptr[reg_tmp1 + reg_iterkb * sizeof(float)]); - auto& reg_scaleA = reg_tmp1; - - load32(reg_tmp3, ptr[parambase + OFFSET(ldsa)]); - auto& reg_ldsa = reg_tmp3; - - vbroadcastss(vreg_t(TmpReg), ptr[parambase + OFFSET(kscale)]); - auto& reg_kscale = reg_tmp4; - if (BRegCount == NRegs) { - for (int i = 0; i < NRegs; i++) { - vmovups(vreg_t(BReg + i), ptr[reg_redB + i * VecBytes]); - } - for (int i = 0; i < _mtile; i++) { - vpbroadcastb(Xbyak::Xmm(AReg), ptr[reg_zpA]); - vpmovzxbd(vreg_t(AReg), Xbyak::Xmm(AReg)); - vcvtdq2ps(vreg_t(AReg), vreg_t(AReg)); - vbroadcastss(vreg_t(TmpReg + 1), ptr[reg_scaleA]); - vmulps(vreg_t(AReg), vreg_t(AReg), vreg_t(TmpReg + 1)); - vmulps(vreg_t(AReg), vreg_t(AReg), vreg_t(TmpReg)); - for (int j = 0; j < NRegs; j++) { - vmulps(vreg_t(CReg + j), vreg_t(AReg), vreg_t(BReg + j)); - vsubps(vreg_t(CF32Reg + i * NRegs + j), vreg_t(CReg + j)); - } - lea(reg_zpA, ptr[reg_zpA + reg_ldsa * sizeof(AType)]); - lea(reg_scaleA, ptr[reg_scaleA + reg_ldsa * sizeof(float)]); - } - } else { - for (int i = 0; i < _mtile; i++) { - vpbroadcastb(Xbyak::Xmm(AReg), ptr[reg_zpA]); - vpmovzxbd(vreg_t(AReg), Xbyak::Xmm(AReg)); - vcvtdq2ps(vreg_t(AReg), vreg_t(AReg)); - vbroadcastss(vreg_t(TmpReg + 1), ptr[reg_scaleA]); - vmulps(vreg_t(AReg), vreg_t(AReg), vreg_t(TmpReg + 1)); - vmulps(vreg_t(AReg), vreg_t(AReg), vreg_t(TmpReg)); - for (int j = 0; j < NRegs; j++) { - vmulps(vreg_t(CReg + j), vreg_t(AReg), ptr[reg_redB + j * VecBytes]); - vsubps(vreg_t(CF32Reg + i * NRegs + j), vreg_t(CReg + j)); - } - lea(reg_zpA, ptr[reg_zpA + reg_ldsa * sizeof(AType)]); - lea(reg_scaleA, ptr[reg_scaleA + reg_ldsa * sizeof(float)]); - } - } - - L(".NOZP"); - outLocalLabel(); - } - - void write_back(int _mtile) { - inLocalLabel(); - mov(reg_matCptr, ptr[parambase + OFFSET(matC)]); - 
    // (tail of the preceding kernel class, whose definition starts above this chunk)
    // Flush the f32 accumulator registers back to matC, row by row.
    load32(reg_cstride, ptr[parambase + OFFSET(cstride)]);
    lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(CType)]);
    for (int i = 0; i < _mtile; i++) {
      for (int j = 0; j < NRegs; j++) {
        vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(CF32Reg + i * NRegs + j));
      }
      add(reg_matCptr, reg_cstride);
    }
    outLocalLabel();
  }
};

// AMX-INT8 GEMM micro-kernel generator for k-blocked (per-group quantized) compute.
// Emits x86 code via xbyak that multiplies an int8 A panel by an int8 B panel in
// KTILE(=64)-deep AMX tile products, then, once per kblock, converts the int32 tile
// accumulators to f32, applies per-row scaleA and per-column scaleB, and (when zpA is
// present) subtracts the zero-point correction term zpA*scaleA*kscale*reduceB from matC.
// N is tiled by NTILE = 16*NRegs columns, M by MTILE rows; KUNROLL tile-products are
// unrolled per inner-loop iteration.
// NOTE(review): the template parameter list below was destroyed in transit (angle
// brackets stripped); reconstructed as <typename AT, typename BT, int _NTILE, int
// _MTILE = 0> from the uses of AT/BT/_NTILE/_MTILE in the body — confirm upstream.
template <typename AT, typename BT, int _NTILE, int _MTILE = 0>
class Amxint8N16P4 : protected jblas::xbyak::JitAmxint8 {
 public:
  // RegLen: lanes per zmm/tile row; PackRow: B is packed 4 int8 per dword for tdpb.
  static int constexpr RegLen = 16, PackRow = 4;
  static_assert(_NTILE % RegLen == 0);
  static_assert(_MTILE % RegLen == 0);
  static int constexpr NRegs = _NTILE / RegLen;
  static int constexpr MRegs = _MTILE == 0 ? 1 : _MTILE / RegLen;
  // Need NRegs*MRegs C tiles plus at least one A and one B tile.
  static_assert(NRegs * MRegs + 2 <= TileCount);
  static int constexpr NTILE = RegLen * NRegs, MTILE = MRegs * RegLen, KTILE = 64;
  static int constexpr KUNROLL = 2;
  static auto constexpr ISA = JBLAS_ISA::JblasAMX_INT8;
  // Pick the compute kind from the signedness of AT/BT (SS/SU/US/UU int8 -> f32).
  // NOTE(review): is_same_v arguments were stripped in transit; reconstructed as
  // <AT, int8_t> / <BT, int8_t> — confirm against upstream jblas source.
  static auto constexpr COMPUTE =
      (std::is_same_v<AT, int8_t> ? std::is_same_v<BT, int8_t> ? CompType::COMP_INT8_SS_FP32
                                                               : CompType::COMP_INT8_SU_FP32
                                  : std::is_same_v<BT, int8_t> ? CompType::COMP_INT8_US_FP32
                                                               : CompType::COMP_INT8_UU_FP32);
  using AType = AT;
  using BType = BT;
  typedef float CType;  // outputs are accumulated into f32

  // Kernel argument block; one pointer to this struct is the generated
  // function's single parameter.
  struct params {
    AType* matA;
    int astride;       // byte stride between A rows
    BType* matB;
    int bstride;       // byte stride between packed-B column blocks
    CType* matC;
    int cstride;       // byte stride between C rows
    uint8_t* zpA;      // per-row/per-kblock A zero points; nullptr -> no correction
    float* scaleA;     // per-row/per-kblock A scales
    int ldsa;          // stride of zpA/scaleA in elements per row
    float* scaleB;     // per-column/per-kblock B scales
    float* reduceB;    // per-column/per-kblock sums of B (for zp correction)
    int ldsb;          // stride of scaleB/reduceB per kblock
    int k;
    int n;
    int kblock;        // quantization group size along K
    int init;          // nonzero -> zero-initialize the touched C region first
    float kscale;      // extra scale applied to the zero-point correction term
    void* workspace;   // scratch for spilling int32 tile accumulators
  };
  typedef long long (*func_t)(params*);

  int TmpRegCount = RegCount;
  int TmpReg = 0;
  // Counts and base indices of the AMX tile registers assigned to C/A/B.
  int CTileCount = 0, ATileCount = 0, BTileCount = 0;
  int CTile = 0, ATile = 0, BTile = 0;
  static int constexpr BKStepSize = KTILE * NTILE * sizeof(BType);  // bytes of B per KTILE step
  static int constexpr AKStepSize = KTILE * sizeof(AType);          // bytes of A per KTILE step

  // Build the kernel for a fixed row count _mtile and publish it via mKernel.
  void generate_code(int _mtile) {
    assign_regs();
    reset();
    generate_mtile(_mtile);
    ready();
    // NOTE(review): template argument stripped in transit; restored as
    // getCode<func_t>() — confirm upstream.
    mKernel = getCode<func_t>();
  }
  func_t mKernel = nullptr;

 protected:
  Xbyak::Reg64 parambase;
  Xbyak::Reg64 reg_matAptr;
  Xbyak::Reg64 reg_matBptr;
  Xbyak::Reg64 reg_matCptr;
  Xbyak::Reg64 reg_ksize;
  Xbyak::Reg64 reg_nsize;
  Xbyak::Reg64 reg_cstride;
  Xbyak::Reg64 reg_astride;
  Xbyak::Reg64 reg_iterk;
  Xbyak::Reg64 reg_iterkb;
  Xbyak::Reg64 reg_itern;
  Xbyak::Reg64 reg_tmp;
  Xbyak::Reg64 reg_tmp1;
  Xbyak::Reg64 reg_tmp2;
  Xbyak::Reg64 reg_tmp3;
  Xbyak::Reg64 reg_tmp4;
  Xbyak::Reg64 reg_ret = rax;

  // Partition the AMX tile register file: all C tiles first, then as many
  // B (preferred) or A tiles as the remainder allows, with at least one of each.
  void assign_regs() {
    CTileCount = NRegs * MRegs;
    auto tile_re = TileCount - CTileCount;
    if (tile_re - 1 >= NRegs) {
      BTileCount = NRegs;
      ATileCount = tile_re - BTileCount;
    } else if (tile_re - 1 >= MRegs) {
      ATileCount = MRegs;
      BTileCount = tile_re - ATileCount;
    } else {
      ATileCount = 1;
      BTileCount = tile_re - ATileCount;
    }
    CTile = 0;
    ATile = CTile + CTileCount;
    BTile = ATile + ATileCount;
  }

  // Emit the outer N loop: for each NTILE column block, optionally zero C,
  // rebase A/B pointers, and run the full K loop.
  void generate_mtile(int _mtile) {
    inLocalLabel();  // use local label for multiple instance
    Xbyak::util::StackFrame st(this, 1, 13, 16 * 10);
    parambase = st.p[0];
    reg_matAptr = st.t[0];
    reg_matBptr = st.t[1];
    reg_matCptr = st.t[0];  // aliases reg_matAptr: C ptr is only live outside the k loop
    reg_ksize = st.t[2];
    reg_astride = st.t[3];
    reg_cstride = st.t[3];  // aliases reg_astride for the same reason
    reg_iterk = st.t[4];
    reg_iterkb = st.t[12];
    reg_tmp = st.t[5];
    reg_tmp1 = st.t[6];
    reg_tmp2 = st.t[7];
    reg_tmp3 = st.t[10];
    reg_tmp4 = st.t[11];
    reg_nsize = st.t[8];
    reg_itern = st.t[9];
    reg_ret = rax;

    vreg_push(rsp);

    load32(reg_ksize, ptr[parambase + OFFSET(k)]);
    load32(reg_nsize, ptr[parambase + OFFSET(n)]);
    xor_(reg_itern, reg_itern);
    L(".nloop");
    init_regs(_mtile);
    mov(reg_matAptr, ptr[parambase + OFFSET(matA)]);
    load32(reg_astride, ptr[parambase + OFFSET(astride)]);
    mov(reg_matBptr, ptr[parambase + OFFSET(matB)]);
    load32(reg_tmp, ptr[parambase + OFFSET(bstride)]);
    imul(reg_tmp, reg_itern);  // B is advanced by bstride per output column
    lea(reg_matBptr, ptr[reg_matBptr + reg_tmp]);
    xor_(reg_iterk, reg_iterk);
    generate_kloop(_mtile);
    write_back(_mtile);
    add(reg_itern, NTILE);
    cmp(reg_itern, reg_nsize);
    jb(".nloop");
    mov(reg_ret, 0);
    vreg_pop(rsp);

    outLocalLabel();  // end of local label
  }

  // Emit the K loop, one iteration per quantization group (kblock):
  // zero the C tiles, run a KUNROLL-unrolled body plus a remainder loop over the
  // group, then dequantize-accumulate into matC and apply the zp correction.
  void generate_kloop(int _mtile) {
    inLocalLabel();
    xor_(reg_iterkb, reg_iterkb);
    L(".kloop");
    for (int i = 0; i < CTileCount; i++) {
      tilezero(Xbyak::Tmm(CTile + i));  // fresh int32 accumulators per kblock
    }
    xor_(reg_tmp2, reg_tmp2);  // reg_tmp2 = progress within this kblock
    load32(reg_tmp3, ptr[parambase + OFFSET(kblock)]);
    mov(reg_tmp, reg_tmp3);
    padto_le(reg_tmp, KUNROLL * KTILE);  // largest multiple of the unrolled step
    cmp(reg_tmp, 0);
    jz(".kbloop", T_NEAR);
    L(".unkbloop");
    generate_fma(_mtile, KUNROLL, reg_tmp1, reg_tmp4);
    add(reg_matAptr, KUNROLL * AKStepSize);
    add(reg_matBptr, KUNROLL * BKStepSize);
    add(reg_tmp2, KUNROLL * KTILE);
    cmp(reg_tmp2, reg_tmp);
    jb(".unkbloop");
    cmp(reg_tmp, reg_tmp3);
    jge(".kend", T_NEAR);
    L(".kbloop");  // remainder: one KTILE step at a time
    generate_fma(_mtile, 1, reg_tmp1, reg_tmp4);
    add(reg_matAptr, 1 * AKStepSize);
    add(reg_matBptr, 1 * BKStepSize);
    add(reg_tmp2, 1 * KTILE);
    cmp(reg_tmp2, reg_tmp3);
    jb(".kbloop");
    L(".kend");
    add(reg_iterk, reg_tmp2);
    generate_f32_accumulate(_mtile);
    generate_zp_correction(_mtile);
    inc(reg_iterkb);
    cmp(reg_iterk, reg_ksize);  // k iteration variable
    jb(".kloop");

    outLocalLabel();
  }

  // Emit kunrll KTILE-deep tile products C += A*B, choosing one of three load
  // strategies depending on whether all B tiles, all A tiles, or neither fit in
  // the tile registers left over after C.
  void generate_fma(int _mtile, int kunrll, Xbyak::Reg64& tmpreg, Xbyak::Reg64& tmpreg2) {
    auto& reg_Bstride = tmpreg2;
    mov(reg_Bstride, NTILE * 4);  // row stride of packed B inside a tile
    int mtiles = _mtile / RegLen;

    for (int kk = 0; kk < kunrll; kk++) {
      auto reg_Atmp = tmpreg;
      if (mtiles == 1) {
        reg_Atmp = reg_matAptr;  // single row-tile: no need to copy the pointer
      } else {
        mov(reg_Atmp, reg_matAptr);
      }
      if (BTileCount == NRegs) {
        // All B tiles resident: load B once, stream A row-tiles past them.
        for (int i = 0; i < NRegs; i++) {
          tileloaddt1(Xbyak::Tmm(BTile + i), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]);
        }
        for (int mm = 0; mm < mtiles; mm++) {
          tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * AKStepSize]);
          for (int i = 0; i < NRegs; i++) {
            // NOTE(review): template args stripped in transit; restored as
            // _tdpb<AT, BT>(...) — confirm upstream.
            _tdpb<AT, BT>(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile + i));
          }
          if (mm != mtiles - 1) {
            // advance A by 16 rows (two 8*astride leas; lea scale is limited to 8)
            lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
            lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
          }
        }
      } else {
        if (ATileCount == mtiles) {
          // All A tiles resident: load A once, stream B tiles past them.
          for (int mm = 0; mm < mtiles; mm++) {
            tileloadd(Xbyak::Tmm(ATile + mm), ptr[reg_Atmp + reg_astride + kk * AKStepSize]);
            if (mm != mtiles - 1) {
              lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
              lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
            }
          }
          for (int i = 0; i < NRegs; i++) {
            tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]);
            for (int mm = 0; mm < mtiles; mm++) {
              _tdpb<AT, BT>(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile + mm), Xbyak::Tmm(BTile));
            }
          }
        } else {
          // Worst case: reload both A and B per product.
          for (int mm = 0; mm < mtiles; mm++) {
            tileloadd(Xbyak::Tmm(ATile), ptr[reg_Atmp + reg_astride + kk * AKStepSize]);
            for (int i = 0; i < NRegs; i++) {
              tileloaddt1(Xbyak::Tmm(BTile), ptr[reg_matBptr + reg_Bstride + kk * BKStepSize + i * 64]);
              _tdpb<AT, BT>(Xbyak::Tmm(CTile + mm * NRegs + i), Xbyak::Tmm(ATile), Xbyak::Tmm(BTile));
            }
            if (mm != mtiles - 1) {
              lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
              lea(reg_Atmp, ptr[reg_Atmp + 8 * reg_astride]);
            }
          }
        }
      }
    }
  }

  // If params.init != 0, zero the NTILE-wide stripe of matC for this column
  // block (the k loop then accumulates into it); otherwise leave matC untouched.
  void init_regs(int _mtile) {
    inLocalLabel();
    load32(reg_tmp, ptr[parambase + OFFSET(init)]);
    cmp(reg_tmp, 0);
    je(".end", T_NEAR);
    mov(reg_matCptr, ptr[parambase + OFFSET(matC)]);
    load32(reg_cstride, ptr[parambase + OFFSET(cstride)]);
    lea(reg_matCptr, ptr[reg_matCptr + reg_itern * sizeof(float)]);
    int zunroll = TmpRegCount / NRegs;
    for (int i = 0; i < _mtile; i += zunroll) {
      int m_re = utils::remainsize(i, _mtile, zunroll);
      for (int im = 0; im < m_re; im++) {
        for (int j = 0; j < NRegs; j++) {
          vxorps(vreg_t(TmpReg + im * NRegs + j), vreg_t(TmpReg + im * NRegs + j));
          vmovups(ptr[reg_matCptr + j * VecBytes], vreg_t(TmpReg + im * NRegs + j));
        }
        add(reg_matCptr, reg_cstride);
      }
    }
    L(".end");
    outLocalLabel();
  }

  // Spill the int32 C tiles to workspace, then for each row compute
  // matC += int32 * scaleA[row] * scaleB[col] for the current kblock.
  void generate_f32_accumulate(int _mtile) {
    mov(reg_tmp3, ptr[parambase + OFFSET(workspace)]);
    mov(reg_tmp1, NTILE * 4);  // row stride (bytes) of the spilled tiles
    for (int mm = 0; mm < MRegs; mm++) {
      for (int i = 0; i < NRegs; i++) {
        tilestored(ptr[reg_tmp3 + reg_tmp1 + i * 64 + mm * 16 * NTILE * 4], Xbyak::Tmm(CTile + mm * NRegs + i));
      }
    }
    // scaleB pointer for this kblock/column block
    load32(reg_tmp, ptr[parambase + OFFSET(ldsb)]);
    imul(reg_tmp, reg_iterkb);
    mov(reg_tmp2, ptr[parambase + OFFSET(scaleB)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_tmp * sizeof(float)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]);

    // scaleA pointer for this kblock (advanced by ldsa per row below)
    mov(reg_tmp, ptr[parambase + OFFSET(scaleA)]);
    lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(float)]);
    load32(reg_tmp1, ptr[parambase + OFFSET(ldsa)]);
    // zmm register plan: BReg..BReg+NRegs-1 = scaleB, AReg = scratch product,
    // SAReg = broadcast scaleA, CReg.. = converted accumulators.
    int BReg = TmpReg;
    int AReg = BReg + NRegs;
    int SAReg = AReg + 1;
    int CReg = SAReg + 1;
    for (int i = 0; i < NRegs; i++) {
      vmovups(Xbyak::Zmm(BReg + i), ptr[reg_tmp2 + i * VecBytes]);
    }
    mov(reg_tmp2, ptr[parambase + OFFSET(matC)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]);
    load32(reg_tmp4, dword[parambase + OFFSET(cstride)]);
    for (int mm = 0; mm < _mtile; mm++) {
      vbroadcastss(Xbyak::Zmm(SAReg), ptr[reg_tmp]);
      lea(reg_tmp, ptr[reg_tmp + reg_tmp1 * sizeof(float)]);
      for (int i = 0; i < NRegs; i++) {
        vcvtdq2ps(Xbyak::Zmm(CReg + i), ptr[reg_tmp3 + i * 64 + mm * 4 * NTILE]);
        vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(SAReg), Xbyak::Zmm(BReg + i));
        vmulps(Xbyak::Zmm(CReg + i), Xbyak::Zmm(AReg));
        vaddps(Xbyak::Zmm(CReg + i), ptr[reg_tmp2 + i * 64]);
        vmovups(ptr[reg_tmp2 + i * 64], Xbyak::Zmm(CReg + i));
      }
      add(reg_tmp2, reg_tmp4);
    }
  }

  // When zpA is non-null, subtract the asymmetric-quantization correction
  // zpA[row] * scaleA[row] * kscale * reduceB[col] from matC for this kblock.
  void generate_zp_correction(int _mtile) {
    inLocalLabel();
    mov(reg_tmp, ptr[parambase + OFFSET(zpA)]);
    cmp(reg_tmp, 0);
    je(".NOZP", T_NEAR);  // symmetric A: nothing to correct
    lea(reg_tmp, ptr[reg_tmp + reg_iterkb * sizeof(AType)]);
    auto& reg_zpA = reg_tmp;

    load32(reg_tmp1, ptr[parambase + OFFSET(ldsb)]);
    imul(reg_tmp1, reg_iterkb);
    mov(reg_tmp2, ptr[parambase + OFFSET(reduceB)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_tmp1 * sizeof(float)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]);
    auto& reg_redB = reg_tmp2;

    mov(reg_tmp1, ptr[parambase + OFFSET(scaleA)]);
    lea(reg_tmp1, ptr[reg_tmp1 + reg_iterkb * sizeof(float)]);
    auto& reg_scaleA = reg_tmp1;

    load32(reg_tmp3, ptr[parambase + OFFSET(ldsa)]);
    auto& reg_ldsa = reg_tmp3;
    int BReg = TmpReg;
    int AReg = BReg + NRegs;
    int SReg = AReg + 1;
    int CReg = SReg + 1;
    int CF32Reg = CReg + NRegs;
    for (int i = 0; i < NRegs; i++) {
      vmovups(Xbyak::Zmm(BReg + i), ptr[reg_redB + i * VecBytes]);
    }

    vbroadcastss(vreg_t(SReg), ptr[parambase + OFFSET(kscale)]);
    // reg_redB (reg_tmp2) is dead after the loads above; reuse it for matC.
    mov(reg_tmp2, ptr[parambase + OFFSET(matC)]);
    lea(reg_tmp2, ptr[reg_tmp2 + reg_itern * sizeof(float)]);
    load32(reg_tmp4, dword[parambase + OFFSET(cstride)]);

    for (int i = 0; i < _mtile; i++) {
      // broadcast this row's u8 zero point, widen to f32, apply scaleA*kscale
      vpbroadcastb(Xbyak::Xmm(AReg), ptr[reg_zpA]);
      vpmovzxbd(Xbyak::Zmm(AReg), Xbyak::Xmm(AReg));
      vcvtdq2ps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg));
      vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg), zword_b[reg_scaleA]);
      vmulps(Xbyak::Zmm(AReg), Xbyak::Zmm(AReg), vreg_t(SReg));
      for (int j = 0; j < NRegs; j++) {
        vmulps(Xbyak::Zmm(CReg + j), Xbyak::Zmm(AReg), Xbyak::Zmm(BReg + j));
        vmovups(Xbyak::Zmm(CF32Reg + j), ptr[reg_tmp2 + j * 64]);
        vsubps(Xbyak::Zmm(CF32Reg + j), Xbyak::Zmm(CReg + j));
        vmovups(ptr[reg_tmp2 + j * 64], Xbyak::Zmm(CF32Reg + j));
      }
      add(reg_tmp2, reg_tmp4);
      lea(reg_zpA, ptr[reg_zpA + reg_ldsa * sizeof(AType)]);
      lea(reg_scaleA, ptr[reg_scaleA + reg_ldsa * sizeof(float)]);
    }
    L(".NOZP");
    outLocalLabel();
  }

  // Results are already written to matC inside the k loop; nothing to do here.
  void write_back(int _mtile) { (void)(_mtile); }
};
// NOTE(review): the alias template parameter lists below were stripped in
// transit; reconstructed from the class parameters — confirm upstream.
template <int N, int M>
using Amxint8N16P4US = kblock::Amxint8N16P4<uint8_t, int8_t, N, M>;

template <int N, int M>
using Amxint8N16P4SS = kblock::Amxint8N16P4<int8_t, int8_t, N, M>;
}  // namespace kblock
}  // namespace code
template